text
stringlengths
0
1.25M
meta
stringlengths
47
1.89k
[STATEMENT] lemma OT_14_correct: "OT_14.correctness M C" [PROOF STATE] proof (prove) goal (1 subgoal): 1. OT_14.correctness M C [PROOF STEP] unfolding OT_14.correctness_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. protocol_14_OT M C = funct_OT_14 M C [PROOF STEP] using correctness_OT_14 [PROOF STATE] proof (prove) using this: funct_OT_14 ?M ?C = protocol_14_OT ?M ?C goal (1 subgoal): 1. protocol_14_OT M C = funct_OT_14 M C [PROOF STEP] by auto
{"llama_tokens": 221, "file": "Multi_Party_Computation_OT14", "length": 3}
#Script to plot Rydberg radial wave functions #23/07/2017 using Plots, JLD, LaTeXStrings pyplot() include("functions.jl") PyPlot.close("all") #Input information atom = "87Rb" nn = 50 ll = 0 jj = 0.5 #Calculate wave function normY_sol, rr = numerovfunc(atom,nn,ll,jj) #Rescale for plotting plotscale = sqrt(rr) probamp = (normY_sol.*plotscale).^2 alpha_c = getalpha(atom) PyPlot.figure() if nn>20 plot(plotscale[plotscale .> sqrt(alpha_c^(1/3))],normY_sol[plotscale .> sqrt(alpha_c^(1/3))],linewidth=2) else plot(plotscale,normY_sol,linewidth=2) end plot!(xlabel=L"(r/a_0)^{1/2}") plot!(ylabel=L"r^{1/2}R(r) \,(a_0^{-1})") plot!(title=string(atom," radial wavefunction |n,l,j⟩ = |",string(nn),",",string(ll),",",string(jj),"⟩")) plot!(leg=false) gui() PyPlot.figure() if nn>20 plot(rr[rr .> alpha_c^(1/3)],probamp[rr .> alpha_c^(1/3)],linewidth=2) else plot(rr,probamp,linewidth=2) end plot!(xlabel=L"r/a_0") plot!(ylabel=L"|rR(r)|^2 \,(a_0^{-1})") plot!(title=string(atom," radial probability density |n,l,j⟩ = |",string(nn),",",string(ll),",",string(jj),"⟩")) plot!(leg=false) gui()
{"hexsha": "be90041f3c263260609d90173d7359c9d6ed07e0", "size": 1099, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Julia/PlotWaveFunction.jl", "max_stars_repo_name": "CSChisholm/Rydberg", "max_stars_repo_head_hexsha": "13bd7ff296533c9cc9fc9cb8ffc3f7c6752a80bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Julia/PlotWaveFunction.jl", "max_issues_repo_name": "CSChisholm/Rydberg", "max_issues_repo_head_hexsha": "13bd7ff296533c9cc9fc9cb8ffc3f7c6752a80bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Julia/PlotWaveFunction.jl", "max_forks_repo_name": "CSChisholm/Rydberg", "max_forks_repo_head_hexsha": "13bd7ff296533c9cc9fc9cb8ffc3f7c6752a80bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-09T17:54:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-09T17:54:40.000Z", "avg_line_length": 23.3829787234, "max_line_length": 112, "alphanum_fraction": 0.669699727, "num_tokens": 395}
! ! Copyright 2013 Guy Munhoven ! ! This file is part of SolveSAPHE. ! SolveSAPHE is free software: you can redistribute it and/or modify ! it under the terms of the GNU Lesser General Public License as published by ! the Free Software Foundation, either version 3 of the License, or ! (at your option) any later version. ! ! SolveSAPHE is distributed in the hope that it will be useful, ! but WITHOUT ANY WARRANTY; without even the implied warranty of ! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ! GNU Lesser General Public License for more details. ! ! You should have received a copy of the GNU Lesser General Public License ! along with SolveSAPHE. If not, see <http://www.gnu.org/licenses/>. ! MODULE MOD_CHEMCONST USE MOD_PRECISION IMPLICIT NONE ! -------------------------------------------------------- ! List of subroutines for the chemical constants (PRIVATE) ! -------------------------------------------------------- PRIVATE AK_CARB_0_WEIS74 PRIVATE AK_CARB_1_MILL95, AK_CARB_2_MILL95 PRIVATE AK_CARB_1_LUEK00, AK_CARB_2_LUEK00 PRIVATE AK_CARB_1_ROYE93, AK_CARB_2_ROYE93 PRIVATE AK_BORA_DICK90 PRIVATE AK_PHOS_1_MILL95, AK_PHOS_2_MILL95, AK_PHOS_3_MILL95 PRIVATE AK_SILI_1_MILL95 PRIVATE AK_H2S_1_MILL95 PRIVATE AK_AMMO_1_YAMI95 PRIVATE AK_W_MILL95 PRIVATE AK_HSO4_DICK90 PRIVATE ABETA_HF_DIRI79 PRIVATE AK_HF_PEFR87 ! -------------------------------------- ! Parameters for usage within the module ! -------------------------------------- ! Gas constant ! ------------ REAL(KIND=wp), PARAMETER, PRIVATE :: gasconst_bar_cm3_o_mol_k = 83.14510_wp ! libthdyct !REAL(KIND=wp), PARAMETER, PRIVATE :: gasconst_bar_cm3_o_mol_k = 83.14472_wp ! Handbook (2007) ! 0 degrees centigrade in Kelvin ! ------------------------------ REAL(KIND=wp), PARAMETER, PRIVATE :: t_k_zerodegc = 273.15_wp ! Handbook (2007) ! -------------------------------------------------------------- ! Chemical constants' products: for usage by users of the module ! -------------------------------------------------------------- ! For each acid system A, ! - api1_aaa <-- K_A1 ! - api2_aaa <-- K_A1*K_A2 ! - api3_aaa <-- K_A1*K_A2*K_A3 ! - ... REAL(KIND=wp) :: api1_dic, api2_dic REAL(KIND=wp) :: api1_bor REAL(KIND=wp) :: api1_po4, api2_po4, api3_po4 REAL(KIND=wp) :: api1_sil REAL(KIND=wp) :: api1_nh4 REAL(KIND=wp) :: api1_h2s REAL(KIND=wp) :: api1_so4 REAL(KIND=wp) :: api1_flu REAL(KIND=wp) :: api1_wat, aphscale !******************************************************************************* CONTAINS !******************************************************************************* !======================================================================= SUBROUTINE SETUP_API4PHTOT(t_k, s, p_bar) !======================================================================= IMPLICIT NONE ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in Kelvin ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s REAL(KIND=wp), INTENT(IN) :: p_bar ! --------------- ! Local variables ! --------------- REAL(KIND=wp) :: zcvt_htot_o_hsws, zcvt_htot_o_hfree zcvt_htot_o_hsws = 1._wp/ACVT_HSWS_O_HTOT(t_k, s, p_bar) zcvt_htot_o_hfree = ACVT_HTOT_O_HFREE(t_k, s, p_bar) api1_dic = AK_CARB_1_LUEK00(t_k, s, p_bar) api2_dic = api1_dic * AK_CARB_2_LUEK00(t_k, s, p_bar) api1_bor = AK_BORA_DICK90(t_k, s, p_bar) api1_po4 = AK_PHOS_1_MILL95(t_k, s, p_bar) * zcvt_htot_o_hsws api2_po4 = api1_po4 * AK_PHOS_2_MILL95(t_k, s, p_bar) * zcvt_htot_o_hsws api3_po4 = api2_po4 * AK_PHOS_3_MILL95(t_k, s, p_bar) * zcvt_htot_o_hsws api1_sil = AK_SILI_1_MILL95(t_k, s ) * zcvt_htot_o_hsws api1_nh4 = AK_AMMO_1_YAMI95(t_k, s, p_bar) * zcvt_htot_o_hsws api1_h2s = AK_H2S_1_MILL95 (t_k, s, p_bar) * zcvt_htot_o_hsws api1_so4 = AK_HSO4_DICK90(t_k, s, p_bar) * zcvt_htot_o_hfree api1_flu = AK_HF_PEFR87(t_k, s, p_bar) api1_wat = AK_W_MILL95(t_k, s, p_bar) * zcvt_htot_o_hsws aphscale = zcvt_htot_o_hfree !======================================================================= END SUBROUTINE SETUP_API4PHTOT !======================================================================= !======================================================================= SUBROUTINE SETUP_API4PHSWS(t_k, s, p_bar) !======================================================================= ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in Kelvin ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s REAL(KIND=wp), INTENT(IN) :: p_bar ! --------------- ! Local variables ! --------------- REAL(KIND=wp) :: zcvt_hsws_o_htot, zcvt_hsws_o_hfree zcvt_hsws_o_htot = ACVT_HSWS_O_HTOT(t_k, s, p_bar) zcvt_hsws_o_hfree = ACVT_HSWS_O_HFREE(t_k, s, p_bar) api1_dic = AK_CARB_1_MILL95(t_k, s, p_bar) api2_dic = api1_dic * AK_CARB_2_MILL95(t_k, s, p_bar) api1_bor = AK_BORA_DICK90(t_k, s, p_bar) * zcvt_hsws_o_htot api1_po4 = AK_PHOS_1_MILL95(t_k, s, p_bar) api2_po4 = api1_po4 * AK_PHOS_2_MILL95(t_k, s, p_bar) api3_po4 = api2_po4 * AK_PHOS_3_MILL95(t_k, s, p_bar) api1_sil = AK_SILI_1_MILL95(t_k, s ) api1_nh4 = AK_AMMO_1_YAMI95(t_k, s, p_bar) api1_h2s = AK_H2S_1_MILL95 (t_k, s, p_bar) api1_so4 = AK_HSO4_DICK90(t_k, s, p_bar) * zcvt_hsws_o_hfree api1_flu = zcvt_hsws_o_hfree/ABETA_HF_DIRI79(t_k, s, p_bar) api1_wat = AK_W_MILL95(t_k, s, p_bar) aphscale = zcvt_hsws_o_hfree !======================================================================= END SUBROUTINE SETUP_API4PHSWS !======================================================================= !======================================================================= FUNCTION AK_CARB_0_WEIS74(t_k, s) !======================================================================= ! Function calculates K0 in (mol/kg-SW)/atmosphere ! References: Weiss (1979) [(mol/kg-SW)/atm] ! pH scale : N/A ! Note : currently no pressure correction IMPLICIT NONE REAL(KIND=wp) :: AK_CARB_0_WEIS74 ! ------------------ ! Argument variables ! ------------------ ! s : salinity ! t_k : temperature in K REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s ! --------------- ! Local variables ! --------------- ! zt_k_o_100 : zt_k/100 REAL(KIND=wp) :: zt_k_o_100 zt_k_o_100 = t_k/100._wp AK_CARB_0_WEIS74 & = EXP( -60.2409_wp + 93.4517_wp/zt_k_o_100 & + 23.3585_wp*LOG(zt_k_o_100) & + ( 0.023517_wp - 0.023656_wp*zt_k_o_100 & + 0.0047036_wp*zt_k_o_100*zt_k_o_100)*s ) RETURN !======================================================================= END FUNCTION AK_CARB_0_WEIS74 !======================================================================= !======================================================================= FUNCTION AK_CARB_1_MILL95(t_k, s, p_bar) !======================================================================= ! Function calculates first dissociation constant of carbonic acid ! in mol/kg-SW on the SWS pH-scale. ! References: Millero (1995, eq 50 -- ln K1(COM)) ! Millero (1982) pressure correction ! pH scale: SWS IMPLICIT NONE REAL(KIND=wp) :: AK_CARB_1_MILL95 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in Kelvin ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s REAL(KIND=wp), INTENT(IN) :: p_bar ! --------------- ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zsqrts : square root of salinity ! zds : salinity-34.8 ! zln_kc1_p0 : ln(K_C1) at p_bar = 0 ! zln_kc1_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki, zds, zsqrts REAL(KIND=wp) :: zln_kc1_p0, zln_kc1_pp ! ln(K_C1) value at p_bar = 0 zsqrts = SQRT(s) zln_kc1_p0 = 2.18867_wp & - 2275.0360_wp/t_k & - 1.468591_wp*LOG(t_k) & + ( -0.138681_wp - 9.33291_wp/t_k)*zsqrts & + 0.0726483_wp*s - 0.00574938_wp*s*zsqrts ! Pressure correction zt_degc = t_k - t_k_zerodegc zds = s - 34.8_wp zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -25.50_wp - 0.151_wp*zds + 0.1271_wp*zt_degc zdki = ( -3.08_wp - 0.578_wp*zds + 0.0877_wp*zt_degc)*1.0E-03_wp zln_kc1_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final K_C1 value AK_CARB_1_MILL95 = EXP( zln_kc1_p0 + zln_kc1_pp ) RETURN !======================================================================= END FUNCTION AK_CARB_1_MILL95 !======================================================================= !======================================================================= FUNCTION AK_CARB_2_MILL95(t_k, s, p_bar) !======================================================================= ! Function calculates second dissociation constant K1 ! in mol/kg-SW on the SWS pH-scale. ! References: Millero (1995, eq 51 -- ln K2(COM)) ! Millero (1979) pressure correction ! pH scale: SWS IMPLICIT NONE REAL(KIND=wp) :: AK_CARB_2_MILL95 ! Argument variables ! ------------------ ! t_k : temperature in Kelvin ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s REAL(KIND=wp), INTENT(IN) :: p_bar ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zsqrts : square root of salinity ! zds : salinity-34.8 ! zln_kc2_p0 : ln(K_C2) at p_bar = 0 ! zln_kc2_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki, zds, zsqrts REAL(KIND=wp) :: zln_kc2_p0, zln_kc2_pp ! ln(K_C2) value at p_bar = 0 zsqrts = SQRT(s) zln_kc2_p0 = -0.84226_wp & - 3741.1288_wp/t_k & - 1.437139_wp*LOG(t_k) & + (-0.128417_wp - 24.41239_wp/t_k)*zsqrts & + 0.1195308_wp*s & - 0.00912840_wp*s*zsqrts ! Pressure correction zt_degc = t_k - t_k_zerodegc zds = s - 34.8_wp zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -15.82_wp + 0.321_wp*zds - 0.0219_wp*zt_degc zdki = ( 1.13_wp - 0.314_wp*zds - 0.1475_wp*zt_degc)*1.0E-03_wp zln_kc2_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final K_C2 value AK_CARB_2_MILL95 = EXP( zln_kc2_p0 + zln_kc2_pp ) RETURN !======================================================================= END FUNCTION AK_CARB_2_MILL95 !======================================================================= !======================================================================= FUNCTION AK_CARB_1_LUEK00(t_k, s, p_bar) !======================================================================= ! Function calculates first dissociation constant of carbonic acid ! in mol/kg-SW on the Total pH-scale. ! References: Luecker et al. (2000) -- also Handbook (2007) ! Millero (1979) pressure correction ! pH scale: Total IMPLICIT NONE REAL(KIND=wp) :: AK_CARB_1_LUEK00 ! Argument variables ! ------------------ ! t_k : temperature in Kelvin ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s REAL(KIND=wp), INTENT(IN) :: p_bar ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zds : salinity-34.8 ! zlog10_kc1_p0 : log_10(k_C1) at p_bar = 0 ! zln_kc1_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki, zds, zsqrts REAL(KIND=wp) :: zlog10_kc1_p0, zln_kc1_pp ! log_10(K_C1) value at p_bar = 0 zlog10_kc1_p0 = 61.2172_wp & - 3633.86_wp/t_k & - 9.67770_wp*LOG(t_k) & + s*(0.011555 - s*0.0001152_wp) ! Pressure correction zt_degc = t_k - t_k_zerodegc zds = s - 34.8_wp zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -25.50_wp - 0.151_wp*zds + 0.1271_wp*zt_degc zdki = ( -3.08_wp - 0.578_wp*zds + 0.0877_wp*zt_degc)*1.0E-03_wp zln_kc1_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final K_C1 value AK_CARB_1_LUEK00 = 10._wp**zlog10_kc1_p0 * EXP(zln_kc1_pp) RETURN !======================================================================= END FUNCTION AK_CARB_1_LUEK00 !======================================================================= !======================================================================= FUNCTION AK_CARB_2_LUEK00(t_k, s, p_bar) !======================================================================= ! Function calculates second dissociation constant K1 ! in mol/kg-SW on the Total pH-scale. ! References: Luecker et al. (2000) -- also Handbook (2007) ! Millero (1979) pressure correction ! pH scale: Total IMPLICIT NONE REAL(KIND=wp) :: AK_CARB_2_LUEK00 ! Argument variables ! ------------------ ! t_k : temperature in Kelvin ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s REAL(KIND=wp), INTENT(IN) :: p_bar ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zsqrts : square root of salinity ! zds : salinity-34.8 ! zlog10_kc2_p0 : log_10(K_C2) at p_bar = 0 ! zln_kc2_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki, zds, zsqrts REAL(KIND=wp) :: zlog10_kc2_p0, zln_kc2_pp ! log_10(K_C2) value at p_bar = 0 zlog10_kc2_p0 = -25.9290_wp & - 471.78_wp/t_k + 3.16967_wp*LOG(t_k) & + s*(0.01781_wp - s*0.0001122_wp) ! Pressure correction zt_degc = t_k - t_k_zerodegc zds = s - 34.8_wp zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -15.82_wp + 0.321_wp*zds - 0.0219_wp*zt_degc zdki = ( 1.13_wp - 0.314_wp*zds - 0.1475_wp*zt_degc)*1.0E-03_wp zln_kc2_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final K_C2 value AK_CARB_2_LUEK00 = 10._wp**zlog10_kc2_p0 *EXP(zln_kc2_pp) RETURN !======================================================================= END FUNCTION AK_CARB_2_LUEK00 !======================================================================= !======================================================================= FUNCTION AK_CARB_1_ROYE93(t_k, s, p_bar) !======================================================================= ! Function calculates first dissociation constant of carbonic acid ! in mol/kg-SW on the Total pH-scale. ! References: Roy et al. (1993) -- also Handbook (1994) ! Millero (1979) pressure correction ! pH scale : Total ! Note : converted here from mol/kg-H2O to mol/kg-SW IMPLICIT NONE REAL(KIND=wp) :: AK_CARB_1_ROYE93 ! Argument variables ! ------------------ ! t_k : temperature in Kelvin ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s REAL(KIND=wp), INTENT(IN) :: p_bar ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zds : salinity-34.8 ! zln_kc1_p0 : ln(k_C1) at p_bar = 0 ! zln_kc1_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zsqrts, zcvt_to_kgsw REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki, zds REAL(KIND=wp) :: zln_kc1_p0, zln_kc1_pp ! ln(K_C1) value at p_bar = 0 zsqrts = SQRT(s) zcvt_to_kgsw = ACVT_KGH2O_O_KGSW(s) zln_kc1_p0 = -2307.1255_wp/t_k + 2.83655_wp - 1.5529413_wp*LOG(t_k) & + (-4.0484_wp/t_k - 0.20760841)*zsqrts & + 0.08468345*s & - 0.00654208*zsqrts*s ! Pressure correction zt_degc = t_k - t_k_zerodegc zds = s - 34.8_wp zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -25.50_wp - 0.151_wp*zds + 0.1271_wp*zt_degc zdki = ( -3.08_wp - 0.578_wp*zds + 0.0877_wp*zt_degc)*1.0E-03_wp zln_kc1_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final K_C1 value AK_CARB_1_ROYE93 = EXP(zln_kc1_p0 + zln_kc1_pp) * zcvt_to_kgsw RETURN !======================================================================= END FUNCTION AK_CARB_1_ROYE93 !======================================================================= !======================================================================= FUNCTION AK_CARB_2_ROYE93(t_k, s, p_bar) !======================================================================= ! Function calculates second dissociation constant K1 ! in mol/kg-SW on the Total pH-scale. ! References: Roy et al. (1993) -- also Handbook (1994) ! Millero (1979) pressure correction ! pH scale : Total ! Note : converted here from mol/kg-H2O to mol/kg-SW IMPLICIT NONE REAL(KIND=wp) :: AK_CARB_2_ROYE93 ! Argument variables ! ------------------ ! t_k : temperature in Kelvin ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s REAL(KIND=wp), INTENT(IN) :: p_bar ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zsqrts : square root of salinity ! zds : salinity-34.8 ! zln_kc2_p0 : ln(K_C2) at p_bar = 0 ! zln_kc2_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zsqrts, zcvt_to_kgsw REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki, zds REAL(KIND=wp) :: zln_kc2_p0, zln_kc2_pp ! ln(K_C2) value at p_bar = 0 zsqrts = SQRT(s) zcvt_to_kgsw = ACVT_KGH2O_O_KGSW(s) zln_kc2_p0 = -3351.6106_wp/t_k - 9.226508_wp - 0.2005743_wp*LOG(t_k) & + ( -23.9722_wp/t_k - 0.106901773_wp)*zsqrts & + 0.1130822*s - 0.00846934_wp*zsqrts*s ! Pressure correction zt_degc = t_k - t_k_zerodegc zds = s - 34.8_wp zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -15.82_wp + 0.321_wp*zds - 0.0219_wp*zt_degc zdki = ( 1.13_wp - 0.314_wp*zds - 0.1475_wp*zt_degc)*1.0E-03_wp zln_kc2_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final K_C2 value AK_CARB_2_ROYE93 = EXP(zln_kc2_p0 + zln_kc2_pp) * zcvt_to_kgsw RETURN !======================================================================= END FUNCTION AK_CARB_2_ROYE93 !======================================================================= !======================================================================= FUNCTION AK_BORA_DICK90(t_k, s, p_bar) !======================================================================= ! Function calculates boric acid dissociation constant KB ! in mol/kg-SW on the total pH-scale. ! References: Dickson (1990, eq. 23) -- also Handbook (2007, eq. 37) ! Millero (1979) pressure correction ! pH scale : total IMPLICIT NONE REAL(KIND=wp) :: AK_BORA_DICK90 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in Kelvin ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s REAL(KIND=wp), INTENT(IN) :: p_bar ! --------------- ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zsqrts : square root of salinity ! zds : salinity-34.8 ! zln_kb_p0 : K_b at p_bar = 0 ! zln_kb_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki, zds, zsqrts REAL(KIND=wp) :: zln_kb_p0, zln_kb_pp ! ln(K_B) value at p_bar = 0 zsqrts = SQRT(s) zln_kb_p0 = ( -8966.90_wp & + zsqrts*( -2890.53_wp & + zsqrts*( -77.942_wp & + zsqrts*( 1.728_wp - 0.0996_wp*zsqrts)))) / t_k & + 148.0248_wp + zsqrts*(137.1942_wp + zsqrts*1.62142_wp) & + (-24.4344_wp + zsqrts*(-25.085_wp - zsqrts*0.2474_wp)) * LOG(t_k) & + 0.053105_wp*zsqrts*t_k ! Pressure correction zt_degc = t_k - t_k_zerodegc zds = s - 34.8_wp zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -29.48_wp + 0.295_wp*zds + 0.1622_wp*zt_degc - 0.002608_wp*zt_degc*zt_degc zdki = (-2.84_wp + 0.354_wp*zds)*1.0E-03_wp zln_kb_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final K_B value AK_BORA_DICK90 = EXP( zln_kb_p0 + zln_kb_pp ) !======================================================================= END FUNCTION AK_BORA_DICK90 !======================================================================= !======================================================================= FUNCTION AK_W_MILL95(t_k, s, p_bar) !======================================================================= ! Function calculates water dissociation constant Kw in (mol/kg-SW)^2 ! References: Millero (1995) for value at p_bar = 0 ! Millero (pers. comm. 1996) for pressure correction ! pH scale : SWS IMPLICIT NONE REAL(KIND=wp) :: AK_W_MILL95 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! zrt : R*t_k ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zln_kw_p0 : ln(K_w) at p_bar = 0 ! zln_kw_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki, zds, zsqrts REAL(KIND=wp) :: zln_kw_p0, zln_kw_pp ! ln(K_w) value at p_bar = 0 zln_kw_p0 = 148.9802_wp & - 13847.26_wp/t_k & - 23.6521_wp*LOG(t_k) & + ( -5.977_wp + 118.67_wp/t_k + 1.0495_wp*LOG(t_k))*SQRT(s) & - 0.01615_wp*s ! Pressure correction zt_degc = t_k - t_k_zerodegc zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -20.02_wp + 0.1119_wp*zt_degc - 0.1409E-02_wp*zt_degc*zt_degc zdki = ( -5.13_wp + 0.0794_wp*zt_degc)*1.0E-03_wp zln_kw_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final K_w value AK_W_MILL95 = EXP( zln_kw_p0 + zln_kw_pp ) RETURN !======================================================================= END FUNCTION AK_W_MILL95 !======================================================================= !======================================================================= FUNCTION AK_PHOS_1_MILL95(t_k, s, p_bar) !======================================================================= ! Function returns the first dissociation constant ! of phosphoric acid (H3PO4) in seawater ! References: Yao and Millero (1995) ! Millero (1995) for pressure correction ! pH scale : SWS IMPLICIT NONE REAL(KIND=wp) :: AK_PHOS_1_MILL95 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zln_kp1_p0 : ln(K_p1) at p_bar = 0 ! zln_kp1_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki REAL(KIND=wp) :: zln_kp1_p0, zln_kp1_pp ! ln(K_P1) for p_bar = 0 zln_kp1_p0 = 115.54_wp - 4576.752_wp/t_k - 18.453_wp*LOG(t_k) & + ( 0.69171_wp - 106.736_wp/t_k)* SQRT(s) & + (-0.01844_wp - 0.65643_wp/t_k)*s ! Pressure correction zt_degc = t_k - t_k_zerodegc zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -14.51_wp + 0.1211_wp*zt_degc - 0.321E-03*zt_degc*zt_degc zdki = ( -2.67_wp + 0.0427_wp*zt_degc)*1.0E-03_wp zln_kp1_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final value of K_P1 AK_PHOS_1_MILL95 = EXP(zln_kp1_p0 + zln_kp1_pp) RETURN !======================================================================= END FUNCTION AK_PHOS_1_MILL95 !======================================================================= !======================================================================= FUNCTION AK_PHOS_2_MILL95(t_k, s, p_bar) !======================================================================= ! Function returns the second dissociation constant ! of phosphoric acid (H3PO4) in seawater ! References: Yao and Millero (1995) ! Millero (1995) for pressure correction ! pH scale : SWS IMPLICIT NONE REAL(KIND=wp) :: AK_PHOS_2_MILL95 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zln_kp2_p0 : ln(K_P2) at p_bar = 0 ! zln_kp2_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki REAL(KIND=wp) :: zln_kp2_p0, zln_kp2_pp ! ln(K_P2) for p_bar = 0 zln_kp2_p0 = 172.1033_wp & - 8814.715_wp/t_k & - 27.927_wp*LOG(t_k) & + ( 1.3566_wp - 160.340_wp/t_k)*SQRT(s) & + (-0.05778_wp + 0.37335_wp/t_k)*s ! Pressure correction zt_degc = t_k - t_k_zerodegc zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -23.12_wp + 0.1758_wp*zt_degc -2.647E-03_wp*zt_degc*zt_degc zdki = ( -5.15_wp + 0.09_wp*zt_degc)*1.0E-03_wp zln_kp2_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final K_P2 value AK_PHOS_2_MILL95 = EXP( zln_kp2_p0 + zln_kp2_pp ) RETURN !======================================================================= END FUNCTION AK_PHOS_2_MILL95 !======================================================================= !======================================================================= FUNCTION AK_PHOS_3_MILL95(t_k, s, p_bar) !======================================================================= ! Function returns the third dissociation constant ! of phosphoric acid (H3PO4) in seawater ! References: Yao and Millero (1995) ! Millero (1995) for pressure correction ! pH scale : SWS IMPLICIT NONE REAL(KIND=wp) :: AK_PHOS_3_MILL95 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zln_kp3_p0 : ln(K_P3) at p_bar = 0 ! zln_kp3_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki REAL(KIND=wp) :: zln_kp3_p0, zln_kp3_pp ! ln(K_P3) for p_bar = 0 zln_kp3_p0 = -18.126_wp - 3070.75_wp/t_k & + ( 2.81197_wp + 17.27039_wp/t_k)*SQRT(s) & + (-0.09984_wp - 44.99486_wp/t_k)*s ! Pressure correction zt_degc = t_k - t_k_zerodegc zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -26.57_wp + 0.2020_wp*zt_degc -3.042E-03*zt_degc*zt_degc zdki = ( -4.08_wp + 0.0714_wp*zt_degc)*1.0E-03_wp zln_kp3_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final K_P3 value AK_PHOS_3_MILL95 = EXP( zln_kp3_p0 + zln_kp3_pp ) RETURN !======================================================================= END FUNCTION AK_PHOS_3_MILL95 !======================================================================= !======================================================================= FUNCTION AK_SILI_1_MILL95(t_k, s) !======================================================================= ! Function returns the first dissociation constant ! of silicic acid (H4SiO4) in seawater ! References: Yao and Millero (1995) cited by Millero (1995) ! pH scale : SWS (according to Dickson et al, 2007) ! Note : No pressure correction available ! Note : converted here from mol/kg-H2O to mol/kg-sw IMPLICIT NONE REAL(KIND=wp) :: AK_SILI_1_MILL95 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity REAL(KIND=wp), INTENT(IN) :: t_k, s ! --------------- ! Local variables ! --------------- ! zcvt_to_kgsw: fraction of pure water in 1 kg seawater at salinity s ! zionst : ionic strength [mol/kg-H2O] ! zln_ksi1_p0 : ln(K_Si1) at p_bar = 0 ! zln_ksi1_pp : pressure correciotn for p_bar /= 0 REAL(KIND=wp) :: zionst, zcvt_to_kgsw REAL(KIND=wp) :: zln_ksi1_p0, zln_ksi1_pp ! K_Si1 value at p_bar = 0 zcvt_to_kgsw = ACVT_KGH2O_O_KGSW(s) zionst = A_IONSTRENGTH_SALIN(s)/zcvt_to_kgsw ! mol/kg-H2O !! zln_ksi1_p0 = 117.40_wp - 8904.2_wp/t_k - 19.334_wp * LOG(t_k) & + ( 3.5913_wp - 458.79_wp/t_k) * SQRT(zionst) & + (-1.5998_wp + 188.74_wp/t_k) * zionst & + (0.07871_wp - 12.1652_wp/t_k) * zionst*zionst ! Pressure correction : currently none zln_ksi1_pp = 0._wp ! Final value AK_SILI_1_MILL95 = EXP( zln_ksi1_p0 + zln_ksi1_pp ) * zcvt_to_kgsw RETURN !======================================================================= END FUNCTION AK_SILI_1_MILL95 !======================================================================= !======================================================================= FUNCTION AK_H2S_1_MILL95(t_k, s, p_bar) !======================================================================= ! Function returns the dissociation constant of hydrogen sulfide in sea-water ! References: Millero et al. (1988) (cited by Millero (1995) ! Millero (1995) for pressure correction ! pH scale : - SWS (according to Yao and Millero, 1995, p. 82: "refitted if necessary") ! - Total (according to Lewis and Wallace, 1998) ! Note : we stick to SWS here for the time being ! Note : the fits from Millero (1995) and Yao and Millero (1995) ! derive from Millero et al. (1998), with all the coefficients ! multiplied by -ln(10) IMPLICIT NONE REAL(KIND=wp) :: AK_H2S_1_MILL95 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! zt_degc : temperature in degrees Celsius ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zln_kh2s_p0 : ln(K_H2S) at p_bar = 0 ! zln_kh2s_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki REAL(KIND=wp) :: zln_kh2s_p0, zln_kh2s_pp ! K_H2S value at p_bar = 0 ! ------------------------ zln_kh2s_p0 = 225.838_wp & - 13275.3_wp/t_k & - 34.6435_wp * LOG(t_k) & + 0.3449_wp*SQRT(s) & - 0.0274_wp*s ! Pressure correction ! ------------------- zt_degc = t_k - t_k_zerodegc zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -14.80_wp + zt_degc*(0.0020_wp - zt_degc*0.400E-03_wp) zdki = ( 2.89_wp + zt_degc*0.054_wp)*1.0E-03_wp zln_kh2s_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final K_H2S value ! ----------------- AK_H2S_1_MILL95 = EXP( zln_kh2s_p0 + zln_kh2s_pp ) RETURN !======================================================================= END FUNCTION AK_H2S_1_MILL95 !======================================================================= !======================================================================= FUNCTION AK_AMMO_1_YAMI95(t_k, s, p_bar) !======================================================================= ! Function returns the dissociation constant ! of ammonium in sea-water [mol/kg-SW] ! References: Yao and Millero (1995) ! Millero (1995) for pressure correction ! pH scale : SWS IMPLICIT NONE REAL(KIND=wp) :: AK_AMMO_1_YAMI95 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! zt_degc : temperature in degrees Celsius ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zln_knh4_p0 : ln(K_NH4) at p_bar = 0 ! zln_knh4_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki REAL(KIND=wp) :: zln_knh4_p0, zln_knh4_pp ! K_NH4 value at p_bar = 0 ! ------------------------ zln_knh4_p0 = -0.25444_wp - 6285.33_wp/t_k + 0.0001635_wp*t_k & + ( 0.46532_wp - 123.7184_wp/t_k) * SQRT(s) & + (-0.01992_wp + 3.17556_wp/t_k) * s ! Pressure correction ! ------------------- zt_degc = t_k - t_k_zerodegc zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -26.43_wp + zt_degc*(0.0889_wp - zt_degc*0.905E-03_wp) zdki = ( -5.03_wp + zt_degc*0.0814_wp)*1.0E-03_wp zln_knh4_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final K_NH4 value ! ----------------- AK_AMMO_1_YAMI95 = EXP( zln_knh4_p0 + zln_knh4_pp ) RETURN !======================================================================= END FUNCTION AK_AMMO_1_YAMI95 !======================================================================= !======================================================================= FUNCTION ACVT_KGH2O_O_KGSW(s) !======================================================================= ! Function returns the mass of pure water in one kg of seawater ! of salinity s ! References: "libthdyct" -- derived by Munhoven (1997) from data by Millero (1982) ! "Handbook (2007)" -- Handbook (2007) ! pH scale: N/A IMPLICIT NONE REAL(KIND=wp) :: ACVT_KGH2O_O_KGSW REAL(KIND=wp), INTENT(IN) :: s !ACVT_KGH2O_O_KGSW = 1._wp - 0.0010049_wp*s ! libthdyct ACVT_KGH2O_O_KGSW = 1._wp - 0.001005_wp*s ! Handbook (2007) RETURN !======================================================================= END FUNCTION ACVT_KGH2O_O_KGSW !======================================================================= !======================================================================= FUNCTION A_IONSTRENGTH_SALIN(s) !======================================================================= ! Function calculates ionic strength in mol/kg-SW, for given salinity. ! References: "libthdyct" -- derived by Munhoven (1997) from data by Millero (1982) ! "Handbook (2007)" -- Handbook (2007) ! pH scale: N/A IMPLICIT NONE REAL(KIND=wp) :: A_IONSTRENGTH_SALIN ! ------------------ ! Argument variables ! ------------------ REAL(KIND=wp), INTENT(IN) :: s !A_IONSTRENGTH_SALIN = (0.019920D+00*s) ! libthdyct A_IONSTRENGTH_SALIN = (0.019924D+00*s) ! Handbook (2007) RETURN !======================================================================= END FUNCTION A_IONSTRENGTH_SALIN !======================================================================= !======================================================================= FUNCTION ABETA_HF_DIRI79(t_k, s, p_bar) !======================================================================= ! Function calculates association constant \beta_{HF} [(mol/kg-SW)^{-1}] ! in (mol/kg-SW)^{-1}, where ! \beta_{HF} = \frac{ [HF] }{ [H^{+}] [F^{-}] } ! References: Dickson and Riley (1979) ! Millero (1995) for pressure correction ! pH scale : free ! Note : converted here from mol/kg-H2O to mol/kg-SW IMPLICIT NONE REAL(KIND=wp) :: ABETA_HF_DIRI79 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zionst : ionic strength [mol/kg-H2O] ! zcvt_to_kgsw : mass of pure water in 1kg of seawater as a fct. of salinity ! zln_bhf_p0 : \beta_HF at p_bar = 0 ! zln_khf_pp : pressure correction for k_HF = 1/\beta_HF at p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki, zds, zsqrts REAL(KIND=wp) :: zionst, zcvt_to_kgsw REAL(KIND=wp) :: zln_bhf_p0, zln_khf_pp ! \beta_HF at p_bar = 0 ! --------------------- zcvt_to_kgsw = ACVT_KGH2O_O_KGSW(s) zionst = A_IONSTRENGTH_SALIN(s)/zcvt_to_kgsw zln_bhf_p0 = -1590.2_wp/t_k + 12.641_wp - 1.525_wp*SQRT(zionst) ! Pressure correction ! ------------------- zt_degc = t_k - t_k_zerodegc zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -9.78_wp + zt_degc*(-0.0090_wp - zt_degc*0.942E-03_wp) zdki = ( -3.91_wp + zt_degc*0.054_wp)*1.0E-03_wp zln_khf_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final \beta_HF value ! -------------------- ! notice that ln(k_HF(P)) = ln(k_HF(0)) + zln_khf_pp ! <=> -ln(\beta_HF(P)) = -ln(\beta_HF(0)) + zln_khf_pp ! <=> ln(\beta_HF(P)) = ln(\beta_HF(0)) - zln_khf_pp ABETA_HF_DIRI79 = EXP(zln_bhf_p0 - zln_khf_pp ) / zcvt_to_kgsw RETURN !======================================================================= END FUNCTION ABETA_HF_DIRI79 !======================================================================= !======================================================================= FUNCTION AK_HF_PEFR87(t_k, s, p_bar) !======================================================================= ! Function calculates dissociation constant for hydrogen fluoride ! in mol/kg-SW ! References: Perez and Fraga (1987) ! Millero (1995) for pressure correction ! pH scale : Total (according to Handbook, 2007) IMPLICIT NONE REAL(KIND=wp) :: AK_HF_PEFR87 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zln_khf_p0 : ln(K_HF) at p_bar = 0 ! zln_khf_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki, zds, zsqrts REAL(KIND=wp) :: zln_khf_p0, zln_khf_pp ! ln(K_HF) at p_bar = 0 zln_khf_p0 = 874._wp/t_k - 9.68_wp + 0.111_wp*SQRT(s) ! Pressure correction zt_degc = t_k - t_k_zerodegc zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -9.78_wp + zt_degc*(-0.0090_wp - zt_degc*0.942E-03_wp) zdki = ( -3.91_wp + zt_degc*0.054_wp)*1.0E-03_wp zln_khf_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final value of K_HF AK_HF_PEFR87 = EXP( zln_khf_p0 + zln_khf_pp ) RETURN !======================================================================= END FUNCTION AK_HF_PEFR87 !======================================================================= !======================================================================= FUNCTION AK_HSO4_DICK90(t_k, s, p_bar) !======================================================================= ! Function returns the dissociation constant of hydrogen sulfate (bisulfate) ! References: Dickson (1990) -- also Handbook (2007) ! Millero (1995) for pressure correction ! pH scale : free ! Note : converted here from mol/kg-H2O to mol/kg-SW IMPLICIT NONE REAL(KIND=wp) :: AK_HSO4_DICK90 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zionst : ionic strength in mol/-kg-H2O ! zsqrti : square root og ion strength ! zcvt_to_kgsw : mass of pure water in 1kg of seawater as a fct. of salinity ! zln_khso4_p0 : K_HSO4 at p_bar = 0 ! zln_khso4_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zt_degc, zdvi, zdki REAL(KIND=wp) :: zcvt_to_kgsw, zionst, zsqrti REAL(KIND=wp) :: zln_khso4_p0, zln_khso4_pp ! ln(K_HSO4) at p_bar = 0 zcvt_to_kgsw = ACVT_KGH2O_O_KGSW(s) zionst = A_IONSTRENGTH_SALIN(s)/zcvt_to_kgsw zsqrti = SQRT(zionst) zln_khso4_p0 = -4276.1_wp/t_k + 141.328_wp - 23.093_wp*LOG(t_k) & + (-13856._wp/t_k + 324.57_wp - 47.986_wp*LOG(t_k)) * zsqrti & + ( 35474._wp/t_k - 771.54_wp + 114.723_wp*LOG(t_k)) * zionst & - ( 2698._wp/t_k)*zsqrti * zionst & + ( 1776._wp/t_k)*zionst*zionst ! Pressure correction zt_degc = t_k - t_k_zerodegc zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -18.03_wp + zt_degc*(0.0466_wp + zt_degc*0.316E-03_wp) zdki = ( -4.53_wp + zt_degc*0.0900_wp)*1.0E-03_wp zln_khso4_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! ln(K_HSO4) at p_bar = 0 AK_HSO4_DICK90 = zcvt_to_kgsw * EXP( zln_khso4_p0 + zln_khso4_pp ) RETURN !======================================================================= END FUNCTION AK_HSO4_DICK90 !======================================================================= !======================================================================= FUNCTION ASP_CALC_MUCC83(t_k, s, p_bar) !======================================================================= ! Function returns stoechiometric solubility product ! of calcite in seawater ! References: Mucci (1983) ! Millero (1995) for pressure correction ! pH scale : N/A ! Units : (mol/kg-SW)^2 IMPLICIT NONE REAL(KIND=wp) :: ASP_CALC_MUCC83 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zsqrts : square root of salinity ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zln_kp1_p0 : ln(K_p1) at p_bar = 0 ! zln_kp1_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zsqrts, zt_degc, zdvi, zdki REAL(KIND=wp) :: zlog10_kspcalc_p0, zln_kspcalc_pp zsqrts = SQRT(s) ! log10(Ksp_Calc) for p_bar = 0 zlog10_kspcalc_p0 = & -171.9065_wp - 0.077993_wp*t_k & + 2839.319_wp/t_k + 71.595_wp*LOG10(t_k) & + ( -0.77712_wp + 0.0028426*t_k + 178.34_wp/t_k)*zsqrts & - 0.07711_wp*s + 0.0041249_wp*s*zsqrts ! Pressure correction zt_degc = t_k - t_k_zerodegc zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -48.76_wp + 0.5304_wp*zt_degc zdki = (-11.76_wp + 0.3692_wp*zt_degc)*1.0E-03_wp zln_kspcalc_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final value of Ksp_Calc ASP_CALC_MUCC83 = 10._wp**(zlog10_kspcalc_p0) * EXP(zln_kspcalc_pp) RETURN !======================================================================= END FUNCTION ASP_CALC_MUCC83 !======================================================================= !======================================================================= FUNCTION ASP_ARAG_MUCC83(t_k, s, p_bar) !======================================================================= ! Function returns stoechiometric solubility product ! of aragonite in seawater ! References: Mucci (1983) ! Millero (1979) for pressure correction ! pH scale : N/A ! Units : (mol/kg-SW)^2 IMPLICIT NONE REAL(KIND=wp) :: ASP_ARAG_MUCC83 ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! zrt : R*t_k, R in bar*cm3/(mol*K) ! zsqrts : square root of salinity ! zt_degc : temperature in degrees Celsius ! zdvi : volume change for ionization ! zdki : compressibility change for ionization ! zln_kp1_p0 : ln(K_p1) at p_bar = 0 ! zln_kp1_pp : pressure correction for p_bar /= 0 REAL(KIND=wp) :: zrt, zsqrts, zt_degc, zdvi, zdki REAL(KIND=wp) :: zlog10_ksparag_p0, zln_ksparag_pp zsqrts = SQRT(s) ! log10(Ksp_Arag) for p_bar = 0 zlog10_ksparag_p0 = & -171.945_wp - 0.077993_wp*t_k & + 2903.293_wp/t_k + 71.595_wp*LOG10(t_k) & + ( -0.068393_wp + 0.0017276_wp*t_k + 88.135_wp/t_k)*zsqrts & - 0.10018_wp*s + 0.0059415_wp*s*zsqrts ! Pressure correction zt_degc = t_k - t_k_zerodegc zrt = gasconst_bar_cm3_o_mol_k * t_k zdvi = -48.76_wp + 0.5304_wp*zt_degc + 2.8_wp zdki = (-11.76_wp + 0.3692_wp*zt_degc)*1.0E-03_wp zln_ksparag_pp = (-zdvi + zdki*p_bar/2._wp)*p_bar/zrt ! Final value of Ksp_Arag ASP_ARAG_MUCC83 = 10._wp**(zlog10_ksparag_p0) * EXP(zln_ksparag_pp) RETURN !======================================================================= END FUNCTION ASP_ARAG_MUCC83 !======================================================================= !======================================================================= FUNCTION A_BTOT_SALIN(s) !======================================================================= ! Function returns total borate concentration in mol/kg-SW ! given the salinity of a sample ! References: Uppström (1974), cited by Dickson et al. (2007, chapter 5, p 10) ! Millero (1982) cited in Millero (1995) ! pH scale : N/A IMPLICIT NONE REAL(KIND=wp) :: A_BTOT_SALIN ! ------------------ ! Argument variables ! ------------------ REAL(KIND=wp), INTENT(IN) :: s A_BTOT_SALIN = 0.000416_wp*(s/35._wp) RETURN !======================================================================= END FUNCTION A_BTOT_SALIN !======================================================================= !======================================================================= FUNCTION A_CATOT_SALIN(s) !======================================================================= ! Function returns total calcium concentration in mol/kg-SW ! given the salinity of a sample ! References: Culkin (1965) ! pH scale : N/A IMPLICIT NONE REAL(KIND=wp) :: A_CATOT_SALIN ! ------------------ ! Argument variables ! ------------------ REAL(KIND=wp), INTENT(IN) :: s A_CATOT_SALIN = 0.010282_wp*(s/35._wp) RETURN !======================================================================= END FUNCTION A_CATOT_SALIN !======================================================================= !======================================================================= FUNCTION A_FTOT_SALIN(s) !======================================================================= ! Function returns total calcium concentration in mol/kg-SW ! given the salinity of a sample ! References: Culkin (1965) (???) ! pH scale : N/A IMPLICIT NONE REAL(KIND=wp) :: A_FTOT_SALIN ! ------------------ ! Argument variables ! ------------------ REAL(KIND=wp), INTENT(IN) :: s A_FTOT_SALIN = 0.000068_wp*(s/35._wp) RETURN !======================================================================= END FUNCTION A_FTOT_SALIN !======================================================================= !======================================================================= FUNCTION A_SO4TOT_SALIN(s) !======================================================================= ! Function returns total sulfate concentration in mol/kg-SW ! given the salinity of a sample ! References: Morris, A.W. and Riley, J.P. (1966) quoted in Handbook (2007) ! pH scale : N/A IMPLICIT NONE REAL(KIND=wp) :: A_SO4TOT_SALIN ! ------------------ ! Argument variables ! ------------------ REAL(KIND=wp), INTENT(IN) :: s !A_SO4TOT_SALIN = 0.028234_wp*(s/35._wp) ! in libthdyct and Thesis !A_SO4TOT_SALIN = 0.02824_wp*(s/35._wp) ! Handbook (2007, chap 6, p 10, tab 2, col 3) A_SO4TOT_SALIN = (0.1400_wp/96.062_wp)*(s/1.80655_wp) ! Handbook (2007, chap 6, p 10) RETURN !======================================================================= END FUNCTION A_SO4TOT_SALIN !======================================================================= !======================================================================= FUNCTION ACVT_HSWS_O_HTOT(t_k, s, p_bar) !======================================================================= ! Function returns the ratio H_SWS/H_Tot as a function of salinity s ! Reference: Munhoven ! pH scale: all IMPLICIT NONE REAL(KIND=wp) :: ACVT_HSWS_O_HTOT ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s REAL(KIND=wp), INTENT(IN) :: p_bar ! --------------- ! Local variables ! --------------- ! zso4_tot: total sulfate concentration in mol/kg-SW ! zf_tot : total fluoride concentration in mol/kg-SW REAL(KIND=wp) :: zso4_tot, zf_tot !----------------------------------------------------------------------- zso4_tot = A_SO4TOT_SALIN(s) zf_tot = A_FTOT_SALIN(s) ACVT_HSWS_O_HTOT = 1._wp + (zf_tot*ABETA_HF_DIRI79(t_k, s, p_bar)) & /(1._wp + zso4_tot/AK_HSO4_DICK90(t_k,s, p_bar)) RETURN !======================================================================= END FUNCTION ACVT_HSWS_O_HTOT !======================================================================= !======================================================================= FUNCTION ACVT_HTOT_O_HFREE(t_k, s, p_bar) !======================================================================= ! Function returns the ratio H_Tot/H_free as a function of salinity s ! Reference: Munhoven ! pH scale: N/A IMPLICIT NONE REAL(KIND=wp) :: ACVT_HTOT_O_HFREE ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s REAL(KIND=wp), INTENT(IN) :: p_bar ! --------------- ! Local variables ! --------------- ! zso4_tot: total sulfate concentration in mol/kg-SW REAL(KIND=wp) :: zso4_tot !----------------------------------------------------------------------- zso4_tot = A_SO4TOT_SALIN(s) ACVT_HTOT_O_HFREE = 1._wp + zso4_tot/AK_HSO4_DICK90(t_k,s, p_bar) RETURN !======================================================================= END FUNCTION ACVT_HTOT_O_HFREE !======================================================================= !======================================================================= FUNCTION ACVT_HSWS_O_HFREE(t_k, s, p_bar) !======================================================================= ! Function returns the ratio H_SWS/H_free as a function ! of salinity s ! Reference: Munhoven ! pH scale: N/A IMPLICIT NONE REAL(KIND=wp) :: ACVT_HSWS_O_HFREE ! ------------------ ! Argument variables ! ------------------ ! t_k : temperature in K ! s : salinity ! p_bar : applied pressure in bar REAL(KIND=wp), INTENT(IN) :: t_k REAL(KIND=wp), INTENT(IN) :: s REAL(KIND=wp), INTENT(IN) :: p_bar ! --------------- ! Local variables ! --------------- ! zso4_tot: total sulfate concentration in mol/kg-SW ! zf_tot : total fluoride concentration in mol/kg-SW REAL(KIND=wp) :: zso4_tot, zf_tot !----------------------------------------------------------------------- zso4_tot = A_SO4TOT_SALIN(s) zf_tot = A_FTOT_SALIN(s) ACVT_HSWS_O_HFREE = 1._wp + zf_tot*ABETA_HF_DIRI79(t_k, s, p_bar) & + zso4_tot/AK_HSO4_DICK90(t_k,s, p_bar) RETURN !======================================================================= END FUNCTION ACVT_HSWS_O_HFREE !======================================================================= !======================================================================= FUNCTION A_RHOSW1_MUNH97(t_k, s, p_bar) !======================================================================= ! Function returns first order approximation of \rho in (kg-SW)/(m^3-SW) ! References: Munhoven (1997) ! after EOS80 (UNESCO, 1981, 1983) IMPLICIT NONE REAL(KIND=wp) :: A_RHOSW1_MUNH97 ! ------------------ ! Argument variables ! ------------------ ! s : salinity ! tk : temperature in K ! p_bar : depth in m REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! s0 : 35.5 ! t_k0 : 285.16 K ! p_bar0 : 300 bar REAL(KIND=wp), PARAMETER :: s0 = 35.5_wp REAL(KIND=wp), PARAMETER :: t_k0 = 285.16_wp REAL(KIND=wp), PARAMETER :: p_bar0 = 300.0_wp A_RHOSW1_MUNH97 = 1039.9044_wp + 0.77629393_wp*(s-s0) & - 0.19692738_wp*(t_k-t_k0) & + 0.044038615_wp*(p_bar-p_bar0) RETURN !======================================================================= END FUNCTION A_RHOSW1_MUNH97 !======================================================================= !======================================================================= FUNCTION A_RHOSW2_MUNH97(t_k, s, p_bar) !======================================================================= ! Function returns first order approximation of \rho in (kg-SW)/(m^3-SW) ! References: Munhoven (1997) ! after EOS80 (UNESCO, 1981, 1983) IMPLICIT NONE REAL(KIND=wp) :: A_RHOSW2_MUNH97 ! ------------------ ! Argument variables ! ------------------ ! s : salinity ! tk : temperature in K ! p_bar : depth in m REAL(KIND=wp), INTENT(IN) :: t_k, s, p_bar ! --------------- ! Local variables ! --------------- ! s0 : 35.5 ! t_k0 : 285.16 K ! p_bar0 : 300 bar REAL(KIND=wp), PARAMETER :: s0 = 35.5_wp REAL(KIND=wp), PARAMETER :: t_k0 = 285.16_wp REAL(KIND=wp), PARAMETER :: p_bar0 = 300.0_wp A_RHOSW2_MUNH97 = 1040.0145_wp & + 0.77629393_wp*(s-s0) & - 0.25013591_wp*(t_k-t_k0) & + 4.2026266E-02_wp*(p_bar-p_bar0) & - 4.7473116E-03_wp*(t_k-t_k0)*(t_k-t_k0) & - 4.7974224E-06_wp*(p_bar-p_bar0)*(p_bar-p_bar0) & - 2.1404592E-04_wp*(t_k-t_k0)*(p_bar-p_bar0) RETURN !======================================================================= END FUNCTION A_RHOSW2_MUNH97 !======================================================================= !======================================================================= SUBROUTINE CHECKCONSTANTS !======================================================================= IMPLICIT NONE ! ------------------ ! Argument variables ! ------------------ ! N/A ! --------------- ! Local variables ! --------------- ! s : salinity ! tk : temperature in K ! p_bar : applied pressure in bar REAL(KIND=wp) :: t_k, s, p_bar REAL(KIND=wp) :: zkc0, zkc1, zkc2 REAL(KIND=wp) :: zkb REAL(KIND=wp) :: zkhf REAL(KIND=wp) :: zkhso4 REAL(KIND=wp) :: zkp1, zkp2, zkp3 REAL(KIND=wp) :: zksi1 REAL(KIND=wp) :: zkw REAL(KIND=wp) :: zknh4 REAL(KIND=wp) :: zkh2s INTEGER, PARAMETER :: logunit = 1 OPEN(logunit,FILE='checkconst.log') WRITE(logunit,*) 'Checking constant values generated from MOD_CHEMCONST' WRITE(logunit,*) WRITE(logunit,*) ' % indicates checking against the Handbook (1994);' WRITE(logunit,*) ' $ indicates checking against the Lewis and Wallace (1998);' WRITE(logunit,*) ' * indicates checking against the Handbook (2007);' WRITE(logunit,*) ' target values are quoted in brackets' WRITE(logunit,*) WRITE(logunit,*) WRITE(logunit,*) ' For S = 35, P = 0 and T/K = 298.15:' WRITE(logunit,*) s = 35._wp p_bar = 0._wp t_k = 298.15_wp zkc0 = AK_CARB_0_WEIS74(t_k, s) WRITE(logunit,*) WRITE(logunit,*) 'K_0 -- Weiss (1974)' WRITE(logunit,*) '===================' WRITE(logunit,*) WRITE(logunit,*) ' K_0 :', zkc0 WRITE(logunit,*) ' ln(K_0) :', LOG(zkc0) WRITE(logunit,*) ' pK_0 :', -LOG10(zkc0) WRITE(logunit,'(" * ln(K_0) :", F8.4, " (-3.5617)")') LOG(zkc0) WRITE(logunit,*) zkhso4 = AK_HSO4_DICK90(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_HSO4 -- Dickson (1990) -- pH_free' WRITE(logunit,*) '===================================' WRITE(logunit,*) WRITE(logunit,*) ' K_HSO4 :', zkhso4 WRITE(logunit,*) ' ln(K_HSO4) :', LOG(zkhso4) WRITE(logunit,*) ' pK_HSO4 :', -LOG10(zkhso4) WRITE(logunit,'(" * ln(K_HSO4) :", F6.2, " (-2.30)")') LOG(zkhso4) WRITE(logunit,*) zkb = AK_BORA_DICK90(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_b -- Dickson (1990) -- pH_tot' WRITE(logunit,*) '===============================' WRITE(logunit,*) WRITE(logunit,*) ' K_b :', zkb WRITE(logunit,*) ' ln(K_b) :', LOG(zkb) WRITE(logunit,*) ' pK_b :', -LOG10(zkb) WRITE(logunit,'(" * ln(K_b) :", F9.4, " (-19.7964)")') LOG(zkb) WRITE(logunit,*) zkc1 = AK_CARB_1_LUEK00(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_1 -- Luecker et al (2000) -- pH_tot' WRITE(logunit,*) '=====================================' WRITE(logunit,*) WRITE(logunit,*) ' K_1 :', zkc1 WRITE(logunit,*) ' ln(K_1) :', LOG(zkc1) WRITE(logunit,*) ' pK_1 :', -LOG10(zkc1) WRITE(logunit,'(" * log10(K_1) :", F8.4, " (-5.8472)")') LOG10(zkc1) WRITE(logunit,*) zkc2 = AK_CARB_2_LUEK00(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_2 -- Luecker et al (2000) -- pH_tot' WRITE(logunit,*) '=====================================' WRITE(logunit,*) WRITE(logunit,*) ' K_2 :', zkc2 WRITE(logunit,*) ' ln(K_2) :', LOG(zkc2) WRITE(logunit,*) ' pK_2 :', -LOG10(zkc2) WRITE(logunit,'(" * log10(K_2) :", F8.4, " (-8.9660)")') LOG10(zkc2) WRITE(logunit,*) zkc1 = AK_CARB_1_ROYE93(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_1 -- Roy et al (1993) -- pH_tot' WRITE(logunit,*) '=================================' WRITE(logunit,*) WRITE(logunit,*) ' K_1 :', zkc1 WRITE(logunit,*) ' ln(K_1) :', LOG(zkc1) WRITE(logunit,*) ' pK_1 :', -LOG10(zkc1) WRITE(logunit,'(" % ln(K_1) :", F9.4, " (-13.4847)")') LOG(zkc1) WRITE(logunit,*) zkc2 = AK_CARB_2_ROYE93(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_2 -- Roy et al (1993) -- pH_tot' WRITE(logunit,*) '=================================' WRITE(logunit,*) WRITE(logunit,*) ' K_2 :', zkc2 WRITE(logunit,*) ' ln(K_2) :', LOG(zkc2) WRITE(logunit,*) ' pK_2 :', -LOG10(zkc2) WRITE(logunit,'(" % ln(K_2) :", F9.4, " (-20.5504)")') LOG(zkc2) WRITE(logunit,*) zkhf = AK_HF_PEFR87(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_HF -- Perez and Fraga (1987) -- pH_tot' WRITE(logunit,*) '========================================' WRITE(logunit,*) WRITE(logunit,*) ' K_HF :', zkhf WRITE(logunit,*) ' ln(K_HF) :', LOG(zkhf) WRITE(logunit,*) ' pK_HF :', -LOG10(zkhf) WRITE(logunit,'(" * ln(K_HF) :", F6.2, " (-6.09)")') LOG(zkhf) WRITE(logunit,*) zkp1 = AK_PHOS_1_MILL95(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_P1 -- Millero (1995) -- pH_SWS' WRITE(logunit,*) '================================' WRITE(logunit,*) WRITE(logunit,*) ' K_P1 :', zkp1 WRITE(logunit,*) ' ln(K_P1) :', LOG(zkp1) WRITE(logunit,*) ' pK_1 :', -LOG10(zkp1) WRITE(logunit,'(" * ln(K_P1)-0.015 :", F6.2, " (-3.71)")') LOG(zkp1)-0.015_wp WRITE(logunit,*) zkp2 = AK_PHOS_2_MILL95(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_P2 -- Millero (1995) -- pH_SWS' WRITE(logunit,*) '================================' WRITE(logunit,*) WRITE(logunit,*) ' K_2 :', zkp2 WRITE(logunit,*) ' ln(K_P2) :', LOG(zkp2) WRITE(logunit,*) ' pK_2 :', -LOG10(zkp2) WRITE(logunit,'(" * ln(K_P2)-0.015 :", F8.3, " (-13.727)")') LOG(zkp2)-0.015_wp WRITE(logunit,*) zkp3 = AK_PHOS_3_MILL95(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_P3 -- Millero (1995) -- pH_SWS' WRITE(logunit,*) '================================' WRITE(logunit,*) WRITE(logunit,*) ' K_P3 :', zkp3 WRITE(logunit,*) ' ln(K_P3) :', LOG(zkp3) WRITE(logunit,*) ' pK_P3 :', -LOG10(zkp3) WRITE(logunit,'(" * ln(K_P3)-0.015 :", F7.2, " (-20.24)")') LOG(zkp3)-0.015_wp WRITE(logunit,*) zksi1 = AK_SILI_1_MILL95(t_k, s) WRITE(logunit,*) WRITE(logunit,*) 'K_Si1 -- Millero (1995) -- pH_SWS' WRITE(logunit,*) '=================================' WRITE(logunit,*) WRITE(logunit,*) ' K_Si1 :', zksi1 WRITE(logunit,*) ' ln(K_Si1) :', LOG(zksi1) WRITE(logunit,*) ' pK_Si1 :', -LOG10(zksi1) WRITE(logunit,'(" * ln(K_Si1)-0.015:", F7.2, " (-21.61)")') LOG(zksi1)-0.015_wp WRITE(logunit,*) zkw = AK_W_MILL95(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_w -- Millero (1995) -- pH_SWS' WRITE(logunit,*) '===============================' WRITE(logunit,*) WRITE(logunit,*) ' K_w :', zkw WRITE(logunit,*) ' ln(K_w) :', LOG(zkw) WRITE(logunit,*) ' pK_w :', -LOG10(zkw) WRITE(logunit,'(" * ln(K_w)-0.015 :", F8.3, " (-30.434)")') LOG(zkw)-0.015_wp WRITE(logunit,*) zkh2s = AK_H2S_1_MILL95(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_H2S -- Millero (1995) -- pH_SWS' WRITE(logunit,*) '=================================' WRITE(logunit,*) WRITE(logunit,*) ' K_H2S :', zkh2s WRITE(logunit,*) ' ln(K_H2S) :', LOG(zkh2s) WRITE(logunit,*) ' pK_H2S :', -LOG10(zkh2s) WRITE(logunit,'(" $ pK_H2S :", F5.2, " (6.51)")') -LOG10(zkh2s) WRITE(logunit,*) zknh4 = AK_AMMO_1_YAMI95(t_k, s, p_bar) WRITE(logunit,*) WRITE(logunit,*) 'K_NH4 -- Yao and Millero (1995) -- pH_SWS' WRITE(logunit,*) '=========================================' WRITE(logunit,*) WRITE(logunit,*) ' K_NH4 :', zknh4 WRITE(logunit,*) ' ln(K_NH4) :', LOG(zknh4) WRITE(logunit,*) ' pK_NH4 :', -LOG10(zknh4) WRITE(logunit,'(" $ pK_NH4 :", F5.2, " (9.26)")') -LOG10(zknh4) WRITE(logunit,*) CLOSE(logunit) RETURN !======================================================================= END SUBROUTINE CHECKCONSTANTS !======================================================================= END MODULE MOD_CHEMCONST
{"hexsha": "973b3d6473113a4ca8fd4a6e09c29404e54c7e9a", "size": 66828, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "docker/water/delft3d/tags/v6686/src/engines_gpl/waq/packages/waq_kernel/src/waq_process/solvesaphe/mod_chemconst.f90", "max_stars_repo_name": "liujiamingustc/phd", "max_stars_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-06T03:01:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T03:02:55.000Z", "max_issues_repo_path": "docker/water/delft3d/tags/v6686/src/engines_gpl/waq/packages/waq_kernel/src/waq_process/solvesaphe/mod_chemconst.f90", "max_issues_repo_name": "liujiamingustc/phd", "max_issues_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docker/water/delft3d/tags/v6686/src/engines_gpl/waq/packages/waq_kernel/src/waq_process/solvesaphe/mod_chemconst.f90", "max_forks_repo_name": "liujiamingustc/phd", "max_forks_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9576442114, "max_line_length": 100, "alphanum_fraction": 0.4872059616, "num_tokens": 20187}
import math import torch import paddle import pgl import numpy as np import paddle.fluid as F import paddle.fluid.layers as L import copy from pgl.contrib.ogb.nodeproppred.dataset_pgl import PglNodePropPredDataset from ogb.nodeproppred import Evaluator from utils import to_undirected, add_self_loop, linear_warmup_decay from model import Products_label_embedding_model from dataloader.ogb_products_dataloader import SampleDataGenerator import paddle.fluid.profiler as profiler from pgl.utils import paddle_helper import argparse from tqdm import tqdm evaluator = Evaluator(name='ogbn-products') def get_config(): parser = argparse.ArgumentParser() ## data_sampling_arg data_group= parser.add_argument_group('data_arg') data_group.add_argument('--batch_size', default=1500, type=int) data_group.add_argument('--num_workers', default=12, type=int) data_group.add_argument('--sizes', default=[10, 10, 10], type=int, nargs='+' ) data_group.add_argument('--buf_size', default=1000, type=int) ## model_arg model_group=parser.add_argument_group('model_base_arg') model_group.add_argument('--num_layers', default=3, type=int) model_group.add_argument('--hidden_size', default=128, type=int) model_group.add_argument('--num_heads', default=4, type=int) model_group.add_argument('--dropout', default=0.3, type=float) model_group.add_argument('--attn_dropout', default=0, type=float) ## label_embed_arg embed_group=parser.add_argument_group('embed_arg') embed_group.add_argument('--use_label_e', action='store_true') embed_group.add_argument('--label_rate', default=0.625, type=float) ## train_arg train_group=parser.add_argument_group('train_arg') train_group.add_argument('--runs', default=10, type=int ) train_group.add_argument('--epochs', default=100, type=int ) train_group.add_argument('--lr', default=0.001, type=float) train_group.add_argument('--place', default=-1, type=int) train_group.add_argument('--log_file', default='result_products.txt', type=str) return parser.parse_args() def optimizer_func(lr): return F.optimizer.AdamOptimizer(learning_rate=lr) def eval_test(parser, test_p_list, model, test_exe, dataset, split_idx): eval_gg=SampleDataGenerator(graph_wrappers=[model.gw_list[0]], buf_size=parser.buf_size, batch_size=parser.batch_size , num_workers=1, sizes=[-1,], shuffle=False, dataset=dataset, nodes_idx=None) out_r_temp=[] test_p, out=test_p_list[0] pbar = tqdm(total=eval_gg.num_nodes* model.num_layers) pbar.set_description('Evaluating') for feed_batch in tqdm(eval_gg.generator()): feed_batch['label_idx']=split_idx['train'] feat_batch= test_exe.run(test_p, feed=feed_batch, fetch_list=out) out_r_temp.append(feat_batch[0]) pbar.update(feed_batch['label'].shape[0]) our_r=np.concatenate(out_r_temp, axis=0) for test_p, out in test_p_list[1:]: #np.concatenate out_r_temp=[] for feed_batch in tqdm(eval_gg.generator()): feed_batch['hidden_node_feat'] = our_r[feed_batch['batch_nodes_0']] feat_batch= test_exe.run(test_p, feed=feed_batch, fetch_list=out) out_r_temp.append(feat_batch[0]) pbar.update(feed_batch['label'].shape[0]) our_r=np.concatenate(out_r_temp, axis=0) pbar.close() y_pred=our_r.argmax(axis=-1) y_pred=np.expand_dims(y_pred, 1) y_true=eval_gg.labels train_acc = evaluator.eval({ 'y_true': y_true[split_idx['train']], 'y_pred': y_pred[split_idx['train']], })['acc'] val_acc = evaluator.eval({ 'y_true': y_true[split_idx['valid']], 'y_pred': y_pred[split_idx['valid']], })['acc'] test_acc = evaluator.eval({ 'y_true': y_true[split_idx['test']], 'y_pred': y_pred[split_idx['test']], })['acc'] return train_acc, val_acc, test_acc def train_loop(parser, start_program, main_program, test_p_list, model, feat_init, place, dataset, split_idx, exe, run_id, wf=None): #build up training program exe.run(start_program) feat_init(place) max_acc=0 # best test_acc max_step=0 # step for best test_acc max_val_acc=0 # best val_acc max_cor_acc=0 # test_acc for best val_acc max_cor_step=0 # step for best val_acc #training loop for epoch_id in range(parser.epochs): #start training if parser.use_label_e: train_idx_temp=copy.deepcopy(split_idx['train']) np.random.shuffle(train_idx_temp) label_idx=train_idx_temp[ :int(parser.label_rate*len(train_idx_temp))] unlabel_idx=train_idx_temp[int(parser.label_rate*len(train_idx_temp)):] train_gg=SampleDataGenerator(graph_wrappers=model.gw_list, buf_size=parser.buf_size, batch_size=parser.batch_size , num_workers=parser.num_workers, sizes=parser.sizes, shuffle=True, dataset=dataset, nodes_idx=unlabel_idx) pbar = tqdm(total=unlabel_idx.shape[0]) pbar.set_description(f'Epoch {epoch_id:02d}') total=0.0 acc_num=0.0 for batch_feed in tqdm(train_gg.generator()): batch_feed['label_idx']=label_idx loss = exe.run(main_program, feed=batch_feed, fetch_list=[model.avg_cost, model.out_feat]) total+=loss[0][0] acc_num=(loss[1].argmax(axis=-1)==batch_feed['label'].reshape(-1)).sum()+acc_num pbar.update(batch_feed['label'].shape[0]) pbar.close() print(total/(len(train_gg)/parser.batch_size)) print('acc: ', (acc_num/unlabel_idx.shape[0])*100) #eval result if (epoch_id+1)>=50 and (epoch_id+1)%10==0: result = eval_test(parser, test_p_list, model, exe, dataset, split_idx) train_acc, valid_acc, test_acc = result max_acc = max(test_acc, max_acc) if max_acc == test_acc: max_step=epoch_id max_val_acc=max(valid_acc, max_val_acc) if max_val_acc==valid_acc: max_cor_acc=test_acc max_cor_step=epoch_id max_acc=max(result[2], max_acc) if max_acc==result[2]: max_step=epoch_id result_t=(f'Run: {run_id:02d}, ' f'Epoch: {epoch_id:02d}, ' f'Loss: {total:.4f}, ' f'Train: {100 * train_acc:.2f}%, ' f'Valid: {100 * valid_acc:.2f}%, ' f'Test: {100 * test_acc:.2f}% \n' f'max_Test: {100 * max_acc:.2f}%, ' f'max_step: {max_step}\n' f'max_val: {100 * max_val_acc:.2f}%, ' f'max_val_Test: {100 * max_cor_acc:.2f}%, ' f'max_val_step: {max_cor_step}\n' ) # if (epoch_id+1)%50==0: print(result_t) wf.write(result_t) wf.write('\n') wf.flush() return max_cor_acc if __name__ == '__main__': parser = get_config() print('===========args==============') print(parser) print('=============================') startup_prog = F.default_startup_program() train_prog = F.default_main_program() place=F.CPUPlace() if parser.place <0 else F.CUDAPlace(parser.place) dataset = PglNodePropPredDataset(name="ogbn-products") # dataset = PglNodePropPredDataset(name="ogbn-arxiv") split_idx=dataset.get_idx_split() graph, label = dataset[0] print(label.shape) with F.program_guard(train_prog, startup_prog): with F.unique_name.guard(): gw_list=[] for i in range(len(parser.sizes)): gw_list.append(pgl.graph_wrapper.GraphWrapper( name="product_"+str(i))) feature_input, feat_init=paddle_helper.constant( name='node_feat_input', dtype='float32', value=graph.node_feat['feat']) if parser.use_label_e: model=Products_label_embedding_model(feature_input, gw_list, parser.hidden_size, parser.num_heads, parser.dropout, parser.num_layers) else: model=Arxiv_baseline_model(gw, parser.hidden_size, parser.num_heads, parser.dropout, parser.num_layers) # test_prog=train_prog.clone(for_test=True) model.train_program() adam_optimizer = optimizer_func(parser.lr)#optimizer adam_optimizer.minimize(model.avg_cost) test_p_list=[] with F.unique_name.guard(): ## build up eval program test_p=F.Program() with F.program_guard(test_p, ): gw_test=pgl.graph_wrapper.GraphWrapper( name="product_"+str(0)) feature_input, feat_init__=paddle_helper.constant( name='node_feat_input', dtype='float32', value=graph.node_feat['feat']) label_feature=model.label_embed_input(model.feature_input) feature_batch=model.get_batch_feature(label_feature) # 把batch_feat打出来 feature_batch=model.get_gat_layer(0, gw_test, feature_batch, hidden_size=model.hidden_size, num_heads=model.num_heads, concat=True, layer_norm=True, relu=True) sub_node_index=F.data(name='sub_node_index_0', shape=[None], dtype="int64") feature_batch=L.gather(feature_batch, sub_node_index, overwrite=False) # test_p=test_p.clone(for_test=True) test_p_list.append((test_p, feature_batch)) for i in range(1,model.num_layers-1): test_p=F.Program() with F.program_guard(test_p, ): gw_test=pgl.graph_wrapper.GraphWrapper( name="product_"+str(0)) # feature_batch=model.get_batch_feature(label_feature, test=True) feature_batch = F.data( 'hidden_node_feat', shape=[None, model.num_heads*model.hidden_size], dtype='float32') feature_batch=model.get_gat_layer(i, gw_test, feature_batch, hidden_size=model.hidden_size, num_heads=model.num_heads, concat=True, layer_norm=True, relu=True) sub_node_index=F.data(name='sub_node_index_0', shape=[None], dtype="int64") feature_batch=L.gather(feature_batch, sub_node_index, overwrite=False) # test_p=test_p.clone(for_test=True) test_p_list.append((test_p, feature_batch)) test_p=F.Program() with F.program_guard(test_p, ): gw_test=pgl.graph_wrapper.GraphWrapper( name="product_"+str(0)) # feature_batch=model.get_batch_feature(label_feature, test=True) feature_batch = F.data( 'hidden_node_feat', shape=[None, model.num_heads*model.hidden_size ], dtype='float32') feature_batch = model.get_gat_layer(model.num_layers-1, gw_test, feature_batch, hidden_size=model.out_size, num_heads=model.num_heads, concat=False, layer_norm=False, relu=False, gate=True) sub_node_index=F.data(name='sub_node_index_0', shape=[None], dtype="int64") feature_batch=L.gather(feature_batch, sub_node_index, overwrite=False) # test_p=test_p.clone(for_test=True) test_p_list.append((test_p, feature_batch)) exe = F.Executor(place) wf = open(parser.log_file, 'w', encoding='utf-8') total_test_acc=0.0 for run_i in range(parser.runs): total_test_acc+=train_loop(parser, startup_prog, train_prog, test_p_list, model, feat_init, place, dataset, split_idx, exe, run_i, wf) wf.write(f'average: {100 * (total_test_acc/parser.runs):.2f}%') wf.close()
{"hexsha": "d9780e3d8f24f9d974b89cc575523a3ab0508530", "size": 13340, "ext": "py", "lang": "Python", "max_stars_repo_path": "ogb_examples/nodeproppred/unimp/main_product.py", "max_stars_repo_name": "zbmain/PGL", "max_stars_repo_head_hexsha": "dbded6a1543248b0a33c05eb476ddc513401a774", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1389, "max_stars_repo_stars_event_min_datetime": "2019-06-11T03:29:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T18:25:43.000Z", "max_issues_repo_path": "ogb_examples/nodeproppred/unimp/main_product.py", "max_issues_repo_name": "zbmain/PGL", "max_issues_repo_head_hexsha": "dbded6a1543248b0a33c05eb476ddc513401a774", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 232, "max_issues_repo_issues_event_min_datetime": "2019-06-21T06:52:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T08:20:31.000Z", "max_forks_repo_path": "ogb_examples/nodeproppred/unimp/main_product.py", "max_forks_repo_name": "zbmain/PGL", "max_forks_repo_head_hexsha": "dbded6a1543248b0a33c05eb476ddc513401a774", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 229, "max_forks_repo_forks_event_min_datetime": "2019-06-20T12:13:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T12:04:48.000Z", "avg_line_length": 41.4285714286, "max_line_length": 99, "alphanum_fraction": 0.5690404798, "include": true, "reason": "import numpy", "num_tokens": 2872}
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core from hypothesis import given import caffe2.python.hypothesis_test_util as hu import hypothesis.strategies as st import numpy as np # Reference implementation from detectron/lib/utils/boxes.py def bbox_transform(boxes, deltas, weights=(1.0, 1.0, 1.0, 1.0)): """Forward transform that maps proposal boxes to predicted ground-truth boxes using bounding-box regression deltas. See bbox_transform_inv for a description of the weights argument. """ if boxes.shape[0] == 0: return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype) boxes = boxes.astype(deltas.dtype, copy=False) widths = boxes[:, 2] - boxes[:, 0] + 1.0 heights = boxes[:, 3] - boxes[:, 1] + 1.0 ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heights wx, wy, ww, wh = weights dx = deltas[:, 0::4] / wx dy = deltas[:, 1::4] / wy dw = deltas[:, 2::4] / ww dh = deltas[:, 3::4] / wh # Prevent sending too large values into np.exp() BBOX_XFORM_CLIP = np.log(1000. / 16.) dw = np.minimum(dw, BBOX_XFORM_CLIP) dh = np.minimum(dh, BBOX_XFORM_CLIP) pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis] pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis] pred_w = np.exp(dw) * widths[:, np.newaxis] pred_h = np.exp(dh) * heights[:, np.newaxis] pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype) # x1 pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # y1 pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # x2 (note: "- 1" is correct; don't be fooled by the asymmetry) pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1 # y2 (note: "- 1" is correct; don't be fooled by the asymmetry) pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1 return pred_boxes # Reference implementation from detectron/lib/utils/boxes.py def clip_tiled_boxes(boxes, im_shape): """Clip boxes to image boundaries. im_shape is [height, width] and boxes has shape (N, 4 * num_tiled_boxes).""" assert boxes.shape[1] % 4 == 0, \ 'boxes.shape[1] is {:d}, but must be divisible by 4.'.format( boxes.shape[1] ) # x1 >= 0 boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0) # y1 >= 0 boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0) # x2 < im_shape[1] boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0) # y2 < im_shape[0] boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0) return boxes def generate_rois(roi_counts, im_dims): assert len(roi_counts) == len(im_dims) all_rois = [] for i, num_rois in enumerate(roi_counts): if num_rois == 0: continue # [batch_idx, x1, y1, x2, y2] rois = np.random.uniform( 0, im_dims[i], size=(roi_counts[i], 5) ).astype(np.float32) rois[:, 0] = i # batch_idx # Swap (x1, x2) if x1 > x2 rois[:, 1], rois[:, 3] = np.minimum(rois[:, 1], rois[:, 3]), \ np.maximum(rois[:, 1], rois[:, 3]) # Swap (y1, y2) if y1 > y2 rois[:, 2], rois[:, 4] = np.minimum(rois[:, 2], rois[:, 4]), \ np.maximum(rois[:, 2], rois[:, 4]) all_rois.append(rois) if len(all_rois) > 0: return np.vstack(all_rois) return np.empty((0, 5)).astype(np.float32) class TestBBoxTransformOp(hu.HypothesisTestCase): @given( num_rois=st.integers(1, 10), num_classes=st.integers(1, 10), im_dim=st.integers(100, 600), skip_batch_id=st.booleans(), **hu.gcs_cpu_only ) def test_bbox_transform( self, num_rois, num_classes, im_dim, skip_batch_id, gc, dc ): """ Test with all rois belonging to a single image per run. """ rois = generate_rois([num_rois], [im_dim]) if skip_batch_id: rois = rois[:, 1:5] deltas = np.random.randn(num_rois, 4 * num_classes).astype(np.float32) im_info = np.array([im_dim, im_dim, 1.0]).astype(np.float32).reshape(1, 3) def bbox_transform_ref(rois, deltas, im_info): boxes = rois if rois.shape[1] == 4 else rois[:, 1:5] box_out = bbox_transform(boxes, deltas) im_shape = im_info[0, 0:2] box_out = clip_tiled_boxes(box_out, im_shape) return [box_out] op = core.CreateOperator( "BBoxTransform", ["rois", "deltas", "im_info"], ["box_out"], apply_scale=False, correct_transform_coords=True, ) self.assertReferenceChecks( device_option=gc, op=op, inputs=[rois, deltas, im_info], reference=bbox_transform_ref, ) @given( roi_counts=st.lists(st.integers(0, 5), min_size=1, max_size=10), num_classes=st.integers(1, 10), **hu.gcs_cpu_only ) def test_bbox_transform_batch(self, roi_counts, num_classes, gc, dc): """ Test with rois for multiple images in a batch """ batch_size = len(roi_counts) total_rois = sum(roi_counts) im_dims = np.random.randint(100, 600, batch_size) rois = generate_rois(roi_counts, im_dims) deltas = np.random.randn(total_rois, 4 * num_classes).astype(np.float32) im_info = np.zeros((batch_size, 3)).astype(np.float32) im_info[:, 0] = im_dims im_info[:, 1] = im_dims im_info[:, 2] = 1.0 def bbox_transform_ref(rois, deltas, im_info): box_out = [] offset = 0 for i, num_rois in enumerate(roi_counts): if num_rois == 0: continue cur_boxes = rois[offset:offset + num_rois, 1:5] cur_deltas = deltas[offset:offset + num_rois] cur_box_out = bbox_transform(cur_boxes, cur_deltas) im_shape = im_info[i, 0:2] cur_box_out = clip_tiled_boxes(cur_box_out, im_shape) box_out.append(cur_box_out) offset += num_rois if len(box_out) > 0: box_out = np.vstack(box_out) else: box_out = np.empty(deltas.shape).astype(np.float32) return [box_out, roi_counts] op = core.CreateOperator( "BBoxTransform", ["rois", "deltas", "im_info"], ["box_out", "roi_batch_splits"], apply_scale=False, correct_transform_coords=True, ) self.assertReferenceChecks( device_option=gc, op=op, inputs=[rois, deltas, im_info], reference=bbox_transform_ref, )
{"hexsha": "7fe1ae7abe46b72c354dbb82ef29a55fff380c64", "size": 6973, "ext": "py", "lang": "Python", "max_stars_repo_path": "caffe2/python/operator_test/bbox_transform_test.py", "max_stars_repo_name": "shigengtian/caffe2", "max_stars_repo_head_hexsha": "e19489d6acd17fea8ca98cd8e4b5b680e23a93c5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-26T13:25:03.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-26T13:25:03.000Z", "max_issues_repo_path": "caffe2/python/operator_test/bbox_transform_test.py", "max_issues_repo_name": "shigengtian/caffe2", "max_issues_repo_head_hexsha": "e19489d6acd17fea8ca98cd8e4b5b680e23a93c5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "caffe2/python/operator_test/bbox_transform_test.py", "max_forks_repo_name": "shigengtian/caffe2", "max_forks_repo_head_hexsha": "e19489d6acd17fea8ca98cd8e4b5b680e23a93c5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-12-20T09:14:48.000Z", "max_forks_repo_forks_event_max_datetime": "2018-12-20T09:14:48.000Z", "avg_line_length": 35.758974359, "max_line_length": 80, "alphanum_fraction": 0.5755055213, "include": true, "reason": "import numpy", "num_tokens": 2023}
from __future__ import print_function import torch import torch.nn as nn import pickle import data_prep as prep from torchvision import transforms, utils import torch.nn.parallel import numpy as np from torch.utils.data import DataLoader from generator import Generator from discriminator import Discriminator from torch.utils.data import DataLoader import matplotlib.pyplot as plt import cv2 __author__ = 'JosueCom' __date__ = '5/8/2020' __email__ = "josue.n.rivera@outlook.com" #image_size = 500 nc = 3 ngf = 25 batch_size = 4 image_size = 500 beta1 = 0.5 ngpu = torch.cuda.device_count() lf_to_rg_ratio = 0.5 diff_pickle = open("planet_earth_diff.pickle","rb") print("loading dataset") dataset = prep.PIFDataset( path='data_prepocessing/PlanetEarth', diff = pickle.load(diff_pickle), transform=transforms.Compose([ transforms.ToPILImage(), #transforms.Resize(image_size), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ])) dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True) print("Done loading dataset") device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu") #device = torch.device("cpu") def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm') != -1: nn.init.normal_(m.weight.data, 1.0, 0.02) nn.init.constant_(m.bias.data, 0) ## generator #netG = Generator(ngpu, nc, ngf).to(device) """if (device.type == 'cuda') and (ngpu > 1): netG = nn.DataParallel(netG, list(range(ngpu)))""" #netG.apply(weights_init) netG = torch.load("nice/generator6.bin") netG.eval() print("running model") batch = next(iter(dataloader)) torch.cuda.empty_cache() out = netG(batch["prev"][0].to(device).unsqueeze(0), batch["next"][0].to(device).unsqueeze(0)) print("done running model") #plt.figure(figsize=(2,2)) #plt.axis("off") #plt.title("Previous Training Images") print(batch["prev"].size()) #plt.imshow(np.transpose(utils.make_grid(batch["prev"].to(device)[:batch_size], padding=2, normalize=True).cpu().detach(),(1,2,0))) #plt.show() #img = np.transpose(utils.make_grid(batch["prev"].to(device)[:batch_size], padding=2, normalize=True).cpu().detach(),(1,2,0)).numpy() #plt.imsave('prev.jpg', img) utils.save_image(batch["prev"][0], 'cool/img1.png', normalize=True, padding=0) #plt.axis("off") #plt.title("Next Training Images") print(batch["next"].size()) #plt.imshow(np.transpose(utils.make_grid(batch["next"].to(device)[:batch_size], padding=2, normalize=True).cpu().detach(),(1,2,0))) #plt.show() #img = np.transpose(utils.make_grid(batch["next"].to(device)[:batch_size], padding=2, normalize=True).cpu().detach(),(1,2,0)).numpy() #plt.imsave('next.jpg', img) utils.save_image(batch["next"][0], 'cool/img3.png', normalize=True, padding=0) #plt.axis("off") #plt.title("Infered Images") print(out.size()) #plt.imshow(np.transpose(utils.make_grid(out.to(device)[:batch_size], padding=2, normalize=True).cpu().detach(),(1,2,0))) #plt.show() #img = np.transpose(utils.make_grid(out.to(device)[:batch_size], padding=2, normalize=True).cpu().detach(),(1,2,0)).numpy() #plt.imsave('infered.jpg', img) utils.save_image(out, 'cool/img2.png', normalize=True, padding=0)
{"hexsha": "59e5e6442bd8d698391ccdb58cb20bdb91cedd46", "size": 3357, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference/infer.py", "max_stars_repo_name": "biringaChi/PIF", "max_stars_repo_head_hexsha": "5eca2a7bab8b6acf24db24c37dfaf9f41e66d88f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-10-23T00:41:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T06:50:28.000Z", "max_issues_repo_path": "inference/infer.py", "max_issues_repo_name": "biringaChi/pif-model", "max_issues_repo_head_hexsha": "5eca2a7bab8b6acf24db24c37dfaf9f41e66d88f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference/infer.py", "max_forks_repo_name": "biringaChi/pif-model", "max_forks_repo_head_hexsha": "5eca2a7bab8b6acf24db24c37dfaf9f41e66d88f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-09T17:25:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-09T17:25:16.000Z", "avg_line_length": 32.2788461538, "max_line_length": 133, "alphanum_fraction": 0.7024128686, "include": true, "reason": "import numpy", "num_tokens": 929}
using Lindenmayer, Luxor, Colors, ColorSchemes crystal = LSystem(Dict( "F" => "9F[F-]+*", ), "F") plant = LSystem(Dict( "A" => "UBB8D", # initialize "X" => "*[-F*X*]+F*X"), "AX") global x = 0 function f(t::Turtle) pos = Point(t.xpos, t.ypos) if x == 0 # we'll just do this at the very start sethue("black") circle(O, 245, :clip) paint() end d = distance(pos, boxbottomcenter(BoundingBox())) setcolor([Luxor.julia_purple, Luxor.julia_red, Luxor.julia_green][rand(1:end)]) circle(pos, 10, :fill) global x += 1 end drawLSystem(plant, forward = 70, turn = 17, iterations=4, #startingx=-250, #startingy=250, startingorientation = -π/2, startingpen = (1, 1, 1), width=500, height=500, asteriskfunction = f, filename="docs/src/assets/logo.png", backgroundcolor = RGBA(1, 1, 1, 0))
{"hexsha": "42388b3046699e21eec6cd73e5a5959928e6b2fa", "size": 957, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/logo.jl", "max_stars_repo_name": "cormullion/Lindenmayer", "max_stars_repo_head_hexsha": "977f22c8386ae50a7fe4992d3b6a7656acfc3ed5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2017-01-26T09:20:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T04:25:14.000Z", "max_issues_repo_path": "test/logo.jl", "max_issues_repo_name": "cormullion/Lindenmayer", "max_issues_repo_head_hexsha": "977f22c8386ae50a7fe4992d3b6a7656acfc3ed5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-12T12:10:04.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-30T21:34:49.000Z", "max_forks_repo_path": "test/logo.jl", "max_forks_repo_name": "cormullion/Lindenmayer", "max_forks_repo_head_hexsha": "977f22c8386ae50a7fe4992d3b6a7656acfc3ed5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-10-03T21:47:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T13:04:51.000Z", "avg_line_length": 22.7857142857, "max_line_length": 86, "alphanum_fraction": 0.5444096134, "num_tokens": 308}
import numpy as np from tensorflow.contrib.graph_editor import Transformer def crop(image, bbox, x, y, length): x, y, bbox = x.astype(np.int), y.astype(np.int), bbox.astype(np.int) x_min, y_min, x_max, y_max = bbox w, h = x_max - x_min, y_max - y_min # Crop image to bbox image = image[y_min:y_min + h, x_min:x_min + w, :] # Crop joints and bbox x -= x_min y -= y_min bbox = np.array([0, 0, x_max - x_min, y_max - y_min]) # Scale to desired size side_length = max(w, h) f_xy = float(length) / float(side_length) image, bbox, x, y = Transformer.scale(image, bbox, x, y, f_xy) # Pad new_w, new_h = image.shape[1], image.shape[0] cropped = np.zeros((length, length, image.shape[2])) dx = length - new_w dy = length - new_h x_min, y_min = int(dx / 2.), int(dy / 2.) x_max, y_max = x_min + new_w, y_min + new_h cropped[y_min:y_max, x_min:x_max, :] = image x += x_min y += y_min x = np.clip(x, x_min, x_max) y = np.clip(y, y_min, y_max) bbox += np.array([x_min, y_min, x_min, y_min]) return cropped, bbox, x.astype(np.int), y.astype(np.int) def scale(image, bbox, x, y, f_xy): (h, w, _) = image.shape h, w = int(h * f_xy), int(w * f_xy) from numpy import resize image = resize(image, (h, w), preserve_range=True, anti_aliasing=True, mode='constant').astype(np.uint8) x = x * f_xy y = y * f_xy bbox = bbox * f_xy x = np.clip(x, 0, w) y = np.clip(y, 0, h) return image, bbox, x, y def flip(image, bbox, x, y): image = np.fliplr(image).copy() w = image.shape[1] x_min, y_min, x_max, y_max = bbox bbox = np.array([w - x_max, y_min, w - x_min, y_max]) x = w - x x, y = Transformer.swap_joints(x, y) return image, bbox, x, y def rotate(image, bbox, x, y, angle): # image - -(256, 256, 3) # bbox - -(4,) # x - -[126 129 124 117 107 99 128 107 108 105 137 155 122 99] # y - -[209 176 136 123 178 225 65 47 46 24 44 64 49 54] # angle - --8.165648811999333 # center of image [128,128] o_x, o_y = (np.array(image.shape[:2][::-1]) - 1) / 2. width, height = image.shape[0], image.shape[1] x1 = x y1 = height - y o_x = o_x o_y = height - o_y image = rotate(image, angle, preserve_range=True).astype(np.uint8) r_x, r_y = o_x, o_y angle_rad = (np.pi * angle) / 180.0 x = r_x + np.cos(angle_rad) * (x1 - o_x) - np.sin(angle_rad) * (y1 - o_y) y = r_y + np.sin(angle_rad) * (x1 - o_x) + np.cos(angle_rad) * (y1 - o_y) x = x y = height - y bbox[0] = r_x + np.cos(angle_rad) * (bbox[0] - o_x) + np.sin(angle_rad) * (bbox[1] - o_y) bbox[1] = r_y + -np.sin(angle_rad) * (bbox[0] - o_x) + np.cos(angle_rad) * (bbox[1] - o_y) bbox[2] = r_x + np.cos(angle_rad) * (bbox[2] - o_x) + np.sin(angle_rad) * (bbox[3] - o_y) bbox[3] = r_y + -np.sin(angle_rad) * (bbox[2] - o_x) + np.cos(angle_rad) * (bbox[3] - o_y) return image, bbox, x.astype(np.int), y.astype(np.int)
{"hexsha": "b5dfb121615cfb2b05b1f38e73962de4166a2cad", "size": 3032, "ext": "py", "lang": "Python", "max_stars_repo_path": "my_cv/utils/numpy_handle_image.py", "max_stars_repo_name": "strawsyz/straw", "max_stars_repo_head_hexsha": "db313c78c2e3c0355cd10c70ac25a15bb5632d41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-06T09:09:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-24T03:59:55.000Z", "max_issues_repo_path": "my_cv/utils/numpy_handle_image.py", "max_issues_repo_name": "strawsyz/straw", "max_issues_repo_head_hexsha": "db313c78c2e3c0355cd10c70ac25a15bb5632d41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "my_cv/utils/numpy_handle_image.py", "max_forks_repo_name": "strawsyz/straw", "max_forks_repo_head_hexsha": "db313c78c2e3c0355cd10c70ac25a15bb5632d41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2553191489, "max_line_length": 108, "alphanum_fraction": 0.5745382586, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1112}
#include <cstdlib> #include <iostream> #include <fstream> #include <exception> #include <ctime> #include <boost/program_options.hpp> #include <boost/random.hpp> #include "scene.h" #include "../../src/parameters/ParamParser_getopt.hpp" #include "../../src/pointsets/Pointset.hpp" #include "../../src/io/fileIO.hpp" double random01() { static boost::mt19937 rng(time(NULL)); static boost::uniform_01<boost::mt19937&> zeroone(rng); return zeroone(); } int main(int argc, char** argv) { /* ARG PARSER *****************************************************/ bool help=false; int nReal=1; int nPts=1024; std::string fn_output; utk::ParamParser_getopt parser; parser.addShortOption('h', &help, 0, utk::assignBoolTrue, utk::displayBool, "\tDisplays this help message", ""); parser.addShortOption('n', &nPts, 1, utk::assignInt, utk::displayInt, "[int=1024] \tThe number of samples", ""); parser.addShortOption('o', &fn_output, 1, utk::assignString, utk::displayString, "[string]\tThe output file", ""); parser.addShortOption('m', &nReal, 1, utk::assignInt, utk::displayInt, "[int=1]\tThe number of realisations", "Realisations:"); parser.parse(argc, argv); if(fn_output.empty()) { ERROR("Parameter -o mandatory"); std::cout << parser.getHelp() << std::endl; return 0; } if(help) { std::cout << parser.getHelp() << std::endl; return 0; } int current_real=0; while(current_real<nReal) { /* PROG ***********************************************************/ try { Scene bnot_scene; std::vector<Point> in_points; /* GENERATE ***************************************************/ std::cout << "Intializing w " << nPts << " random points" << std::endl; for(int i=0; i<nPts; i++) in_points.push_back(Point(random01(), random01())); std::cout << "Done" << std::endl; /* OPTIMIZE ***************************************************/ std::vector<FT> noise(in_points.size(), 0.0); std::vector<FT> weights(in_points.size(), 0.0); std::cout << "Construct Tglation" << std::endl; bnot_scene.construct_triangulation(in_points, weights, noise); std::cout << "Done" << std::endl; std::cout << "Optimizing" << std::endl; //max newton iter, epsilon, maxiter, ... //high number of points if(nPts > 150) bnot_scene.optimize_all(0.0, 0.0, 500, 0.2, 500, std::cout, true); else //low number of points bnot_scene.optimize_all(0.0, 0.0, 20, 0.2, 20, std::cout, true); std::cout << "Done" << std::endl; std::vector<Point> out_points; bnot_scene.collect_visible_points(out_points); /* WRITE ******************************************************/ { utk::Pointset<2, double, utk::Point<2, double> > pts; pts.resize(out_points.size()); for(unsigned int i=0; i<out_points.size(); i++) { pts[i].pos()[0] = out_points.at(i).x(); pts[i].pos()[1] = out_points.at(i).y(); } utk::PointsetWriter<2, double, utk::Point<2, double> > writer; writer.open(fn_output); writer.writePointset(pts); writer.close(); } } catch(const std::exception& e) { std::cerr << "Error : " << e.what() << std::endl; exit(EXIT_FAILURE); } current_real++; } exit(EXIT_SUCCESS); }
{"hexsha": "d7f84fe66a1ea6ce4ca0c31e977061d45411855d", "size": 3218, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "externals/bnot/main.cpp", "max_stars_repo_name": "FrancoisGaits/utk", "max_stars_repo_head_hexsha": "8c408dd79635f98c46ed075c098f15e23972aad0", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": 44.0, "max_stars_repo_stars_event_min_datetime": "2018-01-09T19:56:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T06:38:54.000Z", "max_issues_repo_path": "externals/bnot/main.cpp", "max_issues_repo_name": "FrancoisGaits/utk", "max_issues_repo_head_hexsha": "8c408dd79635f98c46ed075c098f15e23972aad0", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": 16.0, "max_issues_repo_issues_event_min_datetime": "2018-01-29T18:01:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T07:01:09.000Z", "max_forks_repo_path": "externals/bnot/main.cpp", "max_forks_repo_name": "FrancoisGaits/utk", "max_forks_repo_head_hexsha": "8c408dd79635f98c46ed075c098f15e23972aad0", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": 12.0, "max_forks_repo_forks_event_min_datetime": "2018-03-14T00:24:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T06:40:07.000Z", "avg_line_length": 26.8166666667, "max_line_length": 128, "alphanum_fraction": 0.5835922933, "num_tokens": 923}
import gym import numpy as np from abc import abstractmethod from fault_tolerant_flight_control_drl.agent import SAC from fault_tolerant_flight_control_drl.tools import AltitudeTask, AttitudeTask, BodyRateTask from fault_tolerant_flight_control_drl.tools import ReliabilityTask, DisturbanceRejectionAtt from fault_tolerant_flight_control_drl.tools import plot_response import importlib from fault_tolerant_flight_control_drl.tools.math_util import unscale_action, d2r, r2d from fault_tolerant_flight_control_drl.tools import get_ID from alive_progress import alive_bar class Citation(gym.Env): """ Citation environment that follows the gym.env interface Developed to be interfaced with a modified version of the CitAST environment, built with the DASMAT model and owned by the Delft University of Technology. Follow the 'CitAST for Python' instructions at https://github.com/kdally/fault-tolerant-flight-control-drl/blob/master/docs/CitAST_for_Python.pdf for installation. Author: Killian Dally :param evaluation: (bool) If False, the environment will be given training-specific shorter tasks. If True, the environment is given longer and unseen tasks as part of the evaluation. :param FDD: (bool) If True, the Fault Detection and Diagnosis module is added which switches from robust to adaptive control at self.FDD_switch_time. :param task: (Task) one of AltitudeTask, AttitudeTask, BodyRateTask, ReliabilityTask, DisturbanceRejection :param disturbance: (bool) If True, disturbance forces are added in the environment. Normal disturbance values from https://doi.org/10.2514/6.2018-1127. :param sensor_noise: (bool) If True, sensor noise is added to the environment observations based on the sensor noise estimates of the Cessna Citation 550 given in https://doi.org/10.2514/6.2018-1127. :param low_pass: (bool) It True, control inputs are filtered with a first-order low-pass filter. :param init_alt: (float) Initial flight altitude. One of 2000 or 5000. :param init_speed: (float) Initial speed. One of 90 or 140. """ def __init__(self, evaluation=False, FDD=False, task=AttitudeTask, disturbance=False, sensor_noise=False, low_pass=False, init_alt=2000, init_speed=90): super(Citation, self).__init__() assert bool((FDD and init_alt == 2000 and init_speed == 90) or not FDD), \ 'Failure cases only implemented for initial conditions init_alt == 2000 & init_speed == 90' self.rate_limits = self.ActionLimits(np.array([[-20, -40, -20], [20, 40, 20]])) self.deflection_limits = self.ActionLimits(np.array([[-20.05, -37.24, -21.77], [14.9, 37.24, 21.77]])) self.placeholder_cond = False self.C_MODEL, self.failure_input = self.get_plant() self.FDD_switch_time = 60 self.failure_time = 10 self.task = task() self.task_fun, self.evaluation, self.FDD = self.task.choose_task(evaluation, self.failure_input, FDD) self.has_sensor_noise = sensor_noise self.has_disturbance = disturbance self.enable_low_pass = low_pass self.time = self.task_fun()[3] self.dt = self.time[1] - self.time[0] self.ref_signal = self.task_fun(init_alt=init_alt)[0] self.track_indices = self.task_fun()[1] self.obs_indices = self.task_fun()[2] self.sideslip_factor, self.pitch_factor, self.roll_factor, self.alt_factor = self.adapt_to_failure() self.observation_space = gym.spaces.Box(-100, 100, shape=(len(self.obs_indices) + 3,), dtype=np.float64) self.action_space = gym.spaces.Box(-1., 1., shape=(3,), dtype=np.float64) self.current_deflection = np.zeros(3) self.agent_path = 'fault_tolerant_flight_control_drl/agent/trained' self.agents, self.agentID = self.load_agent(FDD) # type: SAC # self.agents, self.agentID = None, None self.state = None self.state_deg = None self.scale_s = None self.state_history = None self.action_history = None self.error = None self.step_count = None self.external_ref_signal = None def step(self, action_rates: np.ndarray): self.current_deflection = self.current_deflection + self.scale_a(action_rates) * self.dt if self.sideslip_factor[self.step_count - 1] == 0.0: self.current_deflection[2] = 0.0 filtered_deflection = self.filter_control_input(self.current_deflection) if self.time[self.step_count] < self.failure_time and self.evaluation: self.state = self.C_MODEL.step( np.hstack([d2r(filtered_deflection + self.add_disturbance()[:, self.step_count]), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, self.failure_input[1]])) else: self.state = self.C_MODEL.step( np.hstack([d2r(filtered_deflection + self.add_disturbance()[:, self.step_count]), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, self.failure_input[2]])) self.state_deg = self.state * self.scale_s self.error = d2r(self.ref_signal[:, self.step_count] - self.state_deg[self.track_indices] + self.get_sensor_noise()[self.track_indices]) \ * self.scale_error(self.step_count) self.state_history[:, self.step_count] = self.state_deg self.action_history[:, self.step_count] = filtered_deflection self.step_count += 1 done = bool(self.step_count >= self.time.shape[0]) if np.isnan(self.state).sum() > 0: self.stop_NaNs() return self.get_obs(), self.get_reward(), done, {'is_success': True} def reset(self): self.reset_soft() self.ref_signal = self.task_fun()[0] return np.zeros(self.observation_space.shape) def reset_soft(self): self.C_MODEL.initialize() action_trim = np.array( [0, 0, 0, 0., 0., 0., 0., 0., 0, 0, self.failure_input[1]]) self.state = self.C_MODEL.step(action_trim) self.scale_s = np.ones(self.state.shape) self.scale_s[[0, 1, 2, 4, 5, 6, 7, 8]] = 180 / np.pi self.state_deg = self.state * self.scale_s self.state_history = np.zeros((self.state.shape[0], self.time.shape[0])) self.action_history = np.zeros((self.action_space.shape[0], self.time.shape[0])) self.error = np.zeros(len(self.track_indices)) self.step_count = 0 self.current_deflection = np.zeros(3) return np.zeros(self.observation_space.shape) def get_reward(self): max_bound = np.ones(self.error.shape) # reward_vec = np.abs(np.maximum(np.minimum(r2d(self.error / 30)**2, max_bound), -max_bound)) # square function reward_vec = np.abs(np.maximum(np.minimum(r2d(self.error / 30), max_bound), -max_bound)) # rational function # reward_vec = - np.maximum(np.minimum(1 / (np.abs(self.error) * 10 + 1), max_bound), # - max_bound) # abs. linear function reward = -reward_vec.sum() / self.error.shape[0] return reward def get_obs(self): untracked_obs_index = np.setdiff1d(self.obs_indices, self.track_indices) return np.hstack([self.error, self.state[untracked_obs_index], self.current_deflection]) def get_RMSE(self): assert bool(self.step_count >= self.time.shape[0]), \ f'Error: cannot obtain RMSE before episode is completed. Current time is {self.time[self.step_count]}s.' y_ref = self.ref_signal.copy() y_ref2 = self.ref_signal.copy() y_meas = self.state_history[self.track_indices, :].copy() y_ref2[-1, 0] = 5 y_ref2[-1, 1] = -5 RMSE = np.sqrt(np.mean(np.square((y_ref - y_meas)), axis=1)) / (y_ref2.max(axis=1) - y_ref2.min(axis=1)) return RMSE def get_MAE(self): assert bool(self.step_count >= self.time.shape[0]), \ f'Error: cannot obtain MAE before episode is completed. Current time is {self.time[self.step_count]}s.' y_ref = self.ref_signal.copy() y_ref2 = self.ref_signal.copy() y_meas = self.state_history[self.track_indices, :].copy() y_ref2[-1, 0] = 5 y_ref2[-1, 1] = -5 MAE = np.mean(np.absolute(y_ref - y_meas), axis=1) / (y_ref2.max(axis=1) - y_ref2.min(axis=1)) return MAE def stop_NaNs(self): print('Encountered crash. Episode terminated early.') if not self.evaluation: ID = get_ID(6) agent = SAC.load("fault_tolerant_flight_control_drl/agent/trained/tmp/best_model.zip", env=self) agent.ID = ID agent.save(f'{self.agent_path}/{self.task_fun()[4]}_{agent.ID}.zip') print('Training is corrupt because of NaN values, terminated early. ' 'So-far best trained agent may show good performance.') plot_response('before_crash', self, self.task_fun(), 100, during_training=False, failure=self.failure_input[0], FDD=self.FDD, broken=True) exit() def filter_control_input(self, deflection): w_0 = 2 * 2 * np.pi # rad/s filtered_deflection = deflection.copy() if self.step_count > 1 and self.enable_low_pass: filtered_deflection = self.action_history[:, self.step_count - 1] / (1 + w_0 * self.dt) + \ deflection * (w_0 * self.dt) / (1 + w_0 * self.dt) return filtered_deflection def get_sensor_noise(self): # values in degrees, SSD sensor_noise = np.zeros(self.state.shape) if self.has_sensor_noise: # p, q, r measurement from https://doi.org/10.2514/6.2018-0385 sensor_noise[0:3] += r2d(np.random.normal(scale=np.sqrt(4.0e-7), size=3)+3.0e-5) # sideslip, estimate from https://doi.org/10.2514/6.2018-0385 sensor_noise[5] += r2d(np.random.normal(scale=np.sqrt(7.5e-8))+1.8e-3) # phi, theta measurement from https://doi.org/10.2514/6.2018-0385 sensor_noise[6:8] += r2d(np.random.normal(scale=np.sqrt(1e-9), size=2)+4.0e-3) # h estimate from https://doi.org/10.2514/6.2018-0385 sensor_noise[9] += np.random.normal(scale=np.sqrt(4.5e-3))+8.0e-3 return sensor_noise def add_disturbance(self): disturbance = np.zeros((self.action_space.shape[0], self.time.shape[0])) if self.has_disturbance: # 3211 input in deg disturbance[0, np.argwhere(self.time == 1)[0, 0]:np.argwhere(self.time == 4)[0, 0]] = 0.5 disturbance[0, np.argwhere(self.time == 4)[0, 0]:np.argwhere(self.time == 6)[0, 0]] = -0.9 disturbance[0, np.argwhere(self.time == 6)[0, 0]:np.argwhere(self.time == 7)[0, 0]] = 1.2 disturbance[0, np.argwhere(self.time == 7)[0, 0]:np.argwhere(self.time == 8)[0, 0]] = -1.2 disturbance[1, np.argwhere(self.time == 10)[0, 0]:np.argwhere(self.time == 13)[0, 0]] = -0.5 disturbance[1, np.argwhere(self.time == 13)[0, 0]:np.argwhere(self.time == 15)[0, 0]] = 0.9 disturbance[1, np.argwhere(self.time == 15)[0, 0]:np.argwhere(self.time == 16)[0, 0]] = -1.2 disturbance[1, np.argwhere(self.time == 16)[0, 0]:np.argwhere(self.time == 17)[0, 0]] = 1.2 return disturbance def scale_error(self, step_count): if 7 in self.track_indices: return np.array([self.pitch_factor[step_count], self.roll_factor[step_count], self.sideslip_factor[step_count]]) else: return np.array([self.alt_factor[step_count], self.roll_factor[step_count], self.sideslip_factor[step_count]]) def scale_a(self, action_unscaled: np.ndarray) -> np.ndarray: """Min-max un-normalization from [-1, 1] action space to actuator limits""" return unscale_action(self.rate_limits, action_unscaled) def bound_a(self, action): return np.minimum(np.maximum(action, self.deflection_limits.low), self.deflection_limits.high) @abstractmethod def get_plant(self): pass @abstractmethod def load_agent(self, FDD): pass def adapt_to_failure(self): pitch_factor = np.ones(self.time.shape[0]) roll_factor = np.ones(self.time.shape[0]) alt_factor = np.ones(self.time.shape[0]) if self.evaluation: sideslip_factor = 4.0 * np.ones(self.time.shape[0]) if self.task_fun()[4] == 'altitude_2attitude': roll_factor *= 2 else: sideslip_factor = 10.0 * np.ones(self.time.shape[0]) return sideslip_factor, pitch_factor, roll_factor, alt_factor def FFD_change(self): pass def render(self, ext_agent=None, verbose=1): during_training = False if ext_agent is not None: self.agents = [ext_agent] # self.agents.save(f'agent/trained/{self.task_fun()[4]}_last.zip') self.agentID = 'last' verbose = 0 during_training = True if self.FDD: self.reset() agent_robust = self.agents[0] agent_adaptive = self.agents[1] else: agent_robust = self.agents[0] agent_adaptive = None obs = self.reset_soft() return_a = 0 done = False items = range(self.time.shape[0]) with alive_bar(len(items)) as bar: while not done: if self.time[self.step_count] < self.FDD_switch_time or not self.FDD: action, _ = agent_robust.predict(obs, deterministic=True) else: self.FFD_change() action, _ = agent_adaptive.predict(obs, deterministic=True) obs, reward, done, info = self.step(action) return_a += reward bar() plot_response(self.agentID, self, self.task_fun(), return_a, during_training, self.failure_input[0], FDD=self.FDD) if verbose > 0: # print(f'Goal reached! Return = {return_a:.2f}') np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}) print(f'nRMSE% avg: {(self.get_RMSE().sum()) / 3 * 100:.2f}%') print(f'nMAE% avg: {(self.get_MAE().sum()) / 3 * 100:.2f}%') print('') def close(self): self.C_MODEL.terminate() return class ActionLimits: def __init__(self, limits): self.low, self.high = limits[0, :], limits[1, :] class CitationNormal(Citation): """ Normal Citation Dynamics class, a sub-class of the Citation class. Author: Killian Dally :param evaluation: (bool) If False, the environment will be given training-specific shorter tasks. If True, the environment is given longer and unseen tasks as part of the evaluation. :param FDD: (bool) If True, the Fault Detection and Diagnosis module is added which switches from robust to adaptive control at self.FDD_switch_time. :param task: (Task) one of AltitudeTask, AttitudeTask, BodyRateTask, ReliabilityTask :param disturbance: (bool) If True, disturbance forces are added in the environment. Normal disturbance values from https://doi.org/10.2514/6.2018-1127. :param sensor_noise: (bool) If True, sensor noise is added to the environment observations based on the sensor noise estimates of the Cessna Citation 550 given in https://doi.org/10.2514/6.2018-1127. :param low_pass: (bool) It True, control inputs are filtered with a first-order low-pass filter. :param init_alt: (float) Initial flight altitude. One of 2000 or 5000. :param init_speed: (float) Initial speed. One of 90 or 140. """ def __init__(self, init_alt=2000, init_speed=90, evaluation=False, FDD=False, task=AttitudeTask, disturbance=False, sensor_noise=False, low_pass=False): self.init_alt = init_alt self.init_speed = init_speed super(CitationNormal, self).__init__(evaluation=evaluation, FDD=FDD, task=task, disturbance=disturbance, sensor_noise=sensor_noise, low_pass=low_pass) self.ref_signal = self.task_fun(init_alt=init_alt)[0] def get_plant(self): path = 'fault_tolerant_flight_control_drl.envs.citation' if self.init_alt == 2000 and self.init_speed == 90: plant = importlib.import_module(f'{path}.normal_2000_90._citation', package=None) elif self.init_alt == 2000 and self.init_speed == 140: plant = importlib.import_module(f'{path}.normal_2000_140._citation', package=None) self.placeholder_cond = True elif self.init_alt == 5000 and self.init_speed == 90: plant = importlib.import_module(f'{path}.normal_5000_90._citation', package=None) elif self.init_alt == 5000 and self.init_speed == 140: plant = importlib.import_module(f'{path}.normal_5000_140._citation', package=None) else: raise NotImplementedError('No model with the specified initial conditions is present. ' \ 'Choose within init_alt={2000, 5000} and init_speed={90, 120}.') return plant, ['normal', 1.0, 1.0] def load_agent(self, FDD=False): if FDD: raise NotImplementedError('No fault detection and diagnosis on the non-failed system.') return [SAC.load(f"{self.agent_path}/{self.task.agent_catalog['normal']}.zip", env=self)], self.task.agent_catalog['normal'] def reset(self): super(CitationNormal, self).reset() self.ref_signal = self.task_fun(init_alt=self.init_alt)[0] return np.zeros(self.observation_space.shape) def reset_soft(self): super(CitationNormal, self).reset_soft() self.ref_signal = self.task_fun(init_alt=self.init_alt)[0] return np.zeros(self.observation_space.shape) class CitationRudderStuck(Citation): """ Citation Dynamics class with rudder failure, a sub-class of the Citation class. The rudder is stuck at -15deg starting from self.failure_time. Author: Killian Dally """ def get_plant(self): plant = importlib.import_module(f'fault_tolerant_flight_control_drl.envs.citation.dr._citation', package=None) return plant, ['dr', 0.0, -15.0] def load_agent(self, FDD): if FDD: return [SAC.load(f"{self.agent_path}/{self.task.agent_catalog['normal']}.zip", env=self), SAC.load(f"{self.agent_path}/{self.task.agent_catalog['rudder_stuck']}.zip", env=self)], \ self.task.agent_catalog['rudder_stuck'] return CitationNormal().load_agent() def adapt_to_failure(self): sideslip_factor, pitch_factor, roll_factor, alt_factor = super(CitationRudderStuck, self).adapt_to_failure() if self.FDD: sideslip_factor[np.argwhere(self.time == self.FDD_switch_time)[0, 0]:] *= 0.0 roll_factor[np.argwhere(self.time == self.FDD_switch_time)[0, 0]:] *= 0.5 return sideslip_factor, pitch_factor, roll_factor, alt_factor class CitationAileronEff(Citation): """ Citation Dynamics class with aileron failure, a sub-class of the Citation class. The aileron effectiveness is reduced by 70% from self.failure_time. Author: Killian Dally """ def get_plant(self): plant = importlib.import_module(f'fault_tolerant_flight_control_drl.envs.citation.da._citation', package=None) return plant, ['da', 1.0, 0.3] def load_agent(self, FDD): if FDD: return [SAC.load(f"{self.agent_path}/{self.task.agent_catalog['normal']}.zip", env=self), SAC.load(f"{self.agent_path}/{self.task.agent_catalog['aileron_eff']}.zip", env=self)], \ self.task.agent_catalog['aileron_eff'] return CitationNormal().load_agent() def adapt_to_failure(self): sideslip_factor, pitch_factor, roll_factor, alt_factor = super(CitationAileronEff, self).adapt_to_failure() if self.FDD: pitch_factor[np.argwhere(self.time == self.FDD_switch_time)[0, 0]:] *= 1.5 return sideslip_factor, pitch_factor, roll_factor, alt_factor class CitationElevRange(Citation): """ Citation Dynamics class with elevator failure, a sub-class of the Citation class. The elevator operating range is reduced to [-3 deg, 3 deg] from self.failure_time. Author: Killian Dally """ def get_plant(self): plant = importlib.import_module(f'fault_tolerant_flight_control_drl.envs.citation.de._citation', package=None) return plant, ['de', 20.05, 2.5] def load_agent(self, FDD): if FDD: return [SAC.load(f"{self.agent_path}/{self.task.agent_catalog['normal']}.zip", env=self), SAC.load(f"{self.agent_path}/{self.task.agent_catalog['elev_range']}.zip", env=self)], \ self.task.agent_catalog['elev_range'] return CitationNormal().load_agent() def FFD_change(self): self.deflection_limits = self.ActionLimits(np.array([[-3.0, -37.24, -21.77], [3.0, 37.24, 21.77]])) self.rate_limits = self.ActionLimits(np.array([[-7, -40, -20], [7, 40, 20]])) class CitationCgShift(Citation): """ Citation Dynamics class with backwards c.g. shift, a sub-class of the Citation class. A 300kg payload moving from the from the front to the back of the passenger cabin is simulated, which translates to a backwards c.g. shift of 0.25m from self.failure_time. Author: Killian Dally """ def get_plant(self): plant = importlib.import_module(f'fault_tolerant_flight_control_drl.envs.citation.cg._citation', package=None) return plant, ['cg', 1.0, 1.04] def load_agent(self, FDD): if FDD: return [SAC.load(f"{self.agent_path}/{self.task.agent_catalog['normal']}.zip", env=self), SAC.load(f"{self.agent_path}/{self.task.agent_catalog['cg_shift']}.zip", env=self)], \ self.task.agent_catalog['cg_shift'] return CitationNormal().load_agent() def adapt_to_failure(self): sideslip_factor, pitch_factor, roll_factor, alt_factor = super(CitationCgShift, self).adapt_to_failure() if self.FDD: alt_factor[np.argwhere(self.time == self.FDD_switch_time)[0, 0]:] *= 0.5 return sideslip_factor, pitch_factor, roll_factor, alt_factor class CitationIcing(Citation): """ Citation Dynamics class with icing, a sub-class of the Citation class. A large accumulation of ice on the wing is simulated according to the measurements made in https://doi.org/10.1016/S0376-0421(01)00018-5 from self.failure_time. In practice, C_L_max and alpha_stall are reduced by 30% and C_D increased by 0.06. Author: Killian Dally """ def get_plant(self): plant = importlib.import_module(f'fault_tolerant_flight_control_drl.envs.citation.ice._citation', package=None) return plant, ['ice', 1.0, 0.7] # https://doi.org/10.1016/S0376-0421(01)00018-5 def load_agent(self, FDD): if FDD: return [SAC.load(f"{self.agent_path}/{self.task.agent_catalog['normal']}.zip", env=self), SAC.load(f"{self.agent_path}/{self.task.agent_catalog['icing']}.zip", env=self, policy_kwargs=dict(layers=[32, 32]))], \ self.task.agent_catalog['icing'] return CitationNormal().load_agent() def reset(self): super(CitationIcing, self).reset() self.ref_signal = self.task_fun()[0] return np.zeros(self.observation_space.shape) def adapt_to_failure(self): sideslip_factor, pitch_factor, roll_factor, alt_factor = super(CitationIcing, self).adapt_to_failure() if self.FDD: alt_factor[np.argwhere(self.time == self.FDD_switch_time)[0, 0]:] *= 0.25 return sideslip_factor, pitch_factor, roll_factor, alt_factor class CitationHorzTail(Citation): """ Citation Dynamics class with partial horizontal tail loss, a sub-class of the Citation class. Author: Killian Dally """ def get_plant(self): plant = importlib.import_module(f'fault_tolerant_flight_control_drl.envs.citation.ht._citation', package=None) return plant, ['ht', 1.0, 0.3] def load_agent(self, FDD): if FDD: return [SAC.load(f"{self.agent_path}/{self.task.agent_catalog['normal']}.zip", env=self), SAC.load(f"{self.agent_path}/{self.task.agent_catalog['horz_tail']}.zip", env=self, policy_kwargs=dict(layers=[32, 32]))], \ self.task.agent_catalog['horz_tail'] return CitationNormal().load_agent() def adapt_to_failure(self): sideslip_factor, pitch_factor, roll_factor, alt_factor = super(CitationHorzTail, self).adapt_to_failure() if self.FDD: alt_factor[np.argwhere(self.time == self.FDD_switch_time)[0, 0]:] *= 0.01 return sideslip_factor, pitch_factor, roll_factor, alt_factor class CitationVertTail(Citation): """ Citation Dynamics class with partial vertical tail loss, a sub-class of the Citation class. Author: Killian Dally """ def get_plant(self): plant = importlib.import_module(f'fault_tolerant_flight_control_drl.envs.citation.vt._citation', package=None) return plant, ['vt', 1.0, 0.3] def load_agent(self, FDD): if FDD: return [SAC.load(f"{self.agent_path}/{self.task.agent_catalog['normal']}.zip", env=self), SAC.load(f"{self.agent_path}/{self.task.agent_catalog['normal']}.zip", env=self)], \ self.task.agent_catalog['vert_tail'] return CitationNormal().load_agent() def adapt_to_failure(self): sideslip_factor, pitch_factor, roll_factor, alt_factor = super(CitationVertTail, self).adapt_to_failure() if self.FDD: sideslip_factor[np.argwhere(self.time == self.FDD_switch_time)[0, 0]:] *= 0.25 return sideslip_factor, pitch_factor, roll_factor, alt_factor class CitationDistAlpha(CitationNormal): """ CitationNormal Dynamics class with atmospheric disturbances as verital . The rudder is stuck at -15deg starting from self.failure_time. Author: Killian Dally """ def get_plant(self): path = 'fault_tolerant_flight_control_drl.envs.citation' if self.init_alt == 2000 and self.init_speed == 90: plant = importlib.import_module(f'{path}.normal_2000_90_dist._citation', package=None) else: raise NotImplementedError('No model with the specified initial conditions is present.') return plant, ['normal', 1.0, 1.0] class CitationVerif(CitationNormal): """ Normal Citation Dynamics class for verification, a sub-class of the Citation class. It emulates MATLAB from Python to compare the response of the compiled model and that of the Simulink model. Author: Killian Dally """ def step(self, actions: np.ndarray): self.current_deflection = actions if self.sideslip_factor[self.step_count - 1] == 0.0: self.current_deflection[2] = 0.0 if self.time[self.step_count] < 5.0 and self.evaluation: self.state = self.C_MODEL.step( np.hstack([d2r(self.current_deflection), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, self.failure_input[1]])) else: self.state = self.C_MODEL.step( np.hstack([d2r(self.current_deflection), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, self.failure_input[2]])) self.state_deg = self.state * self.scale_s self.error = d2r(self.ref_signal[:, self.step_count] - self.state_deg[self.track_indices]) self.error[self.track_indices.index(5)] *= self.sideslip_factor[self.step_count] self.error[self.track_indices.index(6)] *= self.roll_factor[self.step_count] if 7 in self.track_indices: self.error[self.track_indices.index(7)] *= self.pitch_factor[self.step_count] if 9 in self.track_indices: self.error[self.track_indices.index(9)] *= 1.0 self.state_history[:, self.step_count] = self.state_deg self.action_history[:, self.step_count] = self.current_deflection self.step_count += 1 done = bool(self.step_count >= self.time.shape[0]) if np.isnan(self.state).sum() > 0: print(self.state_history[:, self.step_count - 2], self.time[self.step_count - 1]) plot_response('before_crash', self, self.task_fun(), 100, during_training=False, failure=self.failure_input[0], FDD=self.FDD, broken=True) exit() return self.get_obs(), self.get_reward(), done, {'is_success': True} # # import os # print(os.getcwd()) # from stable_baselines.common.env_checker import check_env # envs = CitationNormal() # print("Observation space:", envs.observation_space.shape) # print("Action space:", envs.action_space.shape) # check_env(envs, warn=True) #
{"hexsha": "b5023c71e12ab2bbb72a047594c6d61484d2235a", "size": 29305, "ext": "py", "lang": "Python", "max_stars_repo_path": "fault_tolerant_flight_control_drl/envs/citation/citation.py", "max_stars_repo_name": "kdally/fault-tolerant-flight-control-drl", "max_stars_repo_head_hexsha": "800a1c9319b44ab2b1d17f6e19266c2392d6e57b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-02-27T09:49:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T16:28:08.000Z", "max_issues_repo_path": "fault_tolerant_flight_control_drl/envs/citation/citation.py", "max_issues_repo_name": "kdally/fault-tolerant-flight-control-drl", "max_issues_repo_head_hexsha": "800a1c9319b44ab2b1d17f6e19266c2392d6e57b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fault_tolerant_flight_control_drl/envs/citation/citation.py", "max_forks_repo_name": "kdally/fault-tolerant-flight-control-drl", "max_forks_repo_head_hexsha": "800a1c9319b44ab2b1d17f6e19266c2392d6e57b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-04T07:24:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-17T04:21:08.000Z", "avg_line_length": 44.5364741641, "max_line_length": 132, "alphanum_fraction": 0.6460331002, "include": true, "reason": "import numpy", "num_tokens": 7445}
SUBROUTINE MA_CGRM (fldnin, prsdon, rmkdon, fldnou, ier) C************************************************************************ C* MA_CGRM * C* * C* This subroutine decodes the character remarks fields in a single * C* Coast Guard report. Parameters used which are not in the calling * C* sequence are found in macmn.cmn. * C* * C* MA_CGRM ( FLDNIN, PRSDON, RMKDON, FLDNOU, IER ) * C* * C* Input parameters: * C* FIELDS CHAR*(*) Array of fields found in input * C* string * C* LENSF INTEGER Array of lengths of fields * C* FLDNIN INTEGER Number of field to work on. * C* * C* Input and Output parameters: * C* PRSDON LOGICAL if true, pressure field has been* C* decoded. * C* RMKDON LOGICAL if true, remarks fields have * C* * C* Output parameters: * C* RIVALS(IRTERC) REAL tidal elev. relative to local * C* chart (inches) * C* RIVALS(IRHOCB) REAL height of cloud base (meters) * C* RIVALS(IRGUST) REAL max. wind speed (gust) (kts) * C* RIVALS(IRMXWH) REAL maximum wave height (ft) * C* RIVALS(IRCORN) REAL correction indicator * C* FLDNOU INTEGER Number of next field to work on * C* IER INTEGER Return code * C* 0 = Normal return * C* non-zero = Problem * C** * C* Log: * C* C. Caruso Magee/NCEP 4/01 Original Author * C* F. J. Yen/NCEP 4/01 Cleaned up, reformatted and renamed from* C* CG_RMKA. Added additional check for * C* another numeric field. Added RAIN, HAZE * C* and FOG. Kept value of fldnin unchanged.* C* Changed prologue and sequence order of * C* parameters. Added more comments. * C************************************************************************ INCLUDE 'macmn.cmn' C* INTEGER fldnin, fldnou LOGICAL prsdon, rmkdon, MA_FIND C* CHARACTER*5 swls (3) CHARACTER*3 dirtn (16) C* DATA swls / 'SWELL', 'SWL', 'SWEL' / DATA dirtn / 'SSW', 'WSW', 'WNW', 'NNW', + 'NNE', 'ENE', 'ESE', 'SSE', + 'SE', 'SW', 'NW', 'NE', + 'S', 'W', 'N', 'E' / C------------------------------------------------------------------------ ier = 0 i = fldnin fldnou = fldnin IF ( lensf(i) .eq. 5 ) THEN IF ( fields(i) .eq. 'GUSTY' ) THEN fldnou = fldnin + 1 rmkdon = .true. RETURN ELSE IF ( fields(i) .eq. 'MINUS' .and. + i + 1 .le. nflds) THEN C C* Decode tidal elevation C CALL ST_INTG ( fields(i+1)(1:lensf(i+1)), ist1, ier ) IF ( ier .eq. 0 .and. ist1 .lt. 1000 ) THEN rivals ( irterc ) = FLOAT( -ist1 ) ELSE WRITE ( UNIT = logmsg, FMT = '( A )' ) + ' Invalid group/format error in char remarks' CALL DC_WLOG ( 2, 'MA', 1, logmsg, ierwlg ) END IF fldnou = fldnin + 2 END IF ELSE IF ( lensf(i) .eq. 4 ) THEN IF ( fields(i) .eq. 'CEIL' .and. + i + 1 .le. nflds ) THEN C C* Decode ceiling (height of cloud base) C IF ( itypsf(i+1) .eq. ALPHA ) THEN IF ( fields(i+1) .eq. 'UNL' ) THEN fldnou = fldnin + 2 ELSE fldnou = fldnin + 1 END IF ELSE IF ( itypsf(i+1) .eq. NMR ) THEN IF ( lensf(i+1) .eq. 3 ) THEN C C* Ceiling is in hundreds of feet, so convert in C* feet, then to meters before saving into irhocb. C CALL ST_INTG ( fields(i+1)(1:lensf(i+1)), + ist1, ier ) IF ( ier .eq. 0 ) THEN rivals ( irhocb(1) ) = + FLOAT( ist1 ) * 100./3.28 fldnou = fldnin + 2 END IF ELSE fldnou = fldnin + 2 END IF ELSE fldnou = fldnin + 2 END IF ELSE IF ( fields(i) .eq. 'GUST' .and. + i + 1 .le. nflds ) THEN IF ( itypsf(i+1) .eq. NMR ) THEN C C* Decode maximum wind speed C CALL ST_INTG ( fields(i+1)(1:lensf(i+1)), + ist1, ier ) IF ( ier .eq. 0 .and. ist1 .lt. 300 ) THEN rivals ( irgust ) = FLOAT ( ist1 ) END IF fldnou = fldnin + 2 ELSE WRITE ( UNIT = logmsg, FMT = '( A )' ) + ' Invalid group/format error in char remarks' CALL DC_WLOG ( 2, 'MA', 1, logmsg, ierwlg ) fldnou = fldnin + 1 END IF ELSE IF ( fields(i) .eq. 'PLUS' .and. + i + 1 .le. nflds ) THEN C C* Decode tidal elevation C CALL ST_INTG ( fields(i+1)(1:lensf(i+1)), ist1, ier ) IF ( ier .eq. 0 .and. ist1 .lt. 1000 ) THEN rivals ( irterc ) = FLOAT ( ist1 ) ELSE WRITE ( UNIT = logmsg, FMT = '( A )' ) + ' Invalid group/format error in char remarks' CALL DC_WLOG ( 2, 'MA', 1, logmsg, ierwlg ) END IF fldnou = fldnin + 2 ELSE IF ( fields(i) .eq. 'HAZE' .or. + fields(i) .eq. 'RAIN' ) THEN IF ( iwxvln .lt. 20 ) THEN C C* Append haze or rain to the weather visibility C* text string and redecode the weather phenomenon C wxvsav = wxvsav(1:iwxvln) // fields (i) (1:1) iwxvln = iwxvln + 1 CALL MA_CGWX ( wxvsav(1:iwxvln), ier ) END IF fldnou = fldnin + 1 END IF ELSE IF ( lensf(i) .eq. 3 ) THEN IF ( fields(i) .eq. 'SCA' .or. + fields(i) .eq. 'UNL') THEN fldnou = fldnin + 1 rmkdon = .true. RETURN ELSE IF ( fields(i) .eq. 'COR' ) THEN rivals ( ircorn ) = 1. fldnou = fldnin + 1 ELSE IF ( fields(i) .eq. 'MAX' .and. + i + 1 .le. nflds ) THEN IF ( itypsf(i+1) .eq. NMR ) THEN C C* Decode maximum wave height C CALL ST_INTG ( fields(i+1)(1:lensf(i+1)), + ist1, ier ) IF ( ier .eq. 0 .and. ist1 .lt. 200 ) THEN rivals ( irmxwh ) = FLOAT ( ist1 ) END IF fldnou = fldnin + 3 ELSE fldnou = fldnin + 1 END IF C C* Test for presence of direction text strings and for C* possible swell text strings C ELSE IF ( MA_FIND ( fields(i), dirtn, 8 ) .and. + i + 1 .le. nflds ) THEN IF ( MA_FIND ( fields(i+1), swls, 3 ) ) THEN C C* Decode the swell direction C CALL MA_CGWD ( fields(i), swls(1), iret ) fldnou = fldnin + 2 ELSE fldnou = fldnin + 1 END IF ELSE IF ( fields(i) .eq. 'FOG' ) THEN IF ( iwxvln .lt. 20 ) THEN C C* Append fog to the weather visibility C* text string and redecode the weather phenomenon C wxvsav = wxvsav(1:iwxvln) // fields (i) (1:1) iwxvln = iwxvln + 1 CALL MA_CGWX ( wxvsav(1:iwxvln), ier ) END IF fldnou = fldnin + 1 END IF ELSE IF ( lensf(i) .eq. 2 ) THEN C C* Test for presence of direction text strings and for C* possible swell text strings C IF ( MA_FIND ( fields(i), dirtn(9), 4 ) .and. + i + 1 .le. nflds ) THEN IF ( MA_FIND ( fields(i+1), swls, 3 ) ) THEN C C* Decode the swell direction C CALL MA_CGWD ( fields(i), swls(1), iret ) fldnou = fldnin + 2 ELSE fldnou = fldnin + 1 END IF ELSE IF ( fields(i) .eq. 'MX' .and. + i + 1 .le. nflds ) THEN C C* Decode maximum wave height C IF ( itypsf(i+1) .eq. NMR ) THEN CALL ST_INTG ( fields(i+1)(1:lensf(i+1)), + ist1, ier ) IF ( ier .eq. 0 .and. ist1 .lt. 200 ) THEN rivals ( irmxwh ) = FLOAT ( ist1 ) fldnou = fldnin + 3 ELSE fldnou = fldnin + 3 END IF ELSE fldnou = fldnin + 1 END IF END IF C C* Test for presence of direction text strings and for C* possible swell text strings C ELSE IF ( lensf(i) .eq. 1 .and. + MA_FIND ( fields(i), dirtn(13), 4 ) .and. + i + 1 .le. nflds ) THEN IF ( MA_FIND ( fields(i+1), swls, 3) ) THEN C C* Decode the swell direction C CALL MA_CGWD ( fields(i), swls(1), iret ) fldnou = fldnin + 2 ELSE fldnou = fldnin + 1 END IF ELSE IF ( lensf(i) .eq. 1 .and. + ( fields(i) .eq. 'G' ) .and. + i + 1 .le. nflds ) THEN IF ( itypsf(i+1) .eq. NMR ) THEN C C* Decode maximum wind speed C CALL ST_INTG ( fields(i+1)(1:lensf(i+1)), ist1, ier ) IF ( ier .eq. 0 .and. ist1 .lt. 300 ) THEN rivals ( irgust ) = FLOAT ( ist1 ) END IF fldnou = fldnin + 2 ELSE fldnou = fldnin + 1 END IF END IF IF ( .not. rmkdon ) THEN C C* Check to see if next field (or initial field if no match C* found) is alpha or numeric. If numeric, it's more remarks, C* else it's station name or a typo. C IF ( itypsf(fldnou) .eq. NMR ) THEN IF ( fldnou .le. nflds ) THEN infld = fldnou CALL MA_CGPT ( infld, prsdon, rmkdon, fldnou, ier ) ELSE rmkdon = .true. END IF ELSE fldnou = fldnin rmkdon = .true. END IF END IF C* RETURN END
{"hexsha": "2b2a2600f4726557631c3c38253f886b2f2d1b66", "size": 9810, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gempak/source/bridge/ma/macgrm.f", "max_stars_repo_name": "oxelson/gempak", "max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z", "max_issues_repo_path": "gempak/source/bridge/ma/macgrm.f", "max_issues_repo_name": "oxelson/gempak", "max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z", "max_forks_repo_path": "gempak/source/bridge/ma/macgrm.f", "max_forks_repo_name": "oxelson/gempak", "max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z", "avg_line_length": 33.3673469388, "max_line_length": 73, "alphanum_fraction": 0.4729867482, "num_tokens": 3273}
"""Unittests for rasterio.plot""" import numpy as np import pytest try: import matplotlib as mpl mpl.use('agg') import matplotlib.pyplot as plt plt.show = lambda :None except ImportError: plt = None import rasterio from rasterio.plot import (show, show_hist, get_plt, plotting_extent, adjust_band) from rasterio.enums import ColorInterp def test_show_raster_band(): """Test plotting a single raster band.""" matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: show((src, 1)) fig = plt.gcf() plt.close(fig) def test_show_raster_mult_bands(): """Test multiple bands plotting.""" matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: show((src, (1, 2, 3))) fig = plt.gcf() plt.close(fig) def test_show_raster_object(): """Test plotting a raster object.""" matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: show(src) fig = plt.gcf() plt.close(fig) def test_show_raster_float(): """Test plotting a raster object with float data.""" matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/float.tif') as src: show(src) fig = plt.gcf() plt.close(fig) def test_show_cmyk_interp(tmpdir): """A CMYK TIFF has cyan, magenta, yellow, black bands.""" matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: meta = src.meta meta['photometric'] = 'CMYK' meta['count'] = 4 tiffname = str(tmpdir.join('foo.tif')) with rasterio.open(tiffname, 'w', **meta) as dst: assert dst.profile['photometric'] == 'cmyk' assert dst.colorinterp == ( ColorInterp.cyan, ColorInterp.magenta, ColorInterp.yellow, ColorInterp.black) with rasterio.open(tiffname) as src: try: show(src) fig = plt.gcf() plt.close(fig) except ImportError: pass def test_show_raster_no_bounds(): """ This test only verifies that code up to the point of plotting with matplotlib works correctly. Tests do not exercise matplotlib. """ matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: try: show((src, 1), with_bounds=False) fig = plt.gcf() plt.close(fig) except ImportError: pass def test_show_raster_title(): """ This test only verifies that code up to the point of plotting with matplotlib works correctly. Tests do not exercise matplotlib. """ matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: try: show((src, 1), title="insert title here") fig = plt.gcf() plt.close(fig) except ImportError: pass def test_show_hist_large(): """ This test only verifies that code up to the point of plotting with matplotlib works correctly. Tests do not exercise matplotlib. """ matplotlib = pytest.importorskip('matplotlib') try: rand_arr = np.random.randn(10, 718, 791) show_hist(rand_arr) fig = plt.gcf() plt.close(fig) except ImportError: pass def test_show_raster_cmap(): """ This test only verifies that code up to the point of plotting with matplotlib works correctly. Tests do not exercise matplotlib. """ matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: try: show((src, 1), cmap='jet') fig = plt.gcf() plt.close(fig) except ImportError: pass def test_show_raster_ax(): """ This test only verifies that code up to the point of plotting with matplotlib works correctly. Tests do not exercise matplotlib. """ matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: try: fig, ax = plt.subplots(1) show((src, 1), ax=ax) fig = plt.gcf() plt.close(fig) except ImportError: pass def test_show_array(): """ This test only verifies that code up to the point of plotting with matplotlib works correctly. Tests do not exercise matplotlib. """ matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: try: show(src.read(1)) fig = plt.gcf() plt.close(fig) except ImportError: pass def test_show_array3D(): """ This test only verifies that code up to the point of plotting with matplotlib works correctly. Tests do not exercise matplotlib. """ matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: try: show(src.read((1, 2, 3))) fig = plt.gcf() plt.close(fig) except ImportError: pass def test_show_hist(): """ This test only verifies that code up to the point of plotting with matplotlib works correctly. Tests do not exercise matplotlib. """ matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: try: show_hist((src, 1), bins=256) fig = plt.gcf() plt.close(fig) except ImportError: pass try: show_hist(src.read(), bins=256) fig = plt.gcf() plt.close(fig) except ImportError: pass try: fig, ax = plt.subplots(1) show_hist(src.read(), bins=256, ax=ax) fig = plt.gcf() plt.close(fig) except ImportError: pass def test_show_hist_mplargs(): """ This test only verifies that code up to the point of plotting with matplotlib works correctly. Tests do not exercise matplotlib. """ matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: try: show_hist(src, bins=50, lw=0.0, stacked=False, alpha=0.3, histtype='stepfilled', title="World Histogram overlaid") fig = plt.gcf() plt.close(fig) except ImportError: pass def test_show_contour(): """ This test only verifies that code up to the point of plotting with matplotlib works correctly. Tests do not exercise matplotlib. """ matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: try: show((src, 1), contour=True) fig = plt.gcf() plt.close(fig) except ImportError: pass def test_show_contour_mplargs(): """ This test only verifies that code up to the point of plotting with matplotlib works correctly. Tests do not exercise matplotlib. """ matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: try: show((src, 1), contour=True, levels=[25, 125], colors=['white', 'red'], linewidths=4, contour_label_kws=dict(fontsize=18, fmt="%1.0f", inline_spacing=15, use_clabeltext=True)) fig = plt.gcf() plt.close(fig) except ImportError: pass def test_get_plt(): """ This test only verifies that code up to the point of plotting with matplotlib works correctly. Tests do not exercise matplotlib. """ matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif'): try: assert plt == get_plt() except ImportError: pass def test_plt_transform(): matplotlib = pytest.importorskip('matplotlib') with rasterio.open('tests/data/RGB.byte.tif') as src: show(src.read(), transform=src.transform) show(src.read(1), transform=src.transform) def test_plotting_extent(): from rasterio.plot import reshape_as_image expected = (101985.0, 339315.0, 2611485.0, 2826915.0) with rasterio.open('tests/data/RGB.byte.tif') as src: assert plotting_extent(src) == expected assert plotting_extent( reshape_as_image(src.read()), transform=src.transform) == expected assert plotting_extent( src.read(1), transform=src.transform) == expected # array requires a transform with pytest.raises(ValueError): plotting_extent(src.read(1)) def test_plot_normalize(): a = np.linspace(1, 6, 10) b = adjust_band(a, 'linear') np.testing.assert_array_almost_equal(np.linspace(0, 1, 10), b)
{"hexsha": "ab352a956e53032c5a7e8f3be2bf04cac0f24929", "size": 9057, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_plot.py", "max_stars_repo_name": "Juanlu001/rasterio", "max_stars_repo_head_hexsha": "21c43443288f28e9ffcc9b9183c27568a36ed21b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_plot.py", "max_issues_repo_name": "Juanlu001/rasterio", "max_issues_repo_head_hexsha": "21c43443288f28e9ffcc9b9183c27568a36ed21b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_plot.py", "max_forks_repo_name": "Juanlu001/rasterio", "max_forks_repo_head_hexsha": "21c43443288f28e9ffcc9b9183c27568a36ed21b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.339100346, "max_line_length": 105, "alphanum_fraction": 0.6102462184, "include": true, "reason": "import numpy", "num_tokens": 2072}
import numpy as np import pprint import cta_fspecial import cta_chog class Cta_products(): def __init__(self, Fourier_coefficients, product_options, output_order=0): self.monoms = product_options['monoms'] self.feature_order=[0,5]; self.angular_power=[self.feature_order[0],self.feature_order[1]]; self.angular_cross=[1,2]; self.Fourier_coefficients = Fourier_coefficients self.num_features=len(self.Fourier_coefficients) # window の数 self.feature_order[0]=max([self.feature_order[0], output_order]) def in_interval(self, value, interval): #value が1つの値の場合でもリストで与える。 ok=True if len(interval)<2: ok=False for v in range(len(value)): if (value[v]<interval[0] or value[v]>interval[1]): ok=False return ok ### l=0 がなかったのはここだーーーーー!!!!! # if exist('output_order'), # feature_order(1)=max(feature_order(1),output_order); # end; # assert(max(cellfun(@numel,monoms))<4); # assert(min(cellfun(@numel,monoms))>0); ### def cta_products(self): product_mat=[] test = [] for m in range(len(self.monoms)): morder=len(self.monoms[m]) #monoms{m} = '01' なら morder = 2 #print(morder) monom=self.monoms[m] #print('monom: ' + str(monom)) for a in range(1, self.num_features+1): # a は window function の番号を表す #print('a: ' + str(a)) for la in range(self.Fourier_coefficients[a-1]["L"]+1): # Fourier_coefficients[a][1] には L が入っている # print('la: ' +str(la)) if morder==1: #window function を1種類のみ使う場合 assert float(monom[0])==0, 'ERROR: conj is not necessary' #条件式が False の場合にエラーを投げる new_product=[a,la,float(monom[0]), -1,-1,-1, -1,-1,-1, la,0] # +1で本当に合ってるか確認する product_mat.append(new_product) # #print(product_mat) else: for b in range(a,self.num_features+1): # print('b: ' + str(b)) start_bl=0; if a==b and monom[0]==monom[1]: start_bl=la; for lb in range(start_bl, self.Fourier_coefficients[b-1]["L"]+1): # print('lb: ' + str(lb)) if morder==2: ## window function を2種類使う場合 l=((-1)**float(monom[0]))*la + ((-1)**float(monom[1]))*lb #print('-- l: ' + str(l)) # test = (in_interval([l],feature_order) and # ((a==b and in_interval([la,lb],angular_power)) or # (a!=b and in_interval([la,lb],angular_cross)))) # #print(test) if (Cta_products(self.Fourier_coefficients, product_options).in_interval([l],self.feature_order) and ((a==b and Cta_products(self.Fourier_coefficients, product_options).in_interval([la,lb],self.angular_power)) or (a!=b and Cta_products(self.Fourier_coefficients, product_options).in_interval([la,lb],self.angular_cross)))): new_product=[a,la,float(monom[0]), b,lb,float(monom[1]), -1,-1,-1, l,0]; product_mat.append(new_product) # print(product_mat) ### 20201002 for debug elif morder==3: ## window function を3種類使う場合 # print('morder==3') # print('self.Fourier_coefficients.shape') # print(len(self.Fourier_coefficients)) for c in range(b, self.num_features+1): # print('c: ' + str(c)) # print('self.Fourier_coefficients[c]') # print(self.Fourier_coefficients[c]) start_cl=0; if c==b and monom[1]==monom[2]: start_cl=lb if c==a and monom[0]==monom[2]: start_cl=np.max(la,start_cl) # print('start_cl: ' + str(start_cl)) # print('self.Fourier_coefficients[c][1]+1: ' + str(self.Fourier_coefficients[c][1]+1)) for lc in range(start_cl,self.Fourier_coefficients[c-1]["L"]+1): #print('lc: ' + str(lc)) # if np.min([la,lb,lc])>0 if np.min([lb,lc])>0: # #if 1 l=(((-1)**float(monom[0]))*la +((-1)**float(monom[1]))*lb +((-1)**float(monom[2]))*lc) #print('---l: ' + str(l)) # print('self.feature_order') # print(self.feature_order) if (Cta_products(self.Fourier_coefficients,product_options).in_interval([l],self.feature_order) and (((a==b and b==c and a==c) and Cta_products(self.Fourier_coefficients, product_options).in_interval([la,lb,lc],self.angular_power)) or ((a!=b or b!=c or a!=c) and Cta_products(self.Fourier_coefficients, product_options).in_interval([la,lb,lc],self.angular_cross)))): new_product=[a,la,float(monom[0]), b,lb,float(monom[1]), c,lc,float(monom[2]), l,0]; product_mat.append(new_product) product_mat = np.array(product_mat) # product_mat = product_mat[product_mat[:,-2]!=0] # コメントアウト # v = np.sort(product_mat[:,-2])[::-1] indx = np.argsort(product_mat[:,-2]) product_mat=product_mat[indx] # # -2 列目を sort された状態を保ちつつ、さらに 1 列目で sort # for i in range(self.feature_order[1]+1): # i_indx = np.argsort(product_mat[product_mat[:,-2]==i][:,0]) # if i == 0: # i_product_mat=product_mat[product_mat[:,-2]==i][i_indx] # else: # i_product_mat = np.concatenate([i_product_mat, product_mat[product_mat[:,-2]==i][i_indx]], axis=0) # # print(i_product_mat) # product_mat = i_product_mat # # (列, 規則): (-2,降) → (0,昇) → (1,昇) → (3, 昇) → (4, 昇) の順に sort # 20201005 この部分は octave と出力を揃えるため print('self.feature_order[1]') print(self.feature_order[1]) for i in range(self.feature_order[1],-1,-1):#-2 tmp_i = product_mat[product_mat[:,-2]==i] # print('i: ' + str(i)) for j in range(1, self.feature_order[1]+1):#0 # print('j: ' + str(j)) tmp_j = tmp_i[tmp_i[:,0]==j] # print(tmp_j) for k in range(self.feature_order[1]+1):#1 # print('k: ' + str(k)) tmp_k = tmp_j[tmp_j[:,1]==k] # print(tmp_k) for l in range(self.feature_order[1]+1):#3 tmp_l = tmp_k[tmp_k[:,3]==l] for m in range(self.feature_order[1]+1):#4 tmp_m = tmp_l[tmp_l[:,4]==m] ijkl_indx = np.argsort(tmp_m[:,6])#6 # print('ijk_indx' + str(len(ijk_indx))) if i == self.feature_order[1] and j ==1 and k ==0: # flag に書き換え ijkl_product_mat=tmp_m[ijkl_indx] # print(ijkl_product_mat) else: ijkl_product_mat = np.concatenate([ijkl_product_mat, tmp_m[ijkl_indx]], axis=0) # print(ijk_product_mat) product_mat = ijkl_product_mat return product_mat
{"hexsha": "5946bec5cd9974cccc7b49e3241e3520e1bbd765", "size": 9205, "ext": "py", "lang": "Python", "max_stars_repo_path": "cta_products.py", "max_stars_repo_name": "nyunyu122/CHOG_python", "max_stars_repo_head_hexsha": "7e929506e48f1e58d2ded9dbd9f53676ef83356e", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cta_products.py", "max_issues_repo_name": "nyunyu122/CHOG_python", "max_issues_repo_head_hexsha": "7e929506e48f1e58d2ded9dbd9f53676ef83356e", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cta_products.py", "max_forks_repo_name": "nyunyu122/CHOG_python", "max_forks_repo_head_hexsha": "7e929506e48f1e58d2ded9dbd9f53676ef83356e", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.5174418605, "max_line_length": 414, "alphanum_fraction": 0.4195545899, "include": true, "reason": "import numpy", "num_tokens": 2090}
[STATEMENT] lemma lran_bwd_simp: "lran a l h = (if l<h then lran a l (h-1)@[a (h-1)] else [])" [PROOF STATE] proof (prove) goal (1 subgoal): 1. lran a l h = (if l < h then lran a l (h - 1) @ [a (h - 1)] else []) [PROOF STEP] apply (induction a l h rule: lran.induct) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>a l h. (l < h \<Longrightarrow> lran a (l + 1) h = (if l + 1 < h then lran a (l + 1) (h - 1) @ [a (h - 1)] else [])) \<Longrightarrow> lran a l h = (if l < h then lran a l (h - 1) @ [a (h - 1)] else []) [PROOF STEP] apply (rewrite in "\<hole> = _" lran.simps) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>a l h. (l < h \<Longrightarrow> lran a (l + 1) h = (if l + 1 < h then lran a (l + 1) (h - 1) @ [a (h - 1)] else [])) \<Longrightarrow> (if l < h then a l # lran a (l + 1) h else []) = (if l < h then lran a l (h - 1) @ [a (h - 1)] else []) [PROOF STEP] apply (rewrite in "_ = \<hole>" lran.simps) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>a l h. (l < h \<Longrightarrow> lran a (l + 1) h = (if l + 1 < h then lran a (l + 1) (h - 1) @ [a (h - 1)] else [])) \<Longrightarrow> (if l < h then a l # lran a (l + 1) h else []) = (if l < h then (if l < h - 1 then a l # lran a (l + 1) (h - 1) else []) @ [a (h - 1)] else []) [PROOF STEP] by (auto simp: less_le)
{"llama_tokens": 617, "file": "IMP2_lib_IMP2_Aux_Lemmas", "length": 4}
import geopandas as gpd import pandas as pd from shapely.geometry import Polygon,Point from .grids import GPS_to_grids,grids_centre import math import numpy as np from .preprocess import * def busgps_arriveinfo(data,line,stop,col = ['VehicleId','GPSDateTime','lon','lat','stopname'], stopbuffer = 200,mintime = 300,project_epsg = 2416,timegap = 1800,method = 'project',projectoutput = False): ''' 输入公交GPS数据、公交线路与站点的GeoDataFrame,该方法能够识别公交的到离站信息 输入 ------- data : DataFrame 公交GPS数据,单一公交线路,且需要含有车辆ID、GPS时间、经纬度(wgs84) line : GeoDataFrame 公交线型的GeoDataFrame数据,单一公交线路 stop : GeoDataFrame 公交站点的GeoDataFrame数据 col : List 列名,按[车辆ID,时间,经度,纬度,站点名称字段]的顺序 stopbuffer : number 米,站点的一定距离范围,车辆进入这一范围视为到站,离开则视为离站 mintime : number 秒,短时间内公交再次到站则需要与前一次的到站数据结合一起计算到离站时间,该参数设置阈值 project_epsg : number 匹配时会将数据转换为投影坐标系以计算距离,这里需要给定投影坐标系的epsg代号 timegap : number 秒,清洗数据用,多长时间车辆不出现,就视为新的车辆 method : str 公交运行图匹配方法,可选'project'或'dislimit'; project为直接匹配线路上最近点,匹配速度快; dislimit则需要考虑前面点位置,加上距离限制,匹配速度慢。 projectoutput : bool 是否输出投影后的数据 输出 ------- arrive_info : DataFrame 公交到离站信息 ''' VehicleId,GPSDateTime,lon,lat,stopcol = col #数据清洗 print('数据清洗中',end = '') line.set_crs(crs='epsg:4326',allow_override=True,inplace=True) line = line.to_crs(epsg = project_epsg) line_buffer = line.copy() line_buffer['geometry'] = line_buffer.buffer(200) line_buffer = line_buffer.to_crs(epsg = 4326) print('.',end = '') data = clean_same(data,col=[VehicleId,GPSDateTime,lon,lat]) print('.',end = '') data = clean_outofshape(data,line_buffer,col = [lon,lat],accuracy = 500) print('.') data = id_reindex(data,VehicleId,timegap = timegap,timecol = GPSDateTime,suffix='') print('运行位置匹配中',end = '') #利用project方法,将数据点投影至公交线路上 lineshp = line['geometry'].iloc[0] print('.',end = '') data['geometry'] = gpd.points_from_xy(data[lon],data[lat]) data = gpd.GeoDataFrame(data) data.set_crs(crs='epsg:4326',allow_override=True,inplace=True) print('.',end = '') data = data.to_crs(epsg = project_epsg) print('.',end = '') if method == 'project': data['project'] = data['geometry'].apply(lambda r:lineshp.project(r)) elif method == 'dislimit': tmps = [] #改进的匹配方法 for vid in data[VehicleId].drop_duplicates(): print('.',end = '') tmp = data[data[VehicleId]==vid].copy() gap = 30 i = 0 tmp = tmp.sort_values(by = [VehicleId,GPSDateTime]).reset_index(drop=True) tmp['project'] = 0 from shapely.geometry import LineString for i in range(len(tmp)-1): if i == 0: proj = lineshp.project(tmp.iloc[i]['geometry']) tmp.loc[i,'project'] = proj else: proj = tmp['project'].iloc[i] dis = tmp.iloc[i+1]['geometry'].distance(tmp.iloc[i]['geometry']) if dis == 0: proj1 = proj else: proj2 = lineshp.project(tmp.iloc[i+1]['geometry']) if abs(proj2-proj)>dis: proj1 = np.sign(proj2-proj)*dis+proj else: proj1 = proj2 tmp.loc[i+1,'project'] = proj1 tmps.append(tmp) data = pd.concat(tmps) print('.',end = '') #公交站点也进行project stop = stop.to_crs(epsg = project_epsg) stop['project'] = stop['geometry'].apply(lambda r:lineshp.project(r)) print('.',end = '') #标准化时间 starttime = data[GPSDateTime].min() data['time_st'] = (data[GPSDateTime]-starttime).dt.total_seconds() BUS_project = data print('.') from shapely.geometry import LineString,Polygon import shapely #定义一个空的list存储识别结果 ls = [] print('匹配到离站信息...',end = '') #对每一辆车遍历 for car in BUS_project[VehicleId].drop_duplicates(): print('.',end = '') #提取车辆轨迹 tmp = BUS_project[BUS_project[VehicleId] == car] #如果车辆数据点少于1个,则无法构成轨迹 if len(tmp)>1: #对每一个站点识别 for stopname in stop[stopcol].drop_duplicates(): #提取站点位置 position = stop[stop[stopcol] == stopname]['project'].iloc[0] #通过缓冲区与线段交集识别到离站轨迹 buffer_polygon = LineString([[0,position], [data['time_st'].max(),position]]).buffer(stopbuffer) bus_linestring = LineString(tmp[['time_st','project']].values) line_intersection = bus_linestring.intersection(buffer_polygon) #整理轨迹,提取到离站时间 if line_intersection.is_empty: #如果为空,说明车辆没有到站信息 continue else: if type(line_intersection) == shapely.geometry.linestring.LineString: arrive = [line_intersection] else: arrive = list(line_intersection) arrive = pd.DataFrame(arrive) arrive['arrivetime']= arrive[0].apply(lambda r:r.coords[0][0]) arrive['leavetime']= arrive[0].apply(lambda r:r.coords[-1][0]) #通过时间阈值筛选到离站信息 a = arrive[['arrivetime']].copy() a.columns = ['time'] a['flag'] = 1 b = arrive[['leavetime']].copy() b.columns = ['time'] b['flag'] = 0 c = pd.concat([a,b]).sort_values(by = 'time') c['time1'] = c['time'].shift(-1) c['flag_1'] = ((c['time1']-c['time'])<mintime)&(c['flag']==0) c['flag_2'] = c['flag_1'].shift().fillna(False) c['flag_3'] = c['flag_1']|c['flag_2'] c = c[-c['flag_3']] arrive_new = c[c['flag'] == 1][['time']].copy() arrive_new.columns = ['arrivetime'] arrive_new['leavetime'] = list(c[c['flag'] == 0]['time']) arrive_new[stopcol] = stopname arrive_new[VehicleId] = car #合并数据 ls.append(arrive_new) #合成一个大表 arrive_info = pd.concat(ls) arrive_info['arrivetime'] = starttime+arrive_info['arrivetime'].apply(lambda r:pd.Timedelta(int(r),unit = 's')) arrive_info['leavetime'] = starttime+arrive_info['leavetime'].apply(lambda r:pd.Timedelta(int(r),unit = 's')) if projectoutput: return arrive_info,data else: return arrive_info def busgps_onewaytime(arrive_info,start,end,col = ['VehicleId','stopname']): ''' 输入到离站信息表arrive_info与起终点名称,计算单程耗时 输入 ------- arrive_info : DataFrame 公交到离站数据 start : Str 起点站名字 end : Str 终点站名字 col : List 字段列名[车辆ID,站点名称] 输出 ------- onewaytime : DataFrame 公交单程耗时 ''' #上行 #将起终点的信息提取后合并到一起 #终点站的到达时间 [VehicleId,stopname] = col a = arrive_info[arrive_info[stopname] == end][['arrivetime',stopname,VehicleId]] #起点站的离开时间 b = arrive_info[arrive_info[stopname] == start][['leavetime',stopname,VehicleId]] a.columns = ['time',stopname,VehicleId] b.columns = ['time',stopname,VehicleId] #合并信息 c = pd.concat([a,b]) #排序后提取每一单程的出行时间 c = c.sort_values(by = [VehicleId,'time']) for i in c.columns: c[i+'1'] = c[i].shift(-1) #提取以申昆路枢纽站为起点,延安东路外滩为终点的趟次 c = c[(c[VehicleId] == c[VehicleId+'1'])& (c[stopname]==start)& (c[stopname+'1']==end)] #计算该趟出行的持续时间 c['duration'] = (c['time1'] - c['time']).dt.total_seconds() #标识该趟出行的时间中点在哪一个小时 c['shour'] = c['time'].dt.hour c['方向'] = start+'-'+end #储存为c1变量 c1 = c.copy() #下行 a = arrive_info[arrive_info[stopname] == start][['arrivetime',stopname,VehicleId]] b = arrive_info[arrive_info[stopname] == end][['leavetime',stopname,VehicleId]] a.columns = ['time',stopname,VehicleId] b.columns = ['time',stopname,VehicleId] c = pd.concat([a,b]) c = c.sort_values(by = [VehicleId,'time']) for i in c.columns: c[i+'1'] = c[i].shift(-1) c = c[(c[VehicleId] == c[VehicleId+'1'])&(c[stopname]==end)&(c[stopname+'1']==start)] c['duration'] = (c['time1'] - c['time']).dt.total_seconds() c['shour'] = c['time'].dt.hour c['方向'] = end+'-'+start c2 = c.copy() onewaytime = pd.concat([c1,c2]) return onewaytime
{"hexsha": "cc06eb690e9ad64e044d5ea3f2e321bc300d8b36", "size": 8592, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/transbigdata/busgps.py", "max_stars_repo_name": "martinfleis/transbigdata", "max_stars_repo_head_hexsha": "ec80fcd47bf25edef26f4838d257c3cd9b6d9e2e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-21T14:24:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T14:24:30.000Z", "max_issues_repo_path": "src/transbigdata/busgps.py", "max_issues_repo_name": "yan-yuchen/transbigdata", "max_issues_repo_head_hexsha": "520cb59dd857ac1e30d904aabda1b76addf9354d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/transbigdata/busgps.py", "max_forks_repo_name": "yan-yuchen/transbigdata", "max_forks_repo_head_hexsha": "520cb59dd857ac1e30d904aabda1b76addf9354d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1008403361, "max_line_length": 130, "alphanum_fraction": 0.5572625698, "include": true, "reason": "import numpy", "num_tokens": 2802}
import sys import os sys.path.insert(1, os.path.join(sys.path[0], '..')) from vaccine_alloc_instance import * import numpy as np import random class RandomInstanceGenerator: def __init__(self,number_of_instances, n,c,d,q, Q_d_min, Q_d_max, Q_c_min, Q_c_max, p_availability=0.6 ): self.number_of_instances = number_of_instances self.Q_d_min = Q_d_min self.Q_d_max = Q_d_max self.Q_c_min = Q_c_min self.Q_c_max =Q_c_max self.p_availability = p_availability self.n = n self.c = c self.d = d self.q = q def generate(self): random_instances = [] for i in range(self.number_of_instances): availability = [[np.random.choice([0,1], p=[1-self.p_availability, self.p_availability]) for j in range(self.d) ] for i in range(self.n)] belongsToCatagory = [[random.randint(0,1) for j in range(self.c)] for i in range(self.n)] Q_d = [random.randint(self.Q_d_min,self.Q_d_max) for i in range(self.d)] Q_c = [random.randint(self.Q_c_min,self.Q_c_max) for i in range(self.c)] Q_cxd = [[random.randint(0,min(Q_d[j],Q_c[i])) for j in range(self.d)] for i in range(self.c)] #Setting Utility values U_nxd = [] for i in range(self.n): delta=random.random() U_nxd.append([1*(delta**i) for i in range(self.d)]) new_instance = VaccineAllocInstance(self.n,self.c,self.d,self.q,availability,belongsToCatagory,Q_d,Q_c,Q_cxd,U_nxd) random_instances.append(new_instance) return random_instances
{"hexsha": "6a6cfa18c5d5228d08a9fe64a9a9320213ca32a0", "size": 1438, "ext": "py", "lang": "Python", "max_stars_repo_path": "generators/random_instance_generator_1.py", "max_stars_repo_name": "severus-tux/vaccine-alloc", "max_stars_repo_head_hexsha": "5f04e93c51f637d0adad96bd35b3a2726f225701", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generators/random_instance_generator_1.py", "max_issues_repo_name": "severus-tux/vaccine-alloc", "max_issues_repo_head_hexsha": "5f04e93c51f637d0adad96bd35b3a2726f225701", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generators/random_instance_generator_1.py", "max_forks_repo_name": "severus-tux/vaccine-alloc", "max_forks_repo_head_hexsha": "5f04e93c51f637d0adad96bd35b3a2726f225701", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.95, "max_line_length": 140, "alphanum_fraction": 0.7162726008, "include": true, "reason": "import numpy", "num_tokens": 424}
#!/usr/bin/env python """ Use forced alignments to separate digit sequences into individual digits. Author: Herman Kamper Contact: kamperh@gmail.com Date: 2018 Edited: Ryan Eloff Date: June 2018 """ from __future__ import absolute_import, division, print_function from os import path import argparse import sys import numpy as np #-----------------------------------------------------------------------------# # UTILITY FUNCTIONS # #-----------------------------------------------------------------------------# def check_argv(): """Check the command line arguments.""" parser = argparse.ArgumentParser( description=__doc__.strip().split("\n")[0], add_help=False ) parser.add_argument( "fadir", type=str, help="Directory containing forced alignments." ) parser.add_argument( "outdir", type=str, help="Diretory to write output individual segment files" ) parser.add_argument( "dataset", type=str, choices={"train", "test"}, help="Dataset to obtain segments for." ) if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args() #-----------------------------------------------------------------------------# # MAIN FUNCTION # #-----------------------------------------------------------------------------# def main(): args = check_argv() segments = {} # Read forced alignment fa_dir = args.fadir fa_fn = path.join(fa_dir, args.dataset + "_word_align.ctm") print("Reading:", fa_fn) with open(fa_fn) as f: # Keep track of the number of each digit key added per utterance sequence (in case of duplicates with the same key) utt_digit_keys = {} # For each entry in the forced alignments for line in f: # Create a segments entry line = line.split() utt_key = line[0] digit_start = float(line[2]) digit_duration = float(line[3]) digit_key = line[4] # Keep track of the number of each digit key per utterance key (in case of duplicate digit keys in same sequence) if not utt_key in utt_digit_keys: utt_digit_keys[utt_key] = {} if not digit_key in utt_digit_keys[utt_key]: utt_digit_keys[utt_key][digit_key] = 0 else: utt_digit_keys[utt_key][digit_key] += 1 # Change digit key to be '<digit_key>a' for first occurence, '<digit_key>b' for second occurence, etc. digit_key = digit_key + chr(utt_digit_keys[utt_key][digit_key] + ord('a')) segments[utt_key + "_" + digit_key] = ( # NOTE: Previously used `extract-rows` Kaldi module to extract individual digit features with segment start/end specified in frames (time/[10 ms frame-shift] -> time * 100). # Kaldi now uses `extract-feature-segments` instead, with segment start/end specified in seconds. # Integer floor of (time*100) maintained to generate same feature segments as previously. utt_key, int(np.floor(digit_start*100))/100.0, int(np.floor((digit_start + digit_duration)*100))/100.0 ) # Write segments segments_fn = path.join(args.outdir, "segments_indiv") print("Writing:", segments_fn) with open(segments_fn, "w") as f: for segment_key in sorted(segments): utt_key, digit_start, digit_end = segments[segment_key] f.write( "{} {} {} {}\n".format(segment_key, utt_key, digit_start, digit_end) ) if __name__ == "__main__": main()
{"hexsha": "2692bfd71f7d38f67e30f3e33b9a4ad67f6424c0", "size": 3846, "ext": "py", "lang": "Python", "max_stars_repo_path": "kaldi_features/tidigits/tidigits_segments_prep.py", "max_stars_repo_name": "rpeloff/multimodal_one-shot_learning", "max_stars_repo_head_hexsha": "b08b9deffea5c656f07a616f31850192e32c2aee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-05-14T08:52:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T10:01:44.000Z", "max_issues_repo_path": "kaldi_features/tidigits/tidigits_segments_prep.py", "max_issues_repo_name": "rpeloff/multimodal_one-shot_learning", "max_issues_repo_head_hexsha": "b08b9deffea5c656f07a616f31850192e32c2aee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kaldi_features/tidigits/tidigits_segments_prep.py", "max_forks_repo_name": "rpeloff/multimodal_one-shot_learning", "max_forks_repo_head_hexsha": "b08b9deffea5c656f07a616f31850192e32c2aee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-08-27T08:49:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-01T15:08:16.000Z", "avg_line_length": 35.9439252336, "max_line_length": 189, "alphanum_fraction": 0.5429017161, "include": true, "reason": "import numpy", "num_tokens": 803}
import json import logging import os import shutil import numpy as np import torch from datetime import datetime, timedelta from torch import nn, optim from torch.nn import functional as F from models.fc_model import FCModel from sklearn.preprocessing import label_binarize _RNG_SEED = None def split(a, n): k, m = divmod(len(a), n) return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n)) def fix_rng_seed(seed): """ Call this function at the beginning of program to fix rng seed. Args: seed (int): Note: See https://github.com/tensorpack/tensorpack/issues/196. Example: Fix random seed in both tensorpack and tensorflow. .. code-block:: python import utils seed = 42 utils.fix_rng_seed(seed) torch.manual_seed(seed) if config.cuda: torch.cuda.manual_seed(seed) # run trainer """ global _RNG_SEED _RNG_SEED = int(seed) def get_rng(obj=None): """ Get a good RNG seeded with time, pid and the object. Args: obj: some object to use to generate random seed. Returns: np.random.RandomState: the RNG. """ seed = (id(obj) + os.getpid() + int(datetime.now().strftime("%Y%m%d%H%M%S%f"))) % 4294967295 if _RNG_SEED is not None: seed = _RNG_SEED return np.random.RandomState(seed) class RunningAverage(): """A simple class that maintains the running average of a quantity Example: ``` loss_avg = RunningAverage() loss_avg.update(2) loss_avg.update(4) loss_avg() = 3 ``` """ def __init__(self): self.steps = 0 self.total = 0 def update(self, val): self.total += val self.steps += 1 def __call__(self): return self.total/float(self.steps) def set_logger(log_path): """Set the logger to log info in terminal and file `log_path`. In general, it is useful to have a logger so that every output to the terminal is saved in a permanent file. Here we save it to `model_dir/train.log`. Example: ``` logging.info("Starting training...") ``` Args: log_path: (string) where to log """ logger = logging.getLogger() logger.setLevel(logging.INFO) if not logger.handlers: # Logging to a file file_handler = logging.FileHandler(log_path) file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s')) logger.addHandler(file_handler) # Logging to console stream_handler = logging.StreamHandler() stream_handler.setFormatter(logging.Formatter('%(message)s')) logger.addHandler(stream_handler) def save_dict_to_json(d, json_path): """Saves dict of floats in json file Args: d: (dict) of float-castable values (np.float, int, float, etc.) json_path: (string) path to json file """ def to_float(d): for k,v in d.items(): if type(v) is dict: d[k] = to_float(v) elif isinstance(v, np.float32) or isinstance(v, np.float64): d[k] = float(v) elif isinstance(v, list): d[k] = [float(x) for x in v] return d with open(json_path, 'w') as f: # We need to convert the values to float for json (it doesn't accept np.array, np.float, ) d = to_float(d) #{k: float(v) for k, v in d.items()} json.dump(d, f, indent=2) def save_checkpoint(state, is_best, checkpoint, name=None): """Saves model and training parameters at checkpoint + 'last.pth.tar'. If is_best==True, also saves checkpoint + 'best.pth.tar' Args: state: (dict) contains model's state_dict, may contain other keys such as epoch, optimizer state_dict is_best: (bool) True if it is the best model seen till now checkpoint: (string) folder where parameters are to be saved """ filepath = os.path.join(checkpoint, 'last.pth.tar') if not os.path.exists(checkpoint): print("Checkpoint Directory does not exist! Making directory {}".format(checkpoint)) os.mkdir(checkpoint) #else: #print("Checkpoint Directory exists! ") torch.save(state, filepath) if is_best: save_name = 'best.pth.tar' if name is None else 'best_{}.pth.tar'.format(name) shutil.copyfile(filepath, os.path.join(checkpoint, save_name)) def load_checkpoint(checkpoint, model, optimizer=None): """Loads model parameters (state_dict) from file_path. If optimizer is provided, loads state_dict of optimizer assuming it is present in checkpoint. Args: checkpoint: (string) filename which needs to be loaded model: (torch.nn.Module) model for which the parameters are loaded optimizer: (torch.optim) optional: resume optimizer from checkpoint """ if not os.path.exists(checkpoint): raise ValueError("File doesn't exist {}".format(checkpoint)) checkpoint = torch.load(checkpoint) model.load_state_dict(checkpoint['state_dict']) if optimizer: optimizer.load_state_dict(checkpoint['optim_dict']) return checkpoint class _ECELoss(nn.Module): """ Calculates the Expected Calibration Error of a model. The input to this loss is the logits of a model, NOT the softmax scores. This divides the confidence outputs into equally-sized interval bins. In each bin, we compute the confidence gap: bin_gap = | avg_confidence_in_bin - accuracy_in_bin | We then return a weighted average of the gaps, based on the number of samples in each bin See: Naeini, Mahdi Pakdaman, Gregory F. Cooper, and Milos Hauskrecht. "Obtaining Well Calibrated Probabilities Using Bayesian Binning." AAAI. 2015. """ def __init__(self, n_bins=15, save_bins=False, save_path=None): """ n_bins (int): number of confidence interval bins """ super(_ECELoss, self).__init__() bin_boundaries = torch.linspace(0, 1, n_bins + 1) self.bin_lowers = bin_boundaries[:-1] self.bin_uppers = bin_boundaries[1:] self.save_bins = save_bins self.save_path = save_path def forward(self, logits, labels): softmaxes = F.softmax(logits, dim=1) confidences, predictions = torch.max(softmaxes, 1) accuracies = predictions.eq(labels) ece = torch.zeros(1, device=logits.device) if self.save_bins: bin_data = {'bin_lowers': [], 'bin_uppers': [], 'props': [], 'accs': [], 'confs': []} for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers): # Calculated |confidence - accuracy| in each bin in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item()) prop_in_bin = in_bin.float().mean() if prop_in_bin.item() > 0: accuracy_in_bin = accuracies[in_bin].float().mean() avg_confidence_in_bin = confidences[in_bin].mean() ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin if self.save_bins: bin_data['bin_lowers'].append(bin_lower.item()) bin_data['bin_uppers'].append(bin_upper.item()) bin_data['props'].append(prop_in_bin.item()) bin_data['accs'].append(accuracy_in_bin.item()) bin_data['confs'].append(avg_confidence_in_bin.item()) if self.save_bins: save_dict_to_json(bin_data, self.save_path) return ece class _CwECELoss(nn.Module): """ Calculates the Class-wise Expected Calibration Error of a model. The input to this loss is the logits of a model, NOT the softmax scores. This divides the confidence outputs of each class j into equally-sized interval bins. In each bin, we compute the confidence gap: bin_gap = | avg_confidence_in_bin_j - accuracy_in_bin_j | We then return a weighted average of the gaps, based on the number of samples in each bin """ def __init__(self, n_bins=15, avg=True): """ n_bins (int): number of confidence interval bins """ super(_CwECELoss, self).__init__() bin_boundaries = torch.linspace(0, 1, n_bins + 1) self.bin_lowers = bin_boundaries[:-1] self.bin_uppers = bin_boundaries[1:] self.avg = avg def forward(self, logits, labels): softmaxes = F.softmax(logits, dim=1) num_classes = logits.shape[1] cw_ece = torch.zeros(1, device=logits.device) for j in range(num_classes): confidences_j = softmaxes[:,j] ece_j = torch.zeros(1, device=logits.device) for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers): in_bin = confidences_j.gt(bin_lower.item()) * confidences_j.le(bin_upper.item()) prop_in_bin = in_bin.float().mean() if prop_in_bin.item() > 0: accuracy_j_in_bin = labels[in_bin].eq(j).float().mean() avg_confidence_j_in_bin = confidences_j[in_bin].mean() ece_j += torch.abs(avg_confidence_j_in_bin - accuracy_j_in_bin) * prop_in_bin cw_ece += ece_j if self.avg: return cw_ece/num_classes else: return cw_ece # The next two functions are copied from Kull etal implementation # for testing def binary_ECE(probs, y_true, power = 1, bins = 15): idx = np.digitize(probs, np.linspace(0, 1, bins)) - 1 bin_func = lambda p, y, idx: (np.abs(np.mean(p[idx]) - np.mean(y[idx])) ** power) * np.sum(idx) / len(probs) ece = 0 for i in np.unique(idx): ece += bin_func(probs, y_true, idx == i) return ece def classwise_ECE(probs, y_true, power = 1, bins = 15): probs = np.array(probs) if not np.array_equal(probs.shape, y_true.shape): y_true = label_binarize(np.array(y_true), classes=range(probs.shape[1])) n_classes = probs.shape[1] return np.sum( [ binary_ECE( probs[:, c], y_true[:, c].astype(float), power = power, bins = bins ) for c in range(n_classes) ] ) class _CwECELossDir(nn.Module): """ Calculates the Class-wise Expected Calibration Error of a model. The input to this loss is the logits of a model, NOT the softmax scores. This divides the confidence outputs of each class j into equally-sized interval bins. In each bin, we compute the confidence gap: bin_gap = | avg_confidence_in_bin_j - accuracy_in_bin_j | We then return a weighted average of the gaps, based on the number of samples in each bin """ def __init__(self, n_bins=15): """ n_bins (int): number of confidence interval bins """ super(_CwECELossDir, self).__init__() self.n_bins = n_bins def forward(self, logits, labels): probs = F.softmax(logits, dim=1).detach().cpu().numpy() y_true = labels.detach().cpu().numpy() cwece = classwise_ECE(probs, y_true, bins=self.n_bins) return torch.tensor(cwece, device=logits.device, dtype=logits.dtype) class _MCELoss(nn.Module): """ Calculates the Maximum Calibration Error of a model. The input to this loss is the logits of a model, NOT the softmax scores. This divides the confidence outputs into equally-sized interval bins. In each bin, we compute the confidence gap: bin_gap = | avg_confidence_in_bin - accuracy_in_bin | We then return a maximum of the gaps See: Naeini, Mahdi Pakdaman, Gregory F. Cooper, and Milos Hauskrecht. "Obtaining Well Calibrated Probabilities Using Bayesian Binning." AAAI. 2015. """ def __init__(self, n_bins=15): """ n_bins (int): number of confidence interval bins """ super(_MCELoss, self).__init__() bin_boundaries = torch.linspace(0, 1, n_bins + 1) self.bin_lowers = bin_boundaries[:-1] self.bin_uppers = bin_boundaries[1:] def forward(self, logits, labels): softmaxes = F.softmax(logits, dim=1) confidences, predictions = torch.max(softmaxes, 1) accuracies = predictions.eq(labels) cal_errors = [] for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers): # Calculated |confidence - accuracy| in each bin in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item()) prop_in_bin = in_bin.float().mean() if prop_in_bin.item() > 0: accuracy_in_bin = accuracies[in_bin].float().mean() avg_confidence_in_bin = confidences[in_bin].mean() cal_errors.append(torch.abs(avg_confidence_in_bin - accuracy_in_bin)) return torch.max(torch.stack(cal_errors)) class _BrierLoss(nn.Module): """ Calculates the Brier Error of a model. The input to this loss is the logits of a model, NOT the softmax scores. We then return a mean square of the gaps between one-hot labels and the predicted scores. """ def __init__(self): """ """ super(_BrierLoss, self).__init__() def forward(self, logits, labels): softmaxes = F.softmax(logits, dim=1) labels_onehot = torch.zeros_like(softmaxes) labels_onehot.scatter_(1, labels[...,None], 1) diff = (labels_onehot - softmaxes) diff = diff *diff return diff.mean() class _MSODIRLoss(object): """ Calculates the nll + Matrix scaling off-diagonal andbias regularization of a model. The input to this loss is the logits of a model, NOT the softmax scores. NOTE: This loss only works with FCModel with zero hidden layers (i.e., num_hiddens is []) """ def __init__(self, model, weight_lambda=5e-4, bias_mu=5e-4): """ Args: model: (models.fc_model.FCModel object) weight_lambda: regularization weight for weights of fc in model bias_mu: regularization weights for bias of fc in model """ super(_MSODIRLoss, self).__init__() assert(isinstance(model, FCModel)) assert(len(model.num_hiddens) == 0) self.model = model self.ce = nn.CrossEntropyLoss() self.weight_lambda = weight_lambda self.bias_mu = bias_mu def __call__(self, logits, labels): ce_part = self.ce(logits, labels) weight_part = (1 - torch.eye(self.model.fc.weight.shape[0]) ).to(logits.device)*self.model.fc.weight weight_part = torch.sum(weight_part*weight_part) if self.model.fc.bias is not None: bias_part = torch.sum(self.model.fc.bias * self.model.fc.bias) else: bias_part=0 return ce_part + self.weight_lambda * weight_part + self.bias_mu * bias_part class EarlyStopping: """Early stops the training if validation loss doesn't improve after a given patience.""" def __init__(self, patience=7, verbose=False, delta=0): """ Args: patience (int): How long to wait after last time validation loss improved. Default: 7 verbose (bool): If True, prints a message for each validation loss improvement. Default: False delta (float): Minimum change in the monitored quantity to qualify as an improvement. Default: 0 """ self.patience = patience self.verbose = verbose self.counter = 0 self.best_score = None self.early_stop = False self.val_loss_min = np.Inf self.delta = delta def __call__(self, val_loss, model): score = -val_loss if self.best_score is None: self.best_score = score self.save_checkpoint(val_loss, model) elif score < self.best_score + self.delta: self.counter += 1 print('EarlyStopping counter: {} out of {}'.format(self.counter, self.patience)) if self.counter >= self.patience: self.early_stop = True else: self.best_score = score self.save_checkpoint(val_loss, model) self.counter = 0 def save_checkpoint(self, val_loss, model): '''Saves model when validation loss decrease.''' if self.verbose: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(self.val_loss_min, self.val_loss_min)) torch.save(model.state_dict(), 'checkpoint.pt') self.val_loss_min = val_loss
{"hexsha": "aa81355a702ec30d2c976a07a75fe872cc38bd96", "size": 15966, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "GuangyuanHao/IntraOrderPreservingCalibration", "max_stars_repo_head_hexsha": "79325ae321fc1bce3088b2af8ecd18357b50fb6a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-11-22T15:36:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-14T09:45:59.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "GuangyuanHao/IntraOrderPreservingCalibration", "max_issues_repo_head_hexsha": "79325ae321fc1bce3088b2af8ecd18357b50fb6a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-01-10T07:13:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-27T03:04:30.000Z", "max_forks_repo_path": "utils.py", "max_forks_repo_name": "GuangyuanHao/IntraOrderPreservingCalibration", "max_forks_repo_head_hexsha": "79325ae321fc1bce3088b2af8ecd18357b50fb6a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-04-11T14:06:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T20:31:06.000Z", "avg_line_length": 34.2618025751, "max_line_length": 128, "alphanum_fraction": 0.6574596017, "include": true, "reason": "import numpy", "num_tokens": 3969}
import numpy as np import scipy.optimize import warnings def calc_weights(cov, x0=None, options=None, scale_factor=10000, pcr_tolerance=0.001, ignore_objective=False): """ Calculate the weights associated with the equal risk contribution portfolio. Refer to "On the Properties of Equally-Weighted Risk Contributions Portfolios" by Maillard, Roncalli, and Teiletche for definitions. Parameters ---------- cov: numpy.ndarray (N, N) covariance matrix of assets, must be positive definite x0: numpy.ndarray (N,) initial solution guess. If None is given uses inverse of standard deviation regularized to be between 0 and 1. options: dictionary A dictionary of solver options. See scipy.optimize.minimize. scale_factor: float Number to scale the optimization function by, can be helpful for convergence pcr_tolerance: float The max allowable tolerance for differences in the PCR coming from different assets in decimal terms, e.g. 1% would be 0.01 ignore_objective: False Provided the max difference in PCR satifies pcr_tolerance, ignore whether the objective function has converged. See Notes below. Returns ------- w: numpy.ndarray (N,) array of asset weights Notes: ------ The objective function from the paper embodies but is not exactly the same as the desired result, which is to have equal risk contributions in terms of PCR for each asset. As a result, there are scenarios where the maxiter will be exceeded (i.e. non convergence) when in fact the goal of having equal risk contributions within some acceptable tolerance has been achieved. In these scenaries playing around with 'ftol' and 'maxiter' in 'options' and 'scale_factor' is helpful. The objective function can also be ignored using ignore_objective=True, meaning the weights will be returned provided the max PCR tolerance is satiesfied even if the objective has not converged. See https://github.com/matthewgilbert/erc/issues/1 """ # check matrix is PD np.linalg.cholesky(cov) if not options: options = {'ftol': 1e-20, 'maxiter': 800} def fun(x): # these are non normalized risk contributions, i.e. not regularized # by total risk, seems to help numerically risk_contributions = x.dot(cov) * x a = np.reshape(risk_contributions, (len(risk_contributions), 1)) # broadcasts so you get pairwise differences in risk contributions risk_diffs = a - a.transpose() sum_risk_diffs_squared = np.sum(np.square(np.ravel(risk_diffs))) # https://stackoverflow.com/a/36685019/1451311 return sum_risk_diffs_squared / scale_factor N = cov.shape[0] if x0 is None: x0 = 1 / np.sqrt(np.diag(cov)) x0 = x0 / x0.sum() bounds = [(0, 1) for i in range(N)] constraints = {'type': 'eq', 'fun': lambda x: np.sum(x) - 1} res = scipy.optimize.minimize(fun, x0, method='SLSQP', bounds=bounds, constraints=constraints, options=options) weights = res.x risk_squared = weights.dot(cov).dot(weights) pcrs = weights.dot(cov) * weights / risk_squared pcrs = np.reshape(pcrs, (len(pcrs), 1)) pcr_max_diff = np.max(np.abs(pcrs - pcrs.transpose())) if not res.success: if ignore_objective and (pcr_max_diff < pcr_tolerance): return weights else: msg = ("Max difference in percentage contribution to risk " "in decimals is {0:.2E}, " "tolerance is {1:.2E}".format(pcr_max_diff, pcr_tolerance)) warnings.warn(msg) raise RuntimeError(res) if pcr_max_diff > pcr_tolerance: raise RuntimeError("Max difference in percentage contribution to risk " "in decimals is %s which exceeds tolerance of %s." % (pcr_max_diff, pcr_tolerance)) return weights
{"hexsha": "201a23513d2e8d329c8c71a4b3c576d73ab3543d", "size": 4096, "ext": "py", "lang": "Python", "max_stars_repo_path": "erc/erc.py", "max_stars_repo_name": "matthewgilbert/erc", "max_stars_repo_head_hexsha": "0f1b2587570f67e89ad713d48947ca1d96a801c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2017-08-02T10:41:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T21:14:39.000Z", "max_issues_repo_path": "erc/erc.py", "max_issues_repo_name": "matthewgilbert/erc", "max_issues_repo_head_hexsha": "0f1b2587570f67e89ad713d48947ca1d96a801c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-08-06T19:37:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T20:56:24.000Z", "max_forks_repo_path": "erc/erc.py", "max_forks_repo_name": "matthewgilbert/erc", "max_forks_repo_head_hexsha": "0f1b2587570f67e89ad713d48947ca1d96a801c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-08-20T02:05:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T09:22:41.000Z", "avg_line_length": 40.96, "max_line_length": 79, "alphanum_fraction": 0.6538085938, "include": true, "reason": "import numpy,import scipy", "num_tokens": 974}
\section{Croissant} \label{croissant} \setcounter{secnumdepth}{0} Time: 9 hours (30 minutes prep, 7+ hours inactive rising and resting, 18 minutes baking) Serves: 12 pastries, 6-12 people, depending on generosity \begin{multicols}{2} \subsection*{Ingredients} \begin{itemize} \item 1 recipe of \nameref{viennoiserie} \item Flour to roll out dough \item Butter for cooking sheet \item 1 egg \item 1 teaspoon water \end{itemize} \subsection*{Hardware} \begin{itemize} \item Large surface for rolling \item Knife with which to cut pastry \item Baking sheet \end{itemize} \clearpage \subsection*{Instructions} \begin{enumerate} \item Make the \nameref{viennoiserie} dough if you have not yet, this will take roughly 8 hours. \item Take the recently chilled and turned dough from the fridge. \item Lightly flour your surface. \item Lightly butter your baking sheet. \item Roll out the dough into a rectangle, about 24x6 inches. \item Cut the rectangle in half, so you have two 12x6 inch rectanlges. \item Cover and chill one rectangle while working with the other. \item Roll the remaining rectangle into a longer rectangle, about 15x7 inches. \item Cut the rectangle into three rectangles, each about 5x7 inches. \item Take one rectangle, roll it into a square, about 6x6 inches. \item Cut the rectangle along the diagonal. \item Take one of these right triangles and flare out the two shorter corners to match. \item Take the two shorter corners and roll the pastry up towards the long point. \item Tuck the end of the long point under the now-rolled pastry and bend the ends to shape into a crescent. \item Place the pastry on a baking sheet, do not crowd the pastries. \item Repeat with each triangle and rectangle to get 12 croissants. \item Make an egg wash by mixing 1 egg and 1 teaspoon water in a small bowl. \item Once all croissants are on baking sheet, cover lightly in egg wash. \item Pre-heat oven to 455F. \item Allow croissants to rise (double in size) on the baking sheet. \item Place baking sheet in oven, and allow to bake until golden brown, about 18-20 minutes. \item Allow croissants to cool 10-15 minutes before consuming. \end{enumerate} \subsection*{Notes} \begin{itemize} \item This is based on the Croissant recipe of Julia Child and Simone Beck, as seen in Mastering the Art of French Cooking, Volume 2, page 96 and on "The French Chef", episode 1. \begin{itemize} \item Main difference is in my \nameref{viennoiserie} dough, I have different pastry flour, and use of weights over volumetric measurements. \end{itemize} \item While the book gives a lot of detail, watching someone shape the dough is the best way to learn. I recommend watching the episode, which can be seen here: \url{https://www.youtube.com/watch?v=uZmrvEfhfsg}. \item This recipe, while French in origin, is based on American ingredients and ovens. \item While croissants are best served fresh, they can be frozen after cooling completely. Seal in an air-tight container in the freezer. \item To reheat, place on a lightly buttered baking sheet into a 400F pre-heated oven for about 10 minutes. \item I often make by dough then make half \nameref{painAuChocolat} and half croissant. \item A huge thanks to Nicolas Bidron and Nicolas Guigo for inspiring my French baking. \end{itemize} \end{multicols} \clearpage
{"hexsha": "221fd875ad21bc05628dfc2bee14c815b037ad43", "size": 3478, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/breads/croissant.tex", "max_stars_repo_name": "calebwatt15/caleb-watt-cookbook", "max_stars_repo_head_hexsha": "abddcdb60e9422d63d945e7a9ec019c0288e34d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-03-10T06:39:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T18:06:54.000Z", "max_issues_repo_path": "chapters/breads/croissant.tex", "max_issues_repo_name": "calebwatt15/caleb-watt-cookbook", "max_issues_repo_head_hexsha": "abddcdb60e9422d63d945e7a9ec019c0288e34d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/breads/croissant.tex", "max_forks_repo_name": "calebwatt15/caleb-watt-cookbook", "max_forks_repo_head_hexsha": "abddcdb60e9422d63d945e7a9ec019c0288e34d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.696969697, "max_line_length": 215, "alphanum_fraction": 0.7441058079, "num_tokens": 909}
# -*- coding: utf-8 -*- """ Created on Thu Jun 3 17:29:01 2021 @author: Luigi """ import allMethods as fz #import funzioni_zeri as fz import numpy as np import sympy as sym import sympy.utilities.lambdify x = sym.symbols("x") fx = x**3 + x**2 - 33*x + 63 dfx = sym.diff(fx, x, 1) f = sym.lambdify(x, fx, np) df = sym.lambdify(x, dfx, np) x0 = 1 tolx = 1e-12 tolf = tolx xkNew, itNew, xksNew = fz.newton(f, df, x0, tolx, tolf, 500) xkNewM, itNewM, xksNewM = fz.newtonModificato(f, df, x0, tolx, tolf, 500, 2) convNewton = fz.stimaOrdine(xksNew, itNew-1) convMod = fz.stimaOrdine(xksNewM, itNewM-1) print(f"Newton normale -> {convNewton}") print(f"Newton modificato -> {convMod}")
{"hexsha": "ccbb73799dc8c2638ecea4fd0cfc8a6fe12a1b5f", "size": 720, "ext": "py", "lang": "Python", "max_stars_repo_path": "zeri_di_funzione/esercizi/4.py", "max_stars_repo_name": "luigi-borriello00/Metodi_SIUMerici", "max_stars_repo_head_hexsha": "cf1407c0ad432a49a96dcd08303213e48723c57a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-06-23T14:47:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-07T08:39:27.000Z", "max_issues_repo_path": "zeri_di_funzione/esercizi/4.py", "max_issues_repo_name": "luigi-borriello00/Metodi_SIUMerici", "max_issues_repo_head_hexsha": "cf1407c0ad432a49a96dcd08303213e48723c57a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "zeri_di_funzione/esercizi/4.py", "max_forks_repo_name": "luigi-borriello00/Metodi_SIUMerici", "max_forks_repo_head_hexsha": "cf1407c0ad432a49a96dcd08303213e48723c57a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.8181818182, "max_line_length": 77, "alphanum_fraction": 0.6416666667, "include": true, "reason": "import numpy,import sympy", "num_tokens": 278}
__doc__ = """Timoshenko beam validation case, for detailed explanation refer to Gazzola et. al. R. Soc. 2018 section 3.4.3 """ import numpy as np import sys # FIXME without appending sys.path make it more generic sys.path.append("../../") from elastica import * from examples.TimoshenkoBeamCase.timoshenko_postprocessing import plot_timoshenko class TimoshenkoBeamSimulator(BaseSystemCollection, Constraints, Forcing): pass timoshenko_sim = TimoshenkoBeamSimulator() final_time = 5000 # Options PLOT_FIGURE = True SAVE_FIGURE = False SAVE_RESULTS = False ADD_UNSHEARABLE_ROD = False # setting up test params n_elem = 100 start = np.zeros((3,)) direction = np.array([0.0, 0.0, 1.0]) normal = np.array([0.0, 1.0, 0.0]) base_length = 3.0 base_radius = 0.25 base_area = np.pi * base_radius ** 2 density = 5000 nu = 0.1 E = 1e6 # For shear modulus of 1e4, nu is 99! poisson_ratio = 99 shearable_rod = CosseratRod.straight_rod( n_elem, start, direction, normal, base_length, base_radius, density, nu, E, poisson_ratio, ) timoshenko_sim.append(shearable_rod) timoshenko_sim.constrain(shearable_rod).using( OneEndFixedRod, constrained_position_idx=(0,), constrained_director_idx=(0,) ) end_force = np.array([-15.0, 0.0, 0.0]) timoshenko_sim.add_forcing_to(shearable_rod).using( EndpointForces, 0.0 * end_force, end_force, ramp_up_time=final_time / 2.0 ) if ADD_UNSHEARABLE_ROD: # Start into the plane unshearable_start = np.array([0.0, -1.0, 0.0]) unshearable_rod = CosseratRod.straight_rod( n_elem, unshearable_start, direction, normal, base_length, base_radius, density, nu, E, # Unshearable rod needs G -> inf, which is achievable with -ve poisson ratio poisson_ratio=-0.7, ) timoshenko_sim.append(unshearable_rod) timoshenko_sim.constrain(unshearable_rod).using( OneEndFixedRod, constrained_position_idx=(0,), constrained_director_idx=(0,) ) timoshenko_sim.add_forcing_to(unshearable_rod).using( EndpointForces, 0.0 * end_force, end_force, ramp_up_time=final_time / 2.0 ) timoshenko_sim.finalize() timestepper = PositionVerlet() # timestepper = PEFRL() dl = base_length / n_elem dt = 0.01 * dl total_steps = int(final_time / dt) print("Total steps", total_steps) integrate(timestepper, timoshenko_sim, final_time, total_steps) if PLOT_FIGURE: plot_timoshenko(shearable_rod, end_force, SAVE_FIGURE, ADD_UNSHEARABLE_ROD) if SAVE_RESULTS: import pickle filename = "Timoshenko_beam_data.dat" file = open(filename, "wb") pickle.dump(shearable_rod, file) file.close()
{"hexsha": "7cf7d592f76dbfd03515e4ec2f84352e35568daf", "size": 2695, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/TimoshenkoBeamCase/timoshenko.py", "max_stars_repo_name": "zhidou2/PyElastica", "max_stars_repo_head_hexsha": "0f5502bc5349ab5e5dc794d8dfc82b7c2bd69eb6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 71, "max_stars_repo_stars_event_min_datetime": "2020-04-15T17:02:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T04:53:51.000Z", "max_issues_repo_path": "examples/TimoshenkoBeamCase/timoshenko.py", "max_issues_repo_name": "zhidou2/PyElastica", "max_issues_repo_head_hexsha": "0f5502bc5349ab5e5dc794d8dfc82b7c2bd69eb6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 59, "max_issues_repo_issues_event_min_datetime": "2020-05-15T03:51:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T13:53:01.000Z", "max_forks_repo_path": "examples/TimoshenkoBeamCase/timoshenko.py", "max_forks_repo_name": "zhidou2/PyElastica", "max_forks_repo_head_hexsha": "0f5502bc5349ab5e5dc794d8dfc82b7c2bd69eb6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 57, "max_forks_repo_forks_event_min_datetime": "2020-06-17T20:34:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T08:09:54.000Z", "avg_line_length": 24.7247706422, "max_line_length": 84, "alphanum_fraction": 0.7120593692, "include": true, "reason": "import numpy", "num_tokens": 822}
Require Import Crypto.Arithmetic.PrimeFieldTheorems. Require Import Crypto.Specific.montgomery64_2e416m2e208m1_7limbs.Synthesis. (* TODO : change this to field once field isomorphism happens *) Definition add : { add : feBW_small -> feBW_small -> feBW_small | forall a b, phiM_small (add a b) = F.add (phiM_small a) (phiM_small b) }. Proof. Set Ltac Profiling. Time synthesize_add (). Show Ltac Profile. Time Defined. Print Assumptions add.
{"author": "anonymous-code-submission-01", "repo": "sp2019-54-code", "sha": "8867f5bed0821415ec99f593b1d61f715ed4f789", "save_path": "github-repos/coq/anonymous-code-submission-01-sp2019-54-code", "path": "github-repos/coq/anonymous-code-submission-01-sp2019-54-code/sp2019-54-code-8867f5bed0821415ec99f593b1d61f715ed4f789/src/Specific/montgomery64_2e416m2e208m1_7limbs/feadd.v"}
#!/usr/bin/env python # -*- coding: latin1 -*- import scipy as sp import matplotlib.pyplot as plt # Get data from external file file = "./data/web_traffic.tsv" data = sp.genfromtxt(file, delimiter="\t") # all examples will have three classes in this file colors = ['g', 'k', 'b', 'm', 'r'] linestyles = ['-', '-.', '--', ':', '-'] # Divide into two lists x = data[:, 0] y = data[:, 1] # Remove NAN x = x[~sp.isnan(y)] y = y[~sp.isnan(y)] fx = sp.linspace(0, x[-1], 1000) # Plot data in graphics def plot_data(x, y, models, mx=None, ymax=None, xmin=None): plt.clf() plt.scatter(x, y, s=10) plt.title("Web traffic over the last month") plt.xlabel("Time") plt.ylabel("Hits/hour") plt.xticks([w * 7 * 24 for w in range(10)], ['week %i' % w for w in range(10)]) if models: if mx is None: mx = sp.linspace(0, x[-1], 1000) for model, style, color in zip(models, linestyles, colors): plt.plot(mx, model(mx), linestyle=style, linewidth=2, c=color) plt.legend(["d=%i" % m.order for m in models], loc="upper left") plt.autoscale(tight=True) plt.ylim(ymin=0) if ymax: plt.ylim(ymax=ymax) if xmin: plt.xlim(xmin=xmin) plt.grid(True, linestyle='-', color='0.75') plt.show() # Divide data into two blocks, separated at 3.5 weeks inflection = 3.5 * 7 * 24 xa = x[:inflection] # before the inflection point ya = y[:inflection] xb = x[inflection:] # after the inflection point yb = y[inflection:] fa = sp.poly1d(sp.polyfit(xa, ya, 1)) fb = sp.poly1d(sp.polyfit(xb, yb, 1)) plot_data(x, y, [fa, fb])
{"hexsha": "973b8fa1b6a0e5a4da26cfe49fd778a92a1c699a", "size": 1612, "ext": "py", "lang": "Python", "max_stars_repo_path": "web_traffic-2.py", "max_stars_repo_name": "aricarmona/machine-learning-python", "max_stars_repo_head_hexsha": "884cafaddc2b1e623c4701bfaa9fb6c9221f9e18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "web_traffic-2.py", "max_issues_repo_name": "aricarmona/machine-learning-python", "max_issues_repo_head_hexsha": "884cafaddc2b1e623c4701bfaa9fb6c9221f9e18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "web_traffic-2.py", "max_forks_repo_name": "aricarmona/machine-learning-python", "max_forks_repo_head_hexsha": "884cafaddc2b1e623c4701bfaa9fb6c9221f9e18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-30T12:54:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-30T12:54:47.000Z", "avg_line_length": 26.0, "max_line_length": 83, "alphanum_fraction": 0.6023573201, "include": true, "reason": "import scipy", "num_tokens": 507}
# -*- coding: utf-8 -*- """ TODO: """ import numpy as np from scipy import interpolate def t_list(mb_solve, speed_of_light): """ Return the time points shifted to the fixed (lab) frame of reference given a speed-of-light. Args: mb_solve: An MBSolve object speed_of_light: The speed of light in the system. Returns: Array of time values in the fixed frame of reference. """ t_scale = 1.0 + mb_solve.z_max/(speed_of_light * mb_solve.t_max) return mb_solve.tlist*t_scale def rabi_freq(mb_solve, field_idx, speed_of_light, part='real', interp_kind='linear'): """ Return the field results shifted to the fixed (lab) frame of reference given a speed-of-light by interpolation. Can return the real part or abs value. Args: mb_solve: An MBSolve object field_idx: The field to return speed_of_light: The speed of light in the system part: Which part of the complex field ('real' or 'abs') interp_kind: The kind of spline interpolation to use ('linear', 'cubic' or 'quintic') Returns: Array[num_fields, num_space_points, num_time_points] of field values in the fixed frame of reference. """ if part == 'abs': rabi_freq_zt = np.abs(mb_solve.Omegas_zt[field_idx]) elif part == 'real': rabi_freq_zt = np.real(mb_solve.Omegas_zt[field_idx]) else: raise ValueError('Invalid part. Try "abs" or "real"') rabi_freq_intp = interpolate.interp2d(mb_solve.tlist, mb_solve.zlist, rabi_freq_zt, bounds_error=False, fill_value=0., kind=interp_kind) rabi_freq_fixed = np.zeros(mb_solve.Omegas_zt[field_idx].shape, dtype=np.float) for i, z_i in enumerate(mb_solve.zlist): rabi_freq_fixed[i] = rabi_freq_intp(t_list(mb_solve, speed_of_light) - z_i / speed_of_light, z_i) return rabi_freq_fixed def rabi_freq_abs(mb_solve, field_idx, speed_of_light, interp_kind='linear'): """ DEPRECATED. Use rabi_freq with part='abs'. Return the absolute value of the complex solved field results shifted to the fixed (lab) frame of reference given a speed-of-light by interpolation. Args: mb_solve: An MBSolve object field_idx: The field to return speed_of_light: The speed of light in the system interp_kind: The kind of spline interpolation to use ('linear', 'cubic' or 'quintic') Returns: Array[num_fields, num_space_points, num_time_points] of field values in the fixed frame of reference. """ return rabi_freq(mb_solve, field_idx, speed_of_light, part='abs', interp_kind=interp_kind)
{"hexsha": "6a9cba2b3fc355c5a2caa274e95a2ab3bd067ffd", "size": 2813, "ext": "py", "lang": "Python", "max_stars_repo_path": "maxwellbloch/fixed.py", "max_stars_repo_name": "amcdawes/maxwellbloch", "max_stars_repo_head_hexsha": "48b5301ccfa24704a4240125d377b1448d5591d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "maxwellbloch/fixed.py", "max_issues_repo_name": "amcdawes/maxwellbloch", "max_issues_repo_head_hexsha": "48b5301ccfa24704a4240125d377b1448d5591d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "maxwellbloch/fixed.py", "max_forks_repo_name": "amcdawes/maxwellbloch", "max_forks_repo_head_hexsha": "48b5301ccfa24704a4240125d377b1448d5591d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3048780488, "max_line_length": 79, "alphanum_fraction": 0.6491290437, "include": true, "reason": "import numpy,from scipy", "num_tokens": 673}
# Created by Dennis Willsch (d.willsch@fz-juelich.de) # Modified by Gabriele Cavallaro (g.cavallaro@fz-juelich.de) import os import sys import re import numpy as np import numpy.lib.recfunctions as rfn import matplotlib.pyplot as plt from utils import * import shutil import pickle import numpy.lib.recfunctions as rfn from dwave.system.samplers import DWaveSampler from dwave.system.composites import EmbeddingComposite from dimod import BinaryQuadraticModel def gen_svm_qubos(B,K,xi,gamma,path_data_key,data_key,path_out): data,label = loaddataset(path_data_key+data_key) N = len(data) Q = np.zeros((K*N,K*N)) print(f'Creating the QUBO of size {Q.shape}') for n in range(N): # not optimized: size will not be so large and this way its more easily verifyable for m in range(N): for k in range(K): for j in range(K): Q[K*n+k,K*m+j] = .5 * B**(k+j) * label[n] * label[m] * (kernel(data[n], data[m], gamma) + xi) if n == m and k == j: Q[K*n+k,K*m+j] += - B**k print(f'Extracting nodes and couplers') Q = np.triu(Q) + np.tril(Q,-1).T # turn the symmetric matrix into upper triangular qubo_nodes = np.asarray([[n, n, Q[n,n]] for n in range(len(Q))]) # if not np.isclose(Q[n,n],0)]) NOTE: removed due to variable order! qubo_couplers = np.asarray([[n, m, Q[n,m]] for n in range(len(Q)) for m in range(n+1,len(Q)) if not np.isclose(Q[n,m],0)]) qubo_couplers = qubo_couplers[np.argsort(-np.abs(qubo_couplers[:,2]))] #path = f'runs/run{data_key}_B={B}_K={K}_xi={xi}_gamma={gamma}/' path = f'{path_out}run{data_key}_B={B}_K={K}_xi={xi}_gamma={gamma}/' print(f'Saving {len(qubo_nodes)} nodes and {len(qubo_couplers)} couplers for {path}') os.makedirs(path, exist_ok=True) np.save(path+'Q.npy', Q) np.savetxt(path+'qubo_nodes.dat', qubo_nodes, fmt='%g', delimiter='\t') np.savetxt(path+'qubo_couplers.dat', qubo_couplers, fmt='%g', delimiter='\t') return path def dwave_run(path_data_key,path_in): MAXRESULTS = 20 # NOTE: to save space only 20 best results match = re.search('run([^/]*)_B=(.*)_K=(.*)_xi=(.*)_gamma=([^/]*)', path_in) data_key = match.group(1) B = int(match.group(2)) K = int(match.group(3)) xi = float(match.group(4)) gamma = float(match.group(5)) data,label = loaddataset(path_data_key+data_key) path = path_in+ ('/' if path_in[-1] != '/' else '') qubo_couplers = np.loadtxt(path+'qubo_couplers.dat') qubo_nodes = np.loadtxt(path+'qubo_nodes.dat') qubo_nodes = np.array([[i,i,(qubo_nodes[qubo_nodes[:,0]==i,2][0] if i in qubo_nodes[:,0] else 0.)] for i in np.arange(np.concatenate((qubo_nodes,qubo_couplers))[:,[0,1]].max()+1)]) # to make sure every (i,i) occurs in the qubo in increasing order such that the variable order in BinaryQuadraticModel is consistent (see locate wrongenergies-* github issue) maxcouplers = len(qubo_couplers) ## POSSIBLE INPUT if len(sys.argv) <= 2 else int(sys.argv[2]) if not 'train' in data_key: raise Exception(f'careful: datakey={data_key} => youre trying to train on a validation / test set!') couplerslist = [maxcouplers] for trycouplers in [2500, 2000, 1800, 1600, 1400, 1200, 1000, 500]: if maxcouplers > trycouplers: couplerslist += [trycouplers] sampler = EmbeddingComposite(DWaveSampler()) for couplers in couplerslist: # try to reduce as little couplers as necessary to find an embedding Q = { (q[0], q[1]): q[2] for q in np.vstack((qubo_nodes, qubo_couplers[:couplers])) } pathsub = path + f'result_couplers={couplers}/' os.makedirs(pathsub, exist_ok=True) print(f'running {pathsub} with {len(qubo_nodes)} nodes and {couplers} couplers') ordering = np.array(list(BinaryQuadraticModel.from_qubo(Q))) if not (ordering == np.arange(len(ordering),dtype=ordering.dtype)).all(): print(f'WARNING: variables are not correctly ordered! path={path} ordering={ordering}') try: response = sampler.sample_qubo(Q, num_reads=10000) # NOTE: it may be worth trying to pass additional parameters such as # annealing_time and chain_strength here. Regarding the latter, # if the scale of the Qij is very different from 1, one should not use # the default chain_strength=1 for the embedding here because the # embedding would not use properly scaled strengths to tie physical qubits together # (This will show up in a large chain_break_fraction) # Instead, a useful approach is to set # chain_strength = r * max(abs(Qij)) # for r = 3.0, 2.5, 2.0, 1.5, 1.0, 0.9, 0.8, ... # until the best chain_strength is found. except ValueError as v: print(f' -- no embedding found, removing {pathsub} and trying less couplers') shutil.rmtree(pathsub) continue break save_json(pathsub+'info.json', response.info) # contains response.info #NOTE left out: pickle.dump(response, open(pathsub+'response.pkl','wb')) # contains full response incl. response.record etc; can be loaded with pickle.load(open('response.pkl','rb')) samples = np.array([''.join(map(str,sample)) for sample in response.record['sample']]) # NOTE: it would be safer to use the labeling from record.data() for the qubit variable order unique_samples, unique_idx, unique_counts = np.unique(samples, return_index=True, return_counts=True) # unfortunately, num_occurrences seems not to be added up after unembedding unique_records = response.record[unique_idx] result = rfn.merge_arrays((unique_samples, unique_records['energy'], unique_counts, unique_records['chain_break_fraction'])) # see comment on chain_strength above result = result[np.argsort(result['f1'])] np.savetxt(pathsub+'result.dat', result[:MAXRESULTS], fmt='%s', delimiter='\t', header='\t'.join(response.record.dtype.names), comments='') # load with np.genfromtxt(..., dtype=['<U2000',float,int,float], names=True, encoding=None) alphas = np.array([decode(sample,B,K) for sample in result['f0'][:MAXRESULTS]]) np.save(pathsub+f'alphas{data_key}_gamma={gamma}.npy', alphas) return pathsub def eval_run_trainaccuracy(path_in): regex = 'run([^/]*)_B=(.*)_K=(.*)_xi=(.*)_gamma=([^/]*)/result_couplers.*/?$' match = re.search(regex, path_in) path = path_in + ('/' if path_in[-1] != '/' else '') data_key = match.group(1) B = int(match.group(2)) K = int(match.group(3)) xi = float(match.group(4)) gamma = float(match.group(5)) data,label = loaddataset(data_key) alphas_file = path+f'alphas{data_key}_gamma={gamma}.npy' if not os.path.isfile(alphas_file): print('result '+alphas_file+' doesnt exist, exiting') sys.exit(-1) alphas = np.atleast_2d(np.load(alphas_file)) nalphas = len(alphas) assert len(data) == alphas.shape[1], "alphas do not seem to be for the right data set?)" result = np.genfromtxt(path+'result.dat', dtype=['<U2000',float,int,float], names=True, encoding=None, max_rows=nalphas) Cs = [100, 10, (B**np.arange(K)).sum(), 1.5] evaluation = np.zeros(nalphas, dtype=[('sum_antn',float)]+[(f'acc(C={C})',float) for C in Cs]) for n,alphas_n in enumerate(alphas): evaluation[n]['sum_antn'] = (label * alphas_n).sum() for j,field in enumerate(evaluation.dtype.names[1:]): b = eval_offset_avg(alphas_n, data, label, gamma, Cs[j]) # NOTE: this is NAN if no support vectors were found, see TODO file label_predicted = np.sign(eval_classifier(data, alphas_n, data, label, gamma, b)) # NOTE: this is only train accuracy! (see eval_result_roc*) evaluation[n][field] = (label == label_predicted).sum() / len(label) result_evaluated = rfn.merge_arrays((result,evaluation), flatten=True) fmt = '%s\t%.3f\t%d\t%.3f' + '\t%.3f'*len(evaluation.dtype.names) #NOTE: left out # np.savetxt(path+'result_evaluated.dat', result_evaluated, fmt=fmt, delimiter='\t', header='\t'.join(result_evaluated.dtype.names), comments='') # load with np.genfromtxt(..., dtype=['<U2000',float,int,float,float,float,float,float], names=True, encoding=None) print(result_evaluated.dtype.names) print(result_evaluated) def eval_run_rocpr_curves(path_data_key,path_in,plotoption): regex = 'run([^/]*)_B=(.*)_K=(.*)_xi=(.*)_gamma=([^/]*)/result_couplers.*/?$' match = re.search(regex, path_in) path = path_in + ('/' if path_in[-1] != '/' else '') data_key = match.group(1) B = int(match.group(2)) K = int(match.group(3)) xi = float(match.group(4)) gamma = float(match.group(5)) data,label = loaddataset(path_data_key+data_key) dwavesolutionidx=0 C=(B**np.arange(K)).sum() if 'calibtrain' in data_key: testname = 'Validation' datatest,labeltest = loaddataset(path_data_key+data_key.replace('calibtrain','calibval')) else: print('be careful: this does not use the aggregated bagging classifier but only the simple one as in calibration') testname = 'Test' datatest,labeltest = loaddataset(re.sub('train(?:set)?[0-9]*(?:bag)[0-9]*','test',data_key)) alphas_file = path+f'alphas{data_key}_gamma={gamma}.npy' if not os.path.isfile(alphas_file): print('result '+alphas_file+' doesnt exist, exiting') sys.exit(-1) alphas = np.atleast_2d(np.load(alphas_file)) nalphas = len(alphas) assert len(data) == alphas.shape[1], "alphas do not seem to be for the right data set?)" print('idx \tsum_antn\ttrainacc\ttrainauroc\ttrainauprc\ttestacc \ttestauroc\ttestauprc') trainacc_all=np.zeros([nalphas]) trainauroc_all=np.zeros([nalphas]) trainauprc_all=np.zeros([nalphas]) testacc_all=np.zeros([nalphas]) testauroc_all=np.zeros([nalphas]) testauprc_all=np.zeros([nalphas]) for i in range(nalphas): alphas_n = alphas[i] b = eval_offset_avg(alphas_n, data, label, gamma, C) # NOTE: this is NAN if no support vectors were found, see TODO file score = eval_classifier(data, alphas_n, data, label, gamma, b) scoretest = eval_classifier(datatest, alphas_n, data, label, gamma, b) trainacc,trainauroc,trainauprc = eval_acc_auroc_auprc(label,score) testacc,testauroc,testauprc = eval_acc_auroc_auprc(labeltest,scoretest) trainacc_all[i]=trainacc trainauroc_all[i]=trainauroc trainauprc_all[i]=trainauprc testacc_all[i]=testacc testauroc_all[i]=testauroc testauprc_all[i]=testauprc print(f'{i}\t{(label*alphas_n).sum():8.4f}\t{trainacc:8.4f}\t{trainauroc:8.4f}\t{trainauprc:8.4f}\t{testacc:8.4f}\t{testauroc:8.4f}\t{testauprc:8.4f}') # plot code starts here if plotoption != 'noplotsave': alphas_n = alphas[dwavesolutionidx] # plot only the requested b = eval_offset_avg(alphas_n, data, label, gamma, C) # NOTE: this is NAN if no support vectors were found, see TODO file score = eval_classifier(data, alphas_n, data, label, gamma, b) scoretest = eval_classifier(datatest, alphas_n, data, label, gamma, b) # roc curve plt.figure(figsize=(6.4,3.2)) plt.subplot(1,2,1) plt.subplots_adjust(top=.95, right=.95, bottom=.15, wspace=.3) fpr, tpr, thresholds = roc_curve(labeltest, scoretest) auroc = roc_auc_score(labeltest, scoretest) plt.plot(fpr, tpr, label='AUROC=%0.3f' % auroc, color='g') plt.fill_between(fpr, tpr, alpha=0.2, color='g', step='post') plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.0]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') #plt.title('Receiver Operating Curve') plt.legend(loc="lower right") # pr curve plt.subplot(1,2,2) precision, recall, _ = precision_recall_curve(labeltest, scoretest) auprc = auc(recall, precision) plt.step(recall, precision, color='g', where='post', label='AUPRC=%0.3f' % auprc) plt.fill_between(recall, precision, alpha=0.2, color='g', step='post') plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.0]) plt.xlim([0.0, 1.0]) #plt.title('PR curve') plt.legend(loc="lower right") # save the data for gnuplot savename = f'{path.replace("/","_")}{dwavesolutionidx}' #with open('results/rocpr_curves/'+savename,'w') as out: with open(path_in+savename,'w') as out: out.write(f'AUROC\t{auroc:0.3f}\t# ROC:FPR,TPR\n') assert len(fpr) == len(tpr) for i in range(len(fpr)): out.write(f'{fpr[i]}\t{tpr[i]}\n') out.write(f'\n\nAUPRC\t{auprc:0.3f}\t# PRC:Recall,Precision\n') assert len(recall) == len(precision) for i in range(len(recall)): out.write(f'{recall[i]}\t{precision[i]}\n') print(f'saved data for {savename}') if plotoption == 'saveplot': savefigname = path_in+savename+'.pdf' plt.savefig(savefigname) print(f'saved as {savefigname}') else: plt.show() return np.average(trainacc_all), np.average(trainauroc_all), np.average(trainauprc_all) ,np.average(testacc_all), np.average(testauroc_all), np.average(testauprc_all) def predict(path_data_key,path_in,datatest): regex = 'run([^/]*)_B=(.*)_K=(.*)_xi=(.*)_gamma=([^/]*)/result_couplers.*/?$' match = re.search(regex, path_in) path = path_in + ('/' if path_in[-1] != '/' else '') data_key = match.group(1) B = int(match.group(2)) K = int(match.group(3)) xi = float(match.group(4)) gamma = float(match.group(5)) data,label = loaddataset(path_data_key+data_key) C=(B**np.arange(K)).sum() # Load the alphas (20xnumber of samples) #alphas=np.load(path_files[y]+f'alphas{data_key}{i}_{y}_gamma={gamma}.npy') alphas = np.atleast_2d(np.load(path_in+f'alphas{data_key}_gamma={gamma}.npy')) nalphas = len(alphas) #print(nalphas) # Compute the mean of the alphas alphas_avg=np.mean(alphas,axis=0) b = eval_offset_avg(alphas_avg, data, label, gamma, C) # NOTE: this is NAN if no support vectors were found, see TODO file scoretest = eval_classifier(datatest, alphas_avg, data, label, gamma, b) return scoretest
{"hexsha": "2a1a6f64635f67ddfa87261639780f4a9d82f3cb", "size": 15189, "ext": "py", "lang": "Python", "max_stars_repo_path": "quantum_SVM.py", "max_stars_repo_name": "GaIbatorix/Quantum-SVM", "max_stars_repo_head_hexsha": "30e2d7378ac6e19a4ba062b92970a9e8033ad525", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "quantum_SVM.py", "max_issues_repo_name": "GaIbatorix/Quantum-SVM", "max_issues_repo_head_hexsha": "30e2d7378ac6e19a4ba062b92970a9e8033ad525", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "quantum_SVM.py", "max_forks_repo_name": "GaIbatorix/Quantum-SVM", "max_forks_repo_head_hexsha": "30e2d7378ac6e19a4ba062b92970a9e8033ad525", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.3177570093, "max_line_length": 360, "alphanum_fraction": 0.6194614524, "include": true, "reason": "import numpy", "num_tokens": 4224}
""" MonteCarloModel(core, dates, paths) A `MonteCarloModel` is the result of a simulation of a series of asset prices. * `core`: a reference `CoreModel` * `dates`: an `AbstractVector{Date}` * `paths`: a matrix of the scenario paths: the rows are the scenarios, and the columns are the values at each date in `dates`. """ struct MonteCarloModel{C,D,T} <: AbstractModel core::C dates::D paths::Matrix{T} end """ MonteCarloScenario(core, dates, path) A `MonteCarloScenario` is a single simulation scenario of a `MonteCarloModel`. * `core`: a reference `CoreModel` * `dates`: an `AbstractVector{Date}` * `paths`: an `AbstractVector` of the values at each date in `dates`. """ struct MonteCarloScenario{C,D,S} <: AbstractModel core::C dates::D path::S end numeraire(m::Union{MonteCarloModel, MonteCarloScenario}) = numeraire(m.core) startdate(m::Union{MonteCarloModel, MonteCarloScenario}) = startdate(m.core) yearfractionto(m::Union{MonteCarloModel, MonteCarloScenario}, dt::Date) = yearfractionto(m.core, dt) # the value of currency is the same under every scenario value(m::MonteCarloModel, c::WhenAt{Receive{T}}) where {T} = value(m.core,c) value(m::MonteCarloScenario, c::WhenAt{Receive{T}}) where {T} = value(m.core,c) struct ScenarioIterator{M<:MonteCarloModel} m::M n::Int end """ scenarios(m::MonteCarloModel) Returns an iterator over each `MonteCarloScenario` in `m`. """ scenarios(m::MonteCarloModel) = ScenarioIterator(m, size(m.paths, 1)) Base.length(sc::ScenarioIterator) = sc.n Base.iterate(sc::ScenarioIterator, i::Int=1) = i > sc.n ? nothing : (MonteCarloScenario(sc.m.core, sc.m.dates, view(sc.m.paths, i, :)), i+1) """ date2index(m::Union{MonteCarloScenario, MonteCarloModel}, dt::Date) Returns the index of `dt` in the path(s) of `m`. """ function date2index(m::Union{MonteCarloScenario, MonteCarloModel}, dt::Date) ii = searchsorted(m.dates, dt) isempty(ii) && throw(DomainError(dt)) return ii[1] end index2date(m::Union{MonteCarloScenario, MonteCarloModel}, i) = m.dates[i] function forwardprice(m::MonteCarloScenario, s::SingleStock, dt::Date) valueat(m,s,date2index(m,dt)) end function forwardprice(m::MonteCarloModel, s::SingleStock, dt::Date) mean(forwardprice(ms, s, dt) for ms in scenarios(m)) end valueat(m::MonteCarloScenario, ::SingleStock, i::Int) = m.path[i] valueat(m::MonteCarloScenario, ::SingleStock, i::Int, ::Type{Dual}) = Dual(m.path[i], 1) # fallback function value(m::MonteCarloModel, c::WhenAt) N = date2index(m, maturitydate(c)) discount(m.core.yieldcurve, maturitydate(c)) * mean(valueat(ms, c.c, N) for ms in scenarios(m)) end """ montecarlo(m::GeomBMModel, dates, npaths) Sample `npaths` Monte Carlo paths of the model `m`, at time `dates`. """ function montecarlo(m::GeomBMModel{CoreModel{T,R,Q}, V}, dates::StepRange{Date}, npaths::Integer) where {T,R,Q,V} σ = m.volatility S = typeof(m.core.yieldcurve.rate) Xt = Array{promote_type(T,V,S)}(undef, length(dates), npaths) Δt = yearfraction(daycount(m.core.yieldcurve), step(dates)) df = discount(m.core.carrycurve, Δt) / discount(m.core.yieldcurve, Δt) for i = 1:npaths x = value(m, SingleStock()) for (j, dt) in enumerate(dates) if j == 1 Δt1 = yearfraction(daycount(m.core.yieldcurve), startdate(m), first(dates)) df1 = Δt1 == 0 ? 1.0 : discount(m.core.carrycurve, Δt1) / discount(m.core.yieldcurve, Δt1) x *= df1 * exp(-σ^2*Δt1/2 + σ*sqrt(Δt1)*randn()) else x *= df * exp(-σ^2*Δt/2 + σ*sqrt(Δt)*randn()) end Xt[j,i] = x end end MonteCarloModel(m.core, dates, copy(transpose(Xt))) end function value(m::GeomBMModel, c::Contract, ::Type{MonteCarloModel}, dates::StepRange{Date}, npaths::Integer) mcm = montecarlo(m, dates, npaths) value(mcm, c) end value(m::GeomBMModel, c::Contract, ::Type{MonteCarloModel}, npaths::Integer) = value(m, c, MonteCarloModel, startdate(m):Day(1):maturitydate(c), npaths)
{"hexsha": "8619ef087b82a2967c3f992ac0b415b76c9ad3d3", "size": 4108, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/models/montecarlo.jl", "max_stars_repo_name": "alecloudenback/Miletus.jl", "max_stars_repo_head_hexsha": "5863620f254fe234001e815d03ef603df0204a1a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 67, "max_stars_repo_stars_event_min_datetime": "2019-05-18T12:52:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T18:27:10.000Z", "max_issues_repo_path": "src/models/montecarlo.jl", "max_issues_repo_name": "alecloudenback/Miletus.jl", "max_issues_repo_head_hexsha": "5863620f254fe234001e815d03ef603df0204a1a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-01-09T02:20:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-27T16:11:04.000Z", "max_forks_repo_path": "src/models/montecarlo.jl", "max_forks_repo_name": "alecloudenback/Miletus.jl", "max_forks_repo_head_hexsha": "5863620f254fe234001e815d03ef603df0204a1a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2019-05-18T19:13:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T12:25:00.000Z", "avg_line_length": 31.1212121212, "max_line_length": 126, "alphanum_fraction": 0.6708860759, "num_tokens": 1280}
from tkinter import * from tkinter import messagebox import numpy as np import pandas as pd l1=['itching','skin_rash','nodal_skin_eruptions','continuous_sneezing','shivering','chills','joint_pain', 'stomach_pain','acidity','ulcers_on_tongue','muscle_wasting','vomiting','burning_micturition','spotting_ urination','fatigue', 'weight_gain','anxiety','cold_hands_and_feets','mood_swings','weight_loss','restlessness','lethargy','patches_in_throat', 'irregular_sugar_level','cough','high_fever','sunken_eyes','breathlessness','sweating','dehydration','indigestion', 'headache','yellowish_skin','dark_urine','nausea','loss_of_appetite','pain_behind_the_eyes','back_pain','constipation', 'abdominal_pain','diarrhoea','mild_fever','yellow_urine','yellowing_of_eyes','acute_liver_failure','fluid_overload', 'swelling_of_stomach','swelled_lymph_nodes','malaise','blurred_and_distorted_vision','phlegm','throat_irritation', 'redness_of_eyes','sinus_pressure','runny_nose','congestion','chest_pain','weakness_in_limbs','fast_heart_rate', 'pain_during_bowel_movements','pain_in_anal_region','bloody_stool','irritation_in_anus','neck_pain','dizziness','cramps', 'bruising','obesity','swollen_legs','swollen_blood_vessels','puffy_face_and_eyes','enlarged_thyroid','brittle_nails', 'swollen_extremeties','excessive_hunger','extra_marital_contacts','drying_and_tingling_lips','slurred_speech','knee_pain','hip_joint_pain', 'muscle_weakness','stiff_neck','swelling_joints','movement_stiffness','spinning_movements','loss_of_balance','unsteadiness','weakness_of_one_body_side', 'loss_of_smell','bladder_discomfort','foul_smell_of urine','continuous_feel_of_urine','passage_of_gases','internal_itching','toxic_look_(typhos)', 'depression','irritability','muscle_pain','altered_sensorium','red_spots_over_body','belly_pain','abnormal_menstruation','dischromic _patches', 'watering_from_eyes','increased_appetite','polyuria','family_history','mucoid_sputum','rusty_sputum','lack_of_concentration','visual_disturbances', 'receiving_blood_transfusion','receiving_unsterile_injections','coma','stomach_bleeding','distention_of_abdomen','history_of_alcohol_consumption', 'fluid_overload','blood_in_sputum','prominent_veins_on_calf','palpitations','painful_walking','pus_filled_pimples','blackheads','scurring','skin_peeling', 'silver_like_dusting','small_dents_in_nails','inflammatory_nails','blister','red_sore_around_nose','yellow_crust_ooze'] disease=['Fungal infection','Allergy','GERD','Chronic cholestasis','Drug Reaction', 'Peptic ulcer diseae','AIDS','Diabetes','Gastroenteritis','Bronchial Asthma','Hypertension', ' Migraine','Cervical spondylosis', 'Paralysis (brain hemorrhage)','Jaundice','Malaria','Chicken pox','Dengue','Typhoid','hepatitis A', 'Hepatitis B','Hepatitis C','Hepatitis D','Hepatitis E','Alcoholic hepatitis','Tuberculosis', 'Common Cold','Pneumonia','Dimorphic hemmorhoids(piles)', 'Heartattack','Varicoseveins','Hypothyroidism','Hyperthyroidism','Hypoglycemia','Osteoarthristis', 'Arthritis','(vertigo) Paroymsal Positional Vertigo','Acne','Urinary tract infection','Psoriasis', 'Impetigo'] l2=[] for x in range(0,len(l1)): l2.append(0) # TESTING DATA tr=pd.read_csv("Testing.csv") tr.replace({'prognosis':{'Fungal infection':0,'Allergy':1,'GERD':2,'Chronic cholestasis':3,'Drug Reaction':4, 'Peptic ulcer diseae':5,'AIDS':6,'Diabetes ':7,'Gastroenteritis':8,'Bronchial Asthma':9,'Hypertension ':10, 'Migraine':11,'Cervical spondylosis':12, 'Paralysis (brain hemorrhage)':13,'Jaundice':14,'Malaria':15,'Chicken pox':16,'Dengue':17,'Typhoid':18,'hepatitis A':19, 'Hepatitis B':20,'Hepatitis C':21,'Hepatitis D':22,'Hepatitis E':23,'Alcoholic hepatitis':24,'Tuberculosis':25, 'Common Cold':26,'Pneumonia':27,'Dimorphic hemmorhoids(piles)':28,'Heart attack':29,'Varicose veins':30,'Hypothyroidism':31, 'Hyperthyroidism':32,'Hypoglycemia':33,'Osteoarthristis':34,'Arthritis':35, '(vertigo) Paroymsal Positional Vertigo':36,'Acne':37,'Urinary tract infection':38,'Psoriasis':39, 'Impetigo':40}},inplace=True) X_test= tr[l1] y_test = tr[["prognosis"]] np.ravel(y_test) # TRAINING DATA df=pd.read_csv("Training.csv") df.replace({'prognosis':{'Fungal infection':0,'Allergy':1,'GERD':2,'Chronic cholestasis':3,'Drug Reaction':4, 'Peptic ulcer diseae':5,'AIDS':6,'Diabetes ':7,'Gastroenteritis':8,'Bronchial Asthma':9,'Hypertension ':10, 'Migraine':11,'Cervical spondylosis':12, 'Paralysis (brain hemorrhage)':13,'Jaundice':14,'Malaria':15,'Chicken pox':16,'Dengue':17,'Typhoid':18,'hepatitis A':19, 'Hepatitis B':20,'Hepatitis C':21,'Hepatitis D':22,'Hepatitis E':23,'Alcoholic hepatitis':24,'Tuberculosis':25, 'Common Cold':26,'Pneumonia':27,'Dimorphic hemmorhoids(piles)':28,'Heart attack':29,'Varicose veins':30,'Hypothyroidism':31, 'Hyperthyroidism':32,'Hypoglycemia':33,'Osteoarthristis':34,'Arthritis':35, '(vertigo) Paroymsal Positional Vertigo':36,'Acne':37,'Urinary tract infection':38,'Psoriasis':39, 'Impetigo':40}},inplace=True) X= df[l1] y = df[["prognosis"]] np.ravel(y) def message(): if (Symptom1.get() == "None" and Symptom2.get() == "None" and Symptom3.get() == "None" and Symptom4.get() == "None" and Symptom5.get() == "None"): messagebox.showinfo("OPPS!!", "ENTER SYMPTOMS PLEASE") else : NaiveBayes() def NaiveBayes(): from sklearn.naive_bayes import MultinomialNB gnb = MultinomialNB() gnb=gnb.fit(X,np.ravel(y)) from sklearn.metrics import accuracy_score y_pred = gnb.predict(X_test) print(accuracy_score(y_test, y_pred)) print(accuracy_score(y_test, y_pred, normalize=False)) psymptoms = [Symptom1.get(),Symptom2.get(),Symptom3.get(),Symptom4.get(),Symptom5.get()] for k in range(0,len(l1)): for z in psymptoms: if(z==l1[k]): l2[k]=1 inputtest = [l2] predict = gnb.predict(inputtest) predicted=predict[0] h='no' for a in range(0,len(disease)): if(disease[predicted] == disease[a]): h='yes' break if (h=='yes'): t3.delete("1.0", END) t3.insert(END, disease[a]) else: t3.delete("1.0", END) t3.insert(END, "No Disease") root = Tk() root.title(" Disease Prediction From Symptoms") root.configure() Symptom1 = StringVar() Symptom1.set(None) Symptom2 = StringVar() Symptom2.set(None) Symptom3 = StringVar() Symptom3.set(None) Symptom4 = StringVar() Symptom4.set(None) Symptom5 = StringVar() Symptom5.set(None) w2 = Label(root, justify=LEFT, text=" Disease Prediction From Symptoms ") w2.config(font=("Elephant", 30)) w2.grid(row=1, column=0, columnspan=2, padx=100) NameLb1 = Label(root, text="") NameLb1.config(font=("Elephant", 20)) NameLb1.grid(row=5, column=1, pady=10, sticky=W) S1Lb = Label(root, text="Symptom 1") S1Lb.config(font=("Elephant", 15)) S1Lb.grid(row=7, column=1, pady=10 , sticky=W) S2Lb = Label(root, text="Symptom 2") S2Lb.config(font=("Elephant", 15)) S2Lb.grid(row=8, column=1, pady=10, sticky=W) S3Lb = Label(root, text="Symptom 3") S3Lb.config(font=("Elephant", 15)) S3Lb.grid(row=9, column=1, pady=10, sticky=W) S4Lb = Label(root, text="Symptom 4") S4Lb.config(font=("Elephant", 15)) S4Lb.grid(row=10, column=1, pady=10, sticky=W) S5Lb = Label(root, text="Symptom 5") S5Lb.config(font=("Elephant", 15)) S5Lb.grid(row=11, column=1, pady=10, sticky=W) lr = Button(root, text="Predict",height=2, width=20, command=message) lr.config(font=("Elephant", 15)) lr.grid(row=15, column=1,pady=20) OPTIONS = sorted(l1) S1En = OptionMenu(root, Symptom1,*OPTIONS) S1En.grid(row=7, column=2) S2En = OptionMenu(root, Symptom2,*OPTIONS) S2En.grid(row=8, column=2) S3En = OptionMenu(root, Symptom3,*OPTIONS) S3En.grid(row=9, column=2) S4En = OptionMenu(root, Symptom4,*OPTIONS) S4En.grid(row=10, column=2) S5En = OptionMenu(root, Symptom5,*OPTIONS) S5En.grid(row=11, column=2) NameLb = Label(root, text="") NameLb.config(font=("Elephant", 20)) NameLb.grid(row=13, column=1, pady=10, sticky=W) NameLb = Label(root, text="") NameLb.config(font=("Elephant", 15)) NameLb.grid(row=18, column=1, pady=10, sticky=W) t3 = Text(root, height=2, width=30) t3.config(font=("Elephant", 20)) t3.grid(row=20, column=1 , padx=10) root.mainloop()
{"hexsha": "c05366fff895779a929e9faff8e00292ba838a8c", "size": 8288, "ext": "py", "lang": "Python", "max_stars_repo_path": "disease_prediction.py", "max_stars_repo_name": "Reyuga/Reyuga", "max_stars_repo_head_hexsha": "d7c98fec6093f7540255a67de55bb765b3be8c06", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-29T16:52:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-29T16:52:18.000Z", "max_issues_repo_path": "disease_prediction.py", "max_issues_repo_name": "Reyuga/Reyuga", "max_issues_repo_head_hexsha": "d7c98fec6093f7540255a67de55bb765b3be8c06", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "disease_prediction.py", "max_forks_repo_name": "Reyuga/Reyuga", "max_forks_repo_head_hexsha": "d7c98fec6093f7540255a67de55bb765b3be8c06", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.8518518519, "max_line_length": 158, "alphanum_fraction": 0.7206805019, "include": true, "reason": "import numpy", "num_tokens": 2692}
const GWPos = SVector{2,Int} const TwoAgentPos = SVector{4,Int} const dir = Dict(:up=>GWPos(0,1), :down=>GWPos(0,-1), :left=>GWPos(-1,0), :right=>GWPos(1,0), :stay=>GWPos(0,0), :upleft=>GWPos(-1,1), :upright=>GWPos(1,1), :downright=>GWPos(1,-1), :downleft=>GWPos(-1,-1)) const aind = Dict(:up=>1, :down=>2, :left=>3, :right=>4, :stay=>5, :upleft=>6, :upright=>7, :downright=>8, :downleft=>9) const syma = [:up, :down, :left, :right, :stay, :upleft, :upright, :downleft, :downright] const S = TwoAgentPos const A = Symbol # Gridworld with adversary @with_kw mutable struct AdversarialGridworldMDP <:MDP{S, A} size::Tuple{Int, Int} = (10,10) rewards::Dict{GWPos, Float64} = Dict() walls::Vector{GWPos} = [] tprob::Float64 = 0.7 discount::Float64 = 0.95 agent_gets_action = :ego # :ego or :adversary ego_policy = (s, rng::AbstractRNG = Random.GLOBAL_RNG) -> rand(rng, syma) adversary_policy = (s, rng::AbstractRNG = Random.GLOBAL_RNG) -> rand(rng, syma) failure_penalty = 5 end valid_pos(mdp::AdversarialGridworldMDP, pos::GWPos) = !(pos in mdp.walls || any((pos .> mdp.size) .| (pos .< GWPos(1,1)))) function random_valid_pos(mdp::AdversarialGridworldMDP, rng::AbstractRNG = Random.GLOBAL_RNG, exclude = [], max_trials = 1000) trial = 0 while trial < max_trials pos = GWPos(rand(rng, 1:mdp.size[1]), rand(rng, 1:mdp.size[2])) if valid_pos(mdp, pos) && !(haskey(mdp.rewards, pos) || pos in exclude) return pos end trial += 1 end end function POMDPs.initialstate(mdp::AdversarialGridworldMDP, rng::AbstractRNG = Random.GLOBAL_RNG) ego = random_valid_pos(mdp, rng) adversary = random_valid_pos(mdp, rng, [ego]) Deterministic(S(ego..., adversary...)) end function POMDPs.states(mdp::AdversarialGridworldMDP) lengths = (mdp.size[1], mdp.size[2], mdp.size[1], mdp.size[2]) ss = S[] for ijk in CartesianIndices(lengths) s = S(ijk.I...) valid_pos(mdp, ego_pos(s)) && valid_pos(mdp, adversary_pos(s)) && push!(ss, s) end push!(ss, S(-1,-1,-1,-1)) ss end POMDPs.actions(mdp::AdversarialGridworldMDP) = syma POMDPs.actionindex(mdp::AdversarialGridworldMDP, a::A) = aind[a] ego_pos(s::S) = GWPos(s[1], s[2]) adversary_pos(s::S) = GWPos(s[3], s[4]) agents_overlap(s::S) = ego_pos(s) == adversary_pos(s) POMDPs.isterminal(mdp::AdversarialGridworldMDP, s::S) = any(s .< 0) POMDPs.discount(mdp::AdversarialGridworldMDP) = mdp.discount # Returns a sample next state and reward function POMDPs.gen(mdp::AdversarialGridworldMDP, s::S, a::A, rng::AbstractRNG = Random.GLOBAL_RNG) if haskey(mdp.rewards, ego_pos(s)) || agents_overlap(s) || isterminal(mdp, s) return (sp = S(-1,-1,-1,-1), r = reward(mdp, s)) else # Compute the direction based on the provided action rdir = (rand(rng) < mdp.tprob) ? dir[a] : dir[rand(rng, syma[a .!= syma])] # If this MDP controls the agent then use the adversary policy for the adversary # Do the opposite if the adversary is being controlled by the MDP action if mdp.agent_gets_action == :ego new_ego = ego_pos(s) + rdir new_adv = adversary_pos(s) + dir[mdp.adversary_policy(s, rng)] else new_ego = ego_pos(s) + dir[mdp.ego_policy(s, rng)] new_adv = adversary_pos(s) + rdir end # Make sure the moves are in bound and not hitting a wall new_ego = valid_pos(mdp, new_ego) ? new_ego : ego_pos(s) new_adv = valid_pos(mdp, new_adv) ? new_adv : adversary_pos(s) return (sp = S(new_ego..., new_adv...), r = reward(mdp, s)) end end # Returns the reward for the provided state function POMDPs.reward(mdp::AdversarialGridworldMDP, s::S) isterminal(mdp, s) && return 0 r = (get(mdp.rewards, ego_pos(s), 0.0) - mdp.failure_penalty*agents_overlap(s)) / mdp.failure_penalty mdp.agent_gets_action == :ego ? r : -r end function tocolor(mdp::AdversarialGridworldMDP, r::Float64) maxr = maximum(values(mdp.rewards)) minr = -maxr frac = (r-minr)/(maxr-minr) return get(ColorSchemes.redgreensplit, frac) end # Renders the mdp function POMDPModelTools.render(mdp::AdversarialGridworldMDP, s::S) nx, ny = mdp.size cells = [] for x in 1:nx, y in 1:ny pos = GWPos(x,y) reward_index = findfirst([pos] .== keys(mdp.rewards)) wall_index = findfirst([pos] .== mdp.walls) ctx = context((x-1)/nx, (ny-y)/ny, 1/nx, 1/ny) color = "white" if !isnothing(reward_index) color = tocolor(mdp, get(mdp.rewards, pos, 0)) elseif !isnothing(wall_index) color = "black" end cell = compose(ctx, Compose.rectangle(), fill(color)) push!(cells, cell) end grid = compose(context(), Compose.stroke("gray"), cells...) outline = compose(context(), Compose.rectangle()) if all(s .> 0) x,y = ego_pos(s) agent_ctx = context((x-1)/nx, (ny-y)/ny, 1/nx, 1/ny) ego = compose(agent_ctx, Compose.circle(0.5, 0.5, 0.4), Compose.stroke("black"), fill("blue")) x,y = adversary_pos(s) agent_ctx = context((x-1)/nx, (ny-y)/ny, 1/nx, 1/ny) adversary = compose(agent_ctx, Compose.circle(0.5, 0.5, 0.4), Compose.stroke("black"), fill("orange")) agents_comp = compose(context(), ego, adversary) sz = min(w, h) return compose(context((w-sz)/2, (h-sz)/2, sz, sz), agents_comp, grid, outline) else sz = min(w, h) return compose(context((w-sz)/2, (h-sz)/2, sz, sz), grid, outline) end end
{"hexsha": "f61b733a6755c19a2ac1f46e66eec652c2511b24", "size": 5626, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/mdp.jl", "max_stars_repo_name": "ancorso/GridworldAdversary.jl", "max_stars_repo_head_hexsha": "3595bb7e0cee97cdaa50c00f0ae68228129799c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-18T16:19:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-10T16:47:03.000Z", "max_issues_repo_path": "src/mdp.jl", "max_issues_repo_name": "ancorso/GridworldAdversary.jl", "max_issues_repo_head_hexsha": "3595bb7e0cee97cdaa50c00f0ae68228129799c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mdp.jl", "max_forks_repo_name": "ancorso/GridworldAdversary.jl", "max_forks_repo_head_hexsha": "3595bb7e0cee97cdaa50c00f0ae68228129799c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8, "max_line_length": 126, "alphanum_fraction": 0.6260220405, "num_tokens": 1868}
#!/usr/bin/env python """ Test module for TwoPhaseFlow """ import pytest import tables import numpy as np import proteus.defaults from proteus import Context from proteus import default_so from proteus.iproteus import * import os import sys Profiling.logLevel=1 Profiling.verbose=True class TestTwoPhaseFlow(object): def setup_method(self,method): self._scriptdir = os.path.dirname(__file__) self.path = proteus.__path__[0]+"/tests/TwoPhaseFlow/" def teardown_method(self, method): """ Tear down function """ FileList = ['marin.h5','marin.xmf' 'moses.h5','moses.xmf' 'damBreak.h5','damBreak.xmf' 'damBreak_solver_options.h5','damBreak_solver_options.xmf' 'TwoDimBucklingFlow.h5','TwoDimBucklingFlow.xmf' 'filling.h5','filling.xmf' ] for file in FileList: if os.path.isfile(file): os.remove(file) else: pass def compare_vs_saved_files(self,name): actual = tables.open_file(name+'.h5','r') expected_path = 'comparison_files/' + 'comparison_' + name + '_phi_t2.csv' #write comparison file #np.array(actual.root.phi_t2).tofile(os.path.join(self._scriptdir, expected_path),sep=",") np.testing.assert_almost_equal(np.fromfile(os.path.join(self._scriptdir, expected_path),sep=","),np.array(actual.root.phi_t2).flatten(),decimal=6) expected_path = 'comparison_files/' + 'comparison_' + name + '_velocity_t2.csv' #write comparison file #np.array(actual.root.velocity_t2).tofile(os.path.join(self._scriptdir, expected_path),sep=",") np.testing.assert_almost_equal(np.fromfile(os.path.join(self._scriptdir, expected_path),sep=","),np.array(actual.root.velocity_t2).flatten(),decimal=6) actual.close() # *** 2D tests *** # def test_risingBubble(self): #uses structured triangle mesh os.system("parun --TwoPhaseFlow --path " + self.path + " " "risingBubble.py -l5 -v -C 'final_time=0.1 dt_output=0.1 refinement=1'") self.compare_vs_saved_files("risingBubble") def test_damBreak(self): os.system("parun --TwoPhaseFlow --path " + self.path + " " "damBreak.py -l5 -v -C 'final_time=0.1 dt_output=0.1 he=0.1'") self.compare_vs_saved_files("damBreak") @pytest.mark.skip(reason="numerics are very sensitive, hashdist build doesn't pass but conda does") def test_damBreak_solver_options(self): os.system("parun --TwoPhaseFlow --path " + self.path + " " "damBreak_solver_options.py -l5 -v -C 'final_time=0.1 dt_output=0.1 he=0.1'") self.compare_vs_saved_files("damBreak_solver_options") # @pytest.mark.skip(reason="long test") def test_TwoDimBucklingFlow(self): os.system("parun --TwoPhaseFlow --path " + self.path + " " "TwoDimBucklingFlow.py -l5 -v -C 'final_time=0.1 dt_output=0.1 he=0.09'") self.compare_vs_saved_files("TwoDimBucklingFlow") # @pytest.mark.skip(reason="long test") @pytest.mark.skip(reason="need to redo after history revision") def test_fillingTank(self): os.system("parun --TwoPhaseFlow --path " + self.path + " " "fillingTank.py -l5 -v -C 'final_time=0.02 dt_output=0.02 he=0.01'") self.compare_vs_saved_files("fillingTank") # *** 3D tests *** # def test_marin(self): os.system("parun --TwoPhaseFlow --path " + self.path + " " "marin.py -l5 -v -C 'final_time=0.1 dt_output=0.1 he=0.5'") self.compare_vs_saved_files("marin") def test_moses(self): os.system("parun --TwoPhaseFlow --path " + self.path + " " "moses.py -l5 -v -C 'final_time=0.1 dt_output=0.1 he=0.5'") self.compare_vs_saved_files("moses")
{"hexsha": "13c277d453e012730f8117b53a24c917162d3595", "size": 3914, "ext": "py", "lang": "Python", "max_stars_repo_path": "proteus/tests/TwoPhaseFlow/test_TwoPhaseFlow.py", "max_stars_repo_name": "tridelat/proteus", "max_stars_repo_head_hexsha": "44d7c3cb2f992b109b30f14b4660235d90e9bdfb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "proteus/tests/TwoPhaseFlow/test_TwoPhaseFlow.py", "max_issues_repo_name": "tridelat/proteus", "max_issues_repo_head_hexsha": "44d7c3cb2f992b109b30f14b4660235d90e9bdfb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2018-02-08T23:22:59.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-06T19:40:32.000Z", "max_forks_repo_path": "proteus/tests/TwoPhaseFlow/test_TwoPhaseFlow.py", "max_forks_repo_name": "zhang-alvin/proteus", "max_forks_repo_head_hexsha": "13380120826ff0ffa0f244ddd4ee7f389dd8b917", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0860215054, "max_line_length": 159, "alphanum_fraction": 0.6308124681, "include": true, "reason": "import numpy", "num_tokens": 1026}
using GeoFormatTypes, Test using GeoFormatTypes: Geom, CRS, Extended, Unknown @testset "Test construcors" begin @test_throws ArgumentError ProjString("+lat_ts=56.5 +ellps=GRS80") @test_throws ArgumentError ProjJSON(Dict("fype" => 1)) @test_throws ArgumentError ProjJSON("fype") @test_throws ArgumentError EPSG("ERROR:4326") @test EPSG("EPSG:4326") == EPSG(4326) end @testset "Test constructors" begin @test ProjString("+proj=test") isa ProjString @test ProjJSON(Dict("type" => "GeographicCRS")) isa ProjJSON @test ProjJSON("type: GeographicCRS") isa ProjJSON @test EPSG(4326) isa EPSG @test WellKnownText("test") isa WellKnownText{Unknown} @test WellKnownBinary([1, 2, 3, 4]) isa WellKnownBinary{Unknown} @test WellKnownText2("test") isa WellKnownText2{Unknown} @test ESRIWellKnownText("test") isa ESRIWellKnownText{Unknown} @test WellKnownText(Extended(), "test") isa WellKnownText{Extended} @test WellKnownBinary(Extended(), [1, 2, 3, 4]) isa WellKnownBinary{Extended} @test WellKnownText2(CRS(), "test") isa WellKnownText2{CRS} @test ESRIWellKnownText(Geom(), "test") isa ESRIWellKnownText{Geom} @test GML("test") isa GML{Unknown} @test GML(Geom(), "test") isa GML{Geom} @test GML(CRS(), "test") isa GML{CRS} # Probably doesn't actually exist @test KML("test") isa KML @test GeoJSON("test") isa GeoJSON end @testset "Test conversion to string or int" begin @test convert(String, ProjString("+proj=test")) == "+proj=test" @test convert(String, EPSG(4326)) == "EPSG:4326" @test convert(Int, EPSG(4326)) == 4326 @test convert(String, WellKnownText("test")) == "test" @test convert(String, WellKnownText2("test")) == "test" @test convert(String, ESRIWellKnownText("test")) == "test" @test convert(String, GML("test")) == "test" @test convert(String, KML("test")) == "test" @test convert(String, GeoJSON("test")) == "test" end # `convert` placeholder methods Base.convert(target::Type{<:GeoFormat}, mode::Union{Geom,Type{Geom}}, source::GeoFormat; kwargs...) = (:geom, kwargs...) Base.convert(target::Type{<:GeoFormat}, mode::Union{CRS,Type{CRS}}, source::GeoFormat; kwargs...) = (:crs, kwargs...) @testset "Test convert mode allocation" begin @testset "Test identical type is passed through unchanged" begin @test convert(WellKnownText, WellKnownText(Extended(), "test")) == WellKnownText(Extended(), "test") @test convert(ProjString, ProjString("+proj=test")) == ProjString("+proj=test") end @testset "Test conversions are assigned to crs or geom correctly" begin @test convert(WellKnownText, WellKnownText2(CRS(), "test")) == (:crs,) @test convert(WellKnownText2, WellKnownText(CRS(), "test")) == (:crs,) @test convert(WellKnownBinary, WellKnownText(CRS(), "test")) == (:crs,) @test convert(ProjString, WellKnownText(CRS(), "test")) == (:crs,) @test convert(EPSG, ProjString("+proj=test")) == (:crs,) @test convert(CoordSys, ProjString("+proj=test")) == (:crs,) @test convert(GeoJSON, WellKnownText(Geom(), "test")) == (:geom,) @test convert(KML, WellKnownText(Geom(), "test")) == (:geom,) @test convert(GML, WellKnownText(Geom(), "test")) == (:geom,) @test convert(ESRIWellKnownText, WellKnownText(Geom(), "test")) == (:geom,) @test convert(WellKnownBinary, WellKnownText(Geom(), "test")) == (:geom,) @test convert(WellKnownText2, WellKnownText(Geom(), "test")) == (:geom,) @test convert(WellKnownText2, WellKnownText(Geom(), "test")) == (:geom,) @test convert(WellKnownText, WellKnownText2(Geom(), "test")) == (:geom,) @test convert(GeoJSON, WellKnownText(Extended(), "test")) == (:geom,) @test convert(KML, WellKnownText(Extended(), "test")) == (:geom,) @test convert(GML, WellKnownText(Extended(), "test")) == (:geom,) @test convert(ESRIWellKnownText, WellKnownText(Extended(), "test")) == (:geom,) @test convert(WellKnownBinary, WellKnownText(Extended(), "test")) == (:geom,) @test convert(WellKnownText2, WellKnownText(Extended(), "test")) == (:geom,) @test convert(WellKnownText2, WellKnownText(Extended(), "test")) == (:geom,) @test convert(WellKnownText, WellKnownText2(Extended(), "test")) == (:geom,) @test convert(GeoJSON, WellKnownText(Unknown(), "test")) == (:geom,) @test convert(KML, WellKnownText(Unknown(), "test")) == (:geom,) @test convert(GML, WellKnownText(Unknown(), "test")) == (:geom,) @test convert(ESRIWellKnownText, WellKnownText(Unknown(), "test")) == (:geom,) @test convert(WellKnownBinary, WellKnownText(Unknown(), "test")) == (:geom,) @test convert(WellKnownText2, WellKnownText(Unknown(), "test")) == (:geom,) @test convert(WellKnownText2, WellKnownText(Unknown(), "test")) == (:geom,) @test convert(WellKnownText, WellKnownText2(Unknown(), "test")) == (:geom,) end @testset "Test kargs pass through convert" begin @test convert(WellKnownText, WellKnownText2(CRS(), "test"); order=:trad) == (:crs, :order => :trad,) @test convert(GML, WellKnownText(Extended(), "test"); order=:custom) == (:geom, :order => :custom) end @testset "Test conversions that are not possible throw an error" begin @test_throws ArgumentError convert(KML, ProjString("+proj=test")) @test_throws ArgumentError convert(GeoJSON, ProjString("+proj=test")) @test_throws ArgumentError convert(ProjString, WellKnownText(Geom(), "test")) @test_throws ArgumentError convert(CoordSys, WellKnownText(Geom(), "test")) @test_throws ArgumentError convert(EPSG, WellKnownText(Geom(), "test")) end end
{"hexsha": "e413c6f2f26f3a3ac8d135c45fee9dd767ef780a", "size": 5779, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "rafaqz/CoordinateReferenceSystemsBase.jl", "max_stars_repo_head_hexsha": "7d9317ec03b02af6089449800d75265e42fb139c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-11T00:03:36.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-11T00:03:36.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "rafaqz/CoordinateReferenceSystemsBase.jl", "max_issues_repo_head_hexsha": "7d9317ec03b02af6089449800d75265e42fb139c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-11-13T14:13:01.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-21T06:14:55.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "rafaqz/GeoFormatTypes.jl", "max_forks_repo_head_hexsha": "7d9317ec03b02af6089449800d75265e42fb139c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.1067961165, "max_line_length": 108, "alphanum_fraction": 0.6546115245, "num_tokens": 1648}
#!/usr/bin/env python # -*- coding: utf-8 -*- import scipy.sparse def rcm(g): '''Compute the reverse Cuthill-Mckee permutation of a graph. Note that the method does NOT modify the graph, but rather just returns a permutation vector that can be used by Graph.permute to achieve the actual reordering. Parameters ---------- g: Graph The graph to be reordered. Returns ------- perm: numpy.ndarray Array of permuted node indices. ''' return scipy.sparse.csgraph.reverse_cuthill_mckee( g.adjacency_matrix, symmetric_mode=True )
{"hexsha": "c6ceb3b43f57d6fb05add8aa2d74a01819885620", "size": 599, "ext": "py", "lang": "Python", "max_stars_repo_path": "graphdot/graph/reorder/rcm.py", "max_stars_repo_name": "yhtang/GraphDot", "max_stars_repo_head_hexsha": "3d5ed4fbb2f6912052baa42780b436da76979691", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-02-14T18:07:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T12:07:31.000Z", "max_issues_repo_path": "graphdot/graph/reorder/rcm.py", "max_issues_repo_name": "yhtang/graphdot", "max_issues_repo_head_hexsha": "3d5ed4fbb2f6912052baa42780b436da76979691", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-03-19T19:07:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-24T06:08:51.000Z", "max_forks_repo_path": "graphdot/graph/reorder/rcm.py", "max_forks_repo_name": "yhtang/graphdot", "max_forks_repo_head_hexsha": "3d5ed4fbb2f6912052baa42780b436da76979691", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-10-17T06:11:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-07T11:56:33.000Z", "avg_line_length": 24.9583333333, "max_line_length": 78, "alphanum_fraction": 0.6577629382, "include": true, "reason": "import scipy", "num_tokens": 139}
""" Cart pole swing-up: Original version from: https://github.com/zuoxingdong/DeepPILCO/blob/master/cartpole_swingup.py Modified so that done=True when x is outside of -2.4 to 2.4 Reward is also reshaped to be similar to PyBullet/roboschool version More difficult, since dt is 0.05 (not 0.01), and only 200 timesteps """ import logging import math import gym from gym import spaces from gym.utils import seeding import numpy as np logger = logging.getLogger(__name__) class CartPoleSwingUpEnv(gym.Env): metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 100 } def __init__(self, masscart=0.5, masspole=0.5, polelength=0.5): self.g = 9.82 # gravity self.m_c = masscart # cart mass, default 0.5 self.m_p = masspole # pendulum mass, default 0.5 self.total_m = (self.m_p + self.m_c) self.l = polelength # pole's length, default 0.6 self.m_p_l = (self.m_p * self.l) self.force_mag = 20.0 self.dt = 0.04 # seconds between state updates self.tau = 0.02 self.b = 0.1 # friction coefficient, default 0.1 self.bouncing = False self.t = 0 # timestep self.t_limit = 200 # todo: tlimit originally 1000 # Angle at which to fail the episode self.theta_threshold_radians = 12 * 2 * math.pi / 360 self.x_threshold = 2.4 self.kinematics_integrator = 'euler' high = np.array([ np.finfo(np.float32).max, np.finfo(np.float32).max, np.finfo(np.float32).max, np.finfo(np.float32).max, np.finfo(np.float32).max]) self.action_space = spaces.Box(-1.0, 1.0, shape=(1,)) self.observation_space = spaces.Box(-high, high) self.seed() self.viewer = None self.state = None self.constraint = True #safe constraint self.constraint_reward = -100 self.coslimit = 0.98 def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def step(self, action): # Valid action action = np.clip(action, -1.0, 1.0)[0] action *= self.force_mag state = self.state x, x_dot, theta, theta_dot = state s = math.sin(theta) c = math.cos(theta) xdot_update = (-2 * self.m_p_l * ( theta_dot ** 2) * s + 3 * self.m_p * self.g * s * c + 4 * action - 4 * self.b * x_dot) / ( 4 * self.total_m - 3 * self.m_p * c ** 2) thetadot_update = (-3 * self.m_p_l * (theta_dot ** 2) * s * c + 6 * self.total_m * self.g * s + 6 * ( action - self.b * x_dot) * c) / (4 * self.l * self.total_m - 3 * self.m_p_l * c ** 2) if self.bouncing: if x < -self.x_threshold: x = -self.x_threshold x_dot = -0.1*x_dot elif x > self.x_threshold: x = self.x_threshold x_dot = -0.1*x_dot else: x = x + x_dot * self.dt x_dot = x_dot + xdot_update * self.dt else: x = x + x_dot * self.dt x_dot = x_dot + xdot_update * self.dt theta = theta + theta_dot * self.dt theta_dot = theta_dot + thetadot_update * self.dt self.state = (x, x_dot, theta, theta_dot) done = False if not self.bouncing: if x < -self.x_threshold or x > self.x_threshold: done = True self.t += 1 if self.t >= self.t_limit: done = True self.t = 0 obs = np.array([x, x_dot, np.cos(theta), np.sin(theta), theta_dot]) reward = self.get_reward_mujoco() violation = self.constraint_violated() return obs, reward, done, violation def constraint_violated(self): state = self.state x, x_dot, theta, theta_dot = state if np.cos(theta) > 0 and np.cos(theta) < self.coslimit and np.sin(theta) > 0: return 1 elif x < -self.x_threshold or x > self.x_threshold: return 1 return 0 def get_reward(self): state = self.state x, x_dot, theta, theta_dot = state reward_theta = (np.cos(theta) + 1.0) / 2.0 reward_x = np.cos((x / self.x_threshold) * (np.pi / 2.0)) reward = reward_theta * reward_x reward = np.max((np.min((reward, 1)), 0)) return reward def get_reward_mujoco(self): # mujoco env reward state = self.state x, x_dot, theta, theta_dot = state length = self.l # pole length x_tip_error = x - length * np.sin(theta) y_tip_error = length - length * np.cos(theta) reward = np.exp(-(x_tip_error ** 2 + y_tip_error ** 2) / length ** 2) # print('theta ', theta) # print('x ', x) # print('x_tip_error ', x_tip_error, 'y_tip_error ', y_tip_error) return reward def reset(self): # self.state = self.np_random.normal(loc=np.array([0.0, 0.0, 30*(2*np.pi)/360, 0.0]), scale=np.array([0.0, 0.0, 0.0, 0.0])) # self.state = np.random.normal(loc=np.array([0.0, 0.0, np.pi, 0.0]), scale=np.array([0.2, 0.2, 0.2, 0.2])) self.state = (0.0, 0.0, np.pi, 0.0) self.steps_beyond_done = None self.t = 0 # timestep x, x_dot, theta, theta_dot = self.state obs = np.array([x, x_dot, np.cos(theta), np.sin(theta), theta_dot]) # return obs, np.array(self.state) return obs # def set_state(self, obs): # # state = (x, x_dot, theta, theta_dot) # # obs = [x, x_dot, np.cos(theta), np.sin(theta), theta_dot] # self.state = (obs[0], obs[1], obs[2], obs[3]) #wrong! # # theta = np.arctan(obs[3]/obs[2]) # # self.t = 0 # # self.state = (obs[0], obs[1], theta, obs[4]) def render(self, mode='human', close=False): if close: if self.viewer is not None: self.viewer.close() self.viewer = None return screen_width = 600 screen_height = 600 # before was 400 world_width = 5 # max visible position of cart scale = screen_width / world_width carty = screen_height / 2 # TOP OF CART polewidth = 6.0 polelen = scale * self.l # 0.6 or self.l cartwidth = 40.0 cartheight = 20.0 if self.viewer is None: from gym.envs.classic_control import rendering self.viewer = rendering.Viewer(screen_width, screen_height) l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2 cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)]) self.carttrans = rendering.Transform() cart.add_attr(self.carttrans) cart.set_color(1, 0, 0) self.viewer.add_geom(cart) l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2 pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)]) pole.set_color(0, 0, 1) self.poletrans = rendering.Transform(translation=(0, 0)) pole.add_attr(self.poletrans) pole.add_attr(self.carttrans) self.viewer.add_geom(pole) self.axle = rendering.make_circle(polewidth / 2) self.axle.add_attr(self.poletrans) self.axle.add_attr(self.carttrans) self.axle.set_color(0.1, 1, 1) self.viewer.add_geom(self.axle) # Make another circle on the top of the pole self.pole_bob = rendering.make_circle(polewidth / 2) self.pole_bob_trans = rendering.Transform() self.pole_bob.add_attr(self.pole_bob_trans) self.pole_bob.add_attr(self.poletrans) self.pole_bob.add_attr(self.carttrans) self.pole_bob.set_color(0, 0, 0) self.viewer.add_geom(self.pole_bob) self.wheel_l = rendering.make_circle(cartheight / 4) self.wheel_r = rendering.make_circle(cartheight / 4) self.wheeltrans_l = rendering.Transform(translation=(-cartwidth / 2, -cartheight / 2)) self.wheeltrans_r = rendering.Transform(translation=(cartwidth / 2, -cartheight / 2)) self.wheel_l.add_attr(self.wheeltrans_l) self.wheel_l.add_attr(self.carttrans) self.wheel_r.add_attr(self.wheeltrans_r) self.wheel_r.add_attr(self.carttrans) self.wheel_l.set_color(0, 0, 0) # Black, (B, G, R) self.wheel_r.set_color(0, 0, 0) # Black, (B, G, R) self.viewer.add_geom(self.wheel_l) self.viewer.add_geom(self.wheel_r) self.track = rendering.Line( (screen_width / 2 - self.x_threshold * scale, carty - cartheight / 2 - cartheight / 4), (screen_width / 2 + self.x_threshold * scale, carty - cartheight / 2 - cartheight / 4)) self.track.set_color(0, 0, 0) self.viewer.add_geom(self.track) if self.state is None: return None x = self.state cartx = x[0] * scale + screen_width / 2.0 # MIDDLE OF CART self.carttrans.set_translation(cartx, carty) self.poletrans.set_rotation(x[2]) self.pole_bob_trans.set_translation(-self.l * np.sin(x[2]), self.l * np.cos(x[2])) return self.viewer.render(return_rgb_array=mode == 'rgb_array')
{"hexsha": "901280a060221634c6a96c61adb8f03c3bb2fa7e", "size": 9585, "ext": "py", "lang": "Python", "max_stars_repo_path": "envs/cartpole-envs/cartpole_envs/envs/CartPoleSwingUpEnv.py", "max_stars_repo_name": "baimingc/casrl", "max_stars_repo_head_hexsha": "7567b6592ab1790c2231993fadb78a9b5933e125", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-04-23T03:30:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-27T19:32:41.000Z", "max_issues_repo_path": "envs/cartpole-envs/cartpole_envs/envs/CartPoleSwingUpEnv.py", "max_issues_repo_name": "baimingc/casrl", "max_issues_repo_head_hexsha": "7567b6592ab1790c2231993fadb78a9b5933e125", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "envs/cartpole-envs/cartpole_envs/envs/CartPoleSwingUpEnv.py", "max_forks_repo_name": "baimingc/casrl", "max_forks_repo_head_hexsha": "7567b6592ab1790c2231993fadb78a9b5933e125", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7362204724, "max_line_length": 131, "alphanum_fraction": 0.5616066771, "include": true, "reason": "import numpy", "num_tokens": 2700}
# Copyright 2018 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import random from typing import Sequence import numpy as np import pytest import sympy import cirq def assert_gates_implement_unitary(gates: Sequence[cirq.SingleQubitGate], intended_effect: np.ndarray, atol: float): actual_effect = cirq.dot(*[cirq.unitary(g) for g in reversed(gates)]) cirq.testing.assert_allclose_up_to_global_phase(actual_effect, intended_effect, atol=atol) def test_is_negligible_turn(): assert cirq.is_negligible_turn(0, 1e-5) assert cirq.is_negligible_turn(1e-6, 1e-5) assert cirq.is_negligible_turn(1, 1e-5) assert cirq.is_negligible_turn(1 + 1e-6, 1e-5) assert cirq.is_negligible_turn(1 - 1e-6, 1e-5) assert cirq.is_negligible_turn(-1, 1e-5) assert cirq.is_negligible_turn(-1 + 1e-6, 1e-5) assert cirq.is_negligible_turn(-1 - 1e-6, 1e-5) assert cirq.is_negligible_turn(3, 1e-5) assert cirq.is_negligible_turn(3 + 1e-6, 1e-5) assert not cirq.is_negligible_turn(1e-4, 1e-5) assert not cirq.is_negligible_turn(-1e-4, 1e-5) assert not cirq.is_negligible_turn(0.5, 1e-5) assert not cirq.is_negligible_turn(-0.5, 1e-5) assert not cirq.is_negligible_turn(0.5, 1e-5) assert not cirq.is_negligible_turn(4.5, 1e-5) # Variable sympy expression assert not cirq.is_negligible_turn(sympy.Symbol('a'), 1e-5) assert not cirq.is_negligible_turn(sympy.Symbol('a') + 1, 1e-5) assert not cirq.is_negligible_turn(sympy.Symbol('a') * 1e-10, 1e-5) # Constant sympy expression assert cirq.is_negligible_turn(sympy.Symbol('a') * 0 + 3 + 1e-6, 1e-5) assert not cirq.is_negligible_turn(sympy.Symbol('a') * 0 + 1.5 - 1e-6, 1e-5) def test_single_qubit_matrix_to_gates_known_x(): actual = cirq.single_qubit_matrix_to_gates( np.array([[0, 1], [1, 0]]), tolerance=0.01) assert cirq.approx_eq(actual, [cirq.X], atol=1e-9) def test_single_qubit_matrix_to_gates_known_y(): actual = cirq.single_qubit_matrix_to_gates( np.array([[0, -1j], [1j, 0]]), tolerance=0.01) assert cirq.approx_eq(actual, [cirq.Y], atol=1e-9) def test_single_qubit_matrix_to_gates_known_z(): actual = cirq.single_qubit_matrix_to_gates( np.array([[1, 0], [0, -1]]), tolerance=0.01) assert cirq.approx_eq(actual, [cirq.Z], atol=1e-9) def test_single_qubit_matrix_to_gates_known_s(): actual = cirq.single_qubit_matrix_to_gates( np.array([[1, 0], [0, 1j]]), tolerance=0.01) assert cirq.approx_eq(actual, [cirq.Z**0.5], atol=1e-9) def test_known_s_dag(): actual = cirq.single_qubit_matrix_to_gates( np.array([[1, 0], [0, -1j]]), tolerance=0.01) assert cirq.approx_eq(actual, [cirq.Z**-0.5], atol=1e-9) def test_known_h(): actual = cirq.single_qubit_matrix_to_gates( np.array([[1, 1], [1, -1]]) * np.sqrt(0.5), tolerance=0.001) assert cirq.approx_eq(actual, [cirq.Y**-0.5, cirq.Z], atol=1e-9) @pytest.mark.parametrize('intended_effect', [ np.array([[0, 1j], [1, 0]]), # Historical failure: np.array([[-0.10313355-0.62283483j, 0.76512225-0.1266025j], [-0.72184177+0.28352196j, 0.23073193+0.5876415j]]), ] + [ cirq.testing.random_unitary(2) for _ in range(10) ]) def test_single_qubit_matrix_to_gates_cases(intended_effect): for atol in [1e-1, 1e-8]: gates = cirq.single_qubit_matrix_to_gates( intended_effect, tolerance=atol / 10) assert len(gates) <= 3 assert sum(1 for g in gates if not isinstance(g, cirq.ZPowGate)) <= 1 assert_gates_implement_unitary(gates, intended_effect, atol=atol) @pytest.mark.parametrize('pre_turns,post_turns', [(random.random(), random.random()) for _ in range(10)]) def test_single_qubit_matrix_to_gates_fuzz_half_turns_merge_z_gates( pre_turns, post_turns): intended_effect = cirq.dot( cirq.unitary(cirq.Z**(2 * pre_turns)), cirq.unitary(cirq.X), cirq.unitary(cirq.Z**(2 * post_turns))) gates = cirq.single_qubit_matrix_to_gates( intended_effect, tolerance=1e-7) assert len(gates) <= 2 assert_gates_implement_unitary(gates, intended_effect, atol=1e-6) def test_single_qubit_matrix_to_gates_tolerance_z(): z = np.diag([1, np.exp(1j * 0.01)]) optimized_away = cirq.single_qubit_matrix_to_gates( z, tolerance=0.1) assert len(optimized_away) == 0 kept = cirq.single_qubit_matrix_to_gates(z, tolerance=0.0001) assert len(kept) == 1 def test_single_qubit_matrix_to_gates_tolerance_xy(): c, s = np.cos(0.01), np.sin(0.01) xy = np.array([[c, -s], [s, c]]) optimized_away = cirq.single_qubit_matrix_to_gates( xy, tolerance=0.1) assert len(optimized_away) == 0 kept = cirq.single_qubit_matrix_to_gates(xy, tolerance=0.0001) assert len(kept) == 1 def test_single_qubit_matrix_to_gates_tolerance_half_turn_phasing(): a = np.pi / 2 + 0.01 c, s = np.cos(a), np.sin(a) nearly_x = np.array([[c, -s], [s, c]]) z1 = np.diag([1, np.exp(1j * 1.2)]) z2 = np.diag([1, np.exp(1j * 1.6)]) phased_nearly_x = z1.dot(nearly_x).dot(z2) optimized_away = cirq.single_qubit_matrix_to_gates( phased_nearly_x, tolerance=0.1) assert len(optimized_away) == 2 kept = cirq.single_qubit_matrix_to_gates( phased_nearly_x, tolerance=0.0001) assert len(kept) == 3 def test_single_qubit_op_to_framed_phase_form_output_on_example_case(): u, t, g = cirq.single_qubit_op_to_framed_phase_form( cirq.unitary(cirq.Y**0.25)) assert cirq.allclose_up_to_global_phase(u, cirq.unitary(cirq.X**0.5)) assert abs(t - (1 + 1j) * math.sqrt(0.5)) < 0.00001 assert abs(g - 1) < 0.00001 @pytest.mark.parametrize('mat', [ np.eye(2), cirq.unitary(cirq.H), cirq.unitary(cirq.X), cirq.unitary(cirq.X**0.5), cirq.unitary(cirq.Y), cirq.unitary(cirq.Z), cirq.unitary(cirq.Z**0.5), ] + [cirq.testing.random_unitary(2) for _ in range(10)]) def test_single_qubit_op_to_framed_phase_form_equivalent_on_known_and_random( mat): u, t, g = cirq.single_qubit_op_to_framed_phase_form(mat) z = np.diag([g, g * t]) assert np.allclose(mat, np.conj(u.T).dot(z).dot(u)) def test_single_qubit_matrix_to_native_gates_known(): actual = cirq.single_qubit_matrix_to_phased_x_z( np.array([[0, 1], [1, 0]]), atol=0.01) assert cirq.approx_eq(actual, [cirq.PhasedXPowGate(phase_exponent=1.0)], atol=1e-9) actual = cirq.single_qubit_matrix_to_phased_x_z( np.array([[0, -1j], [1j, 0]]), atol=0.01) assert cirq.approx_eq(actual, [cirq.Y], atol=1e-9) actual = cirq.single_qubit_matrix_to_phased_x_z( np.array([[1, 0], [0, -1]]), atol=0.01) assert cirq.approx_eq(actual, [cirq.Z], atol=1e-9) actual = cirq.single_qubit_matrix_to_phased_x_z( np.array([[1, 0], [0, 1j]]), atol=0.01) assert cirq.approx_eq(actual, [cirq.Z**0.5], atol=1e-9) actual = cirq.single_qubit_matrix_to_phased_x_z( np.array([[1, 0], [0, -1j]]), atol=0.01) assert cirq.approx_eq(actual, [cirq.Z**-0.5], atol=1e-9) actual = cirq.single_qubit_matrix_to_phased_x_z( np.array([[1, 1], [1, -1]]) * np.sqrt(0.5), atol=0.001) assert cirq.approx_eq( actual, [cirq.PhasedXPowGate(phase_exponent=-0.5, exponent=0.5), cirq.Z**-1], atol=1e-9) @pytest.mark.parametrize('intended_effect', [ np.array([[0, 1j], [1, 0]]), ] + [ cirq.testing.random_unitary(2) for _ in range(10) ]) def test_single_qubit_matrix_to_native_gates_cases(intended_effect): gates = cirq.single_qubit_matrix_to_phased_x_z(intended_effect, atol=1e-6) assert len(gates) <= 2 assert_gates_implement_unitary(gates, intended_effect, atol=1e-5) @pytest.mark.parametrize('pre_turns,post_turns', [(random.random(), random.random()) for _ in range(10)]) def test_single_qubit_matrix_to_native_gates_fuzz_half_turns_always_one_gate( pre_turns, post_turns): atol = 1e-6 aggr_atol = atol * 10.0 intended_effect = cirq.dot( cirq.unitary(cirq.Z**(2 * pre_turns)), cirq.unitary(cirq.X), cirq.unitary(cirq.Z**(2 * post_turns))) gates = cirq.single_qubit_matrix_to_phased_x_z( intended_effect, atol=atol) assert len(gates) == 1 assert_gates_implement_unitary(gates, intended_effect, atol=aggr_atol) def test_single_qubit_matrix_to_native_gates_tolerance_z(): z = np.diag([1, np.exp(1j * 0.01)]) optimized_away = cirq.single_qubit_matrix_to_phased_x_z( z, atol=0.1) assert len(optimized_away) == 0 kept = cirq.single_qubit_matrix_to_phased_x_z(z, atol=0.0001) assert len(kept) == 1 def test_single_qubit_matrix_to_native_gates_tolerance_xy(): c, s = np.cos(0.01), np.sin(0.01) xy = np.array([[c, -s], [s, c]]) optimized_away = cirq.single_qubit_matrix_to_phased_x_z( xy, atol=0.1) assert len(optimized_away) == 0 kept = cirq.single_qubit_matrix_to_phased_x_z(xy, atol=0.0001) assert len(kept) == 1 def test_single_qubit_matrix_to_native_gates_tolerance_half_turn_phasing(): a = np.pi / 2 + 0.01 c, s = np.cos(a), np.sin(a) nearly_x = np.array([[c, -s], [s, c]]) z1 = np.diag([1, np.exp(1j * 1.2)]) z2 = np.diag([1, np.exp(1j * 1.6)]) phased_nearly_x = z1.dot(nearly_x).dot(z2) optimized_away = cirq.single_qubit_matrix_to_phased_x_z( phased_nearly_x, atol=0.1) assert len(optimized_away) == 1 kept = cirq.single_qubit_matrix_to_phased_x_z( phased_nearly_x, atol=0.0001) assert len(kept) == 2
{"hexsha": "b48dc6a0ff2acfdda9e4ef5936bbab0a9cedee7a", "size": 10408, "ext": "py", "lang": "Python", "max_stars_repo_path": "cirq/optimizers/decompositions_test.py", "max_stars_repo_name": "muneerqu/Cirq", "max_stars_repo_head_hexsha": "729d993312467d8ea9127103f9e15ae2391e7d85", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-11-08T11:46:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-27T10:13:38.000Z", "max_issues_repo_path": "cirq/optimizers/decompositions_test.py", "max_issues_repo_name": "1eedaegon/Cirq", "max_issues_repo_head_hexsha": "de0c5e855069bba71e55b070fc9b06f58c07a861", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-09T14:57:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-12T12:35:58.000Z", "max_forks_repo_path": "cirq/optimizers/decompositions_test.py", "max_forks_repo_name": "1eedaegon/Cirq", "max_forks_repo_head_hexsha": "de0c5e855069bba71e55b070fc9b06f58c07a861", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-10-25T19:36:50.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-25T19:36:50.000Z", "avg_line_length": 34.5780730897, "max_line_length": 80, "alphanum_fraction": 0.6644888547, "include": true, "reason": "import numpy,import sympy", "num_tokens": 3387}
#include <iostream> #include <vector> #include <map> #include <string> #include <exception> #include <cstring> #include <boost/algorithm/string.hpp> #include <tao/pegtl.hpp> #include "cli.h" #include "../engine/engine.h" #include "grammar_cli.h" using std::endl; using std::cin; using std::cout; using std::istream; using std::ostream; using std::vector; using std::string; using std::map; int CliParser::run(istream& in, ostream& out){ string current_line; map<string, string> vars; size_t linecount = 0; try { if (!in) return 2; while(in) { out << prompt << " "; getline(in, current_line); if (current_line.empty()) continue; tao::pegtl::memory_input<> p_in(current_line, "STDIN"); tao::pegtl::parse< cli::grammar, cli::impl >(p_in, vars); log_command(out, vars); int rc = handle_command(vars); ++linecount; vars.clear(); if (rc == 1) break; } return 0; } catch (std::exception e){ // This will hande EngineExceptions out << "Command failed: " << e.what() << endl; return 1; } } int CliParser::handle_command(map<string, string> vars) { // static Engine engine; string action = vars["action"]; if (action == "open") { engine->open_file(vars["open_path"], vars["open_name"], vars["open_type"]); return RC_CONTINUE; } else if ( action == "list") { cout << "list(): " << endl; engine->list_file(); return RC_CONTINUE; } else if ( action == "exit") { cout << "Bye!" << endl; return RC_BREAK; } else { cout << "Command not found" << endl; return RC_BAD_CMD; } } void CliParser::log_command(ostream& out, map<string, string> vars){ typedef map<string, string>::const_iterator iter; out << "DEBUG: " << vars["action"] << ": "; for(iter it = vars.begin(); it != vars.end(); ++it){ out << it->first << "=" << it->second << " "; } out << endl; } void CliParser::set_prompt(string s) { prompt = s; }
{"hexsha": "7225c3ef8c1ddf76da059791fce19b52ab9364cb", "size": 2165, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/cli/cli.cpp", "max_stars_repo_name": "ThomasBuchinger/csv-query-language", "max_stars_repo_head_hexsha": "b6b65c3dcc033b8e8c7ed06440c387c6738d37bb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/cli/cli.cpp", "max_issues_repo_name": "ThomasBuchinger/csv-query-language", "max_issues_repo_head_hexsha": "b6b65c3dcc033b8e8c7ed06440c387c6738d37bb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/cli/cli.cpp", "max_forks_repo_name": "ThomasBuchinger/csv-query-language", "max_forks_repo_head_hexsha": "b6b65c3dcc033b8e8c7ed06440c387c6738d37bb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0625, "max_line_length": 83, "alphanum_fraction": 0.5524249423, "num_tokens": 547}
simplifyProject(P::Project) = map(P) do branch map(branch) do solution solution.data end end #TODO actually convert(T, Project) function complicateProject(V) #::Vector{Vector{Vector{Float64}}} P = Project() branches = map(V) do bData branch = Branch(P) solutions::Vector{Solution} = map(bData) do sData Solution(sData, branch) end branch.solutions = solutions return branch end P.branches = branches return P end """ create(homotopy, jacobian, projection) """ create(v...) = begin ses = Session() ses.P = Project() ses.core = Galerkin(ses, v...) ses.cont = PC(ses) ses.viz = GalerkinViz(ses) show(ses.cont); show(ses.core); show(ses.viz) return ses end """ save(filename, session[, overwrite]) """ save(filename, S::Session; overwrite=false) = begin if !isfile("save/$(filename)") || overwrite; open("save/$(filename)", "w") do f serialize(f, simplifyProject(S.P)) end else error("File already exists. Use overwrite=true .")end return Void end #TODO restore non-serializable stuff (figures, observer) """ load(filename, homotopy, jacobian, projection) """ load(filename, v...) = begin V = open(deserialize, "save/$(filename)") ses = Session() ses.P = complicateProject(V) ses.core = Galerkin(ses, v...) ses.cont = PC(ses) ses.viz = GalerkinViz(ses) show(ses.cont); show(ses.core); show(ses.viz) return ses end
{"hexsha": "7734787feb3ac4c8c73f3af2f749741f4171b21f", "size": 1368, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "master/lib/ncmprojSAVEHELPER.jl", "max_stars_repo_name": "285714/ncm", "max_stars_repo_head_hexsha": "fcf289c7ef5f8500ebcb238e36c6a7ee9e054147", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "master/lib/ncmprojSAVEHELPER.jl", "max_issues_repo_name": "285714/ncm", "max_issues_repo_head_hexsha": "fcf289c7ef5f8500ebcb238e36c6a7ee9e054147", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "master/lib/ncmprojSAVEHELPER.jl", "max_forks_repo_name": "285714/ncm", "max_forks_repo_head_hexsha": "fcf289c7ef5f8500ebcb238e36c6a7ee9e054147", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5862068966, "max_line_length": 119, "alphanum_fraction": 0.6849415205, "num_tokens": 409}
# assume this is run after detect.py has been run, this means that the images in data/images # have corresponding data in labels from PIL import Image import numpy as np import pandas as pd import os import random import sklearn import skimage import skimage.io import matplotlib.pyplot as plt import pathlib PROJECT_DIR = str(pathlib.Path(__file__).parent.absolute()) +'/' IMAGES_DIR = PROJECT_DIR + 'data/images' LABELS_DIR = PROJECT_DIR + 'data/cropped/labels' CROPPED_DIR = PROJECT_DIR + 'data/cropped/' files = [] for (dirpath, dirnames, filenames) in os.walk(IMAGES_DIR): files.extend(filenames) break def map_files_to_labels(image): text_path = image + ".txt" return text_path labels = [map_files_to_labels(file) for file in files] def crop_image(file,label, output_dir): width, height = image.size # find the largest of the squares here # but for now just get the first one class_id, center_x, center_y, box_width, box_height = [float(x) for x in list(label.iloc[0])[0].split(' ')] pixel_center_x = width*center_x pixel_center_y = height*center_y pixel_box_width = width*box_width pixel_box_height = height*box_height box_top_left_x = pixel_center_x - pixel_box_width/2 box_top_left_y = pixel_center_y - pixel_box_height/2 left = box_top_left_x top= box_top_left_y right = (box_top_left_x + pixel_box_width ) bottom = (box_top_left_y + pixel_box_height ) cropped_image = image.crop((left, top, right, bottom)) #image.show() cropped_image.save(output_dir + files[i]) for i in range(len(files)): image = Image.open(IMAGES_DIR + '/' + files[i]) if( not os.path.exists(LABELS_DIR + "/" +labels[i])): continue label = pd.read_csv(LABELS_DIR + "/" +labels[i], header=None) crop_image(image,label,CROPPED_DIR)
{"hexsha": "d702d46ecc353dd76ae63eed6d847175a92b2fd8", "size": 1835, "ext": "py", "lang": "Python", "max_stars_repo_path": "crop_images_from_yolo_labels.py", "max_stars_repo_name": "AndrewLaird/ChessTutorModels", "max_stars_repo_head_hexsha": "c4fd960417d5b9918e430d040deb89fed3f4b73b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "crop_images_from_yolo_labels.py", "max_issues_repo_name": "AndrewLaird/ChessTutorModels", "max_issues_repo_head_hexsha": "c4fd960417d5b9918e430d040deb89fed3f4b73b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "crop_images_from_yolo_labels.py", "max_forks_repo_name": "AndrewLaird/ChessTutorModels", "max_forks_repo_head_hexsha": "c4fd960417d5b9918e430d040deb89fed3f4b73b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5833333333, "max_line_length": 112, "alphanum_fraction": 0.7177111717, "include": true, "reason": "import numpy", "num_tokens": 480}
import numpy as np import pandas as pd from glob import glob dfs = list() directoryPath = 'data/raw/data_for_november_2019_evaluation/south_sudan_data/IMF/' filenames = glob(directoryPath + 'imf*.xlsx') for filename in filenames: df = pd.read_excel(filename) df = df.transpose() index_val = df.index.values indicator = index_val[0] year_val = index_val[1:] colnames = df.iloc[0,:] df = df.iloc[1:, :] col_dict = dict(zip(list(range(df.shape[1])), colnames)) df.rename(col_dict, axis=1, inplace=True) df.dropna(axis=1, how='all', inplace=True) df.rename({'South Sudan, Republic of' : 'Value'}, axis=1, inplace=True) df['Value'].replace('no data', np.nan, inplace=True) df['Variable'] = indicator df['Year'] = df.index df['Country'] = 'South Sudan' for col in colnames: if 'Ethiopia' == col: ethiopia_ind_val = df[col].values df1 = pd.DataFrame({'Variable':indicator, 'Year':df.index, 'Value': ethiopia_ind_val, 'Country':'Ethiopia'}) df = pd.concat([df, df1], sort=False, ignore_index=True) df = df[['Year', 'Variable', 'Value', 'Country']] dfs.append(df) big_frame = pd.concat(dfs, sort=False, ignore_index=False) big_frame.index = list(range(big_frame.shape[0])) big_frame['Unit'] = big_frame['Variable'].apply(lambda st: st[st.find('(') + 1:st.find(')')]) big_frame['Source'], big_frame['Month'], big_frame['County'], big_frame['State'] = 'IMF', None, None, None big_frame.dropna(subset=['Value'], inplace=True) big_frame['Variable'] = big_frame['Variable'].str.replace(r'\(.*?\)', '').str.strip() big_frame.to_csv('data/IMF-data.csv', index=False)
{"hexsha": "450f13f46de61c642325070826fad41fef1b5418", "size": 1740, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/data_processing/IMF.py", "max_stars_repo_name": "mikiec84/delphi", "max_stars_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2018-03-03T11:57:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T21:19:54.000Z", "max_issues_repo_path": "scripts/data_processing/IMF.py", "max_issues_repo_name": "mikiec84/delphi", "max_issues_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 385, "max_issues_repo_issues_event_min_datetime": "2018-02-21T16:52:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-17T07:44:56.000Z", "max_forks_repo_path": "scripts/data_processing/IMF.py", "max_forks_repo_name": "mikiec84/delphi", "max_forks_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2018-03-20T01:08:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-29T01:04:49.000Z", "avg_line_length": 30.5263157895, "max_line_length": 120, "alphanum_fraction": 0.6298850575, "include": true, "reason": "import numpy", "num_tokens": 481}
""" Test to exercise Small File Workload Note: This test is using the benchmark-operator and the elastic search, so it start process with port forwarding on port 9200 from the host that run the test (localhost) to the elastic-search within the open-shift cluster, so, if you host is listen to port 9200, this test can not be running in your host. """ # Builtin modules import json import logging # 3ed party modules import os.path from elasticsearch import Elasticsearch, exceptions as ESExp import numpy as np import pytest import time # Local modules from ocs_ci.framework import config from ocs_ci.framework.testlib import performance from ocs_ci.helpers.helpers import get_full_test_logs_path from ocs_ci.ocs import benchmark_operator, constants, exceptions from ocs_ci.ocs.elasticsearch import ElasticSearch from ocs_ci.ocs.perfresult import PerfResult from ocs_ci.ocs.perftests import PASTest from ocs_ci.ocs.utils import get_pod_name_by_pattern from ocs_ci.utility import templating from ocs_ci.utility.utils import ceph_health_check log = logging.getLogger(__name__) class SmallFileResultsAnalyse(PerfResult): """ This class is reading all test results from elasticsearch server (which the benchmark-operator running of the benchmark is generate), aggregate them by : test operation (e.g. create / delete etc.) sample (for test to be valid it need to run with more the one sample) host (test can be run on more then one pod {called host}) it generates results for all tests as one unit which will be valid only if the deviation between samples is less the 5% """ managed_keys = { "IOPS": {"name": "iops", "op": np.sum}, "MiBps": {"name": "mbps", "op": np.sum}, "elapsed": {"name": "elapsed-time", "op": np.average}, "files": {"name": "files_per_thread", "op": np.sum}, "filesPerSec": {"name": "Files-Sec", "op": np.sum}, "records": {"name": "Rec-per-thread", "op": np.sum}, } def __init__(self, uuid, crd, full_log_path, es_con): """ Initialize the object by reading some of the data from the CRD file and by connecting to the ES server and read all results from it. Args: uuid (str): the unique uid of the test crd (dict): dictionary with test parameters - the test yaml file that modify it in the test itself. full_log_path (str): the path of the results files to be found es_con (elasticsearch): an elasticsearch connection """ super(SmallFileResultsAnalyse, self).__init__(uuid, crd) self.index = crd["spec"]["es_index"] + "-results" self.new_index = crd["spec"]["es_index"] + "-fullres" self.full_log_path = full_log_path # make sure we have connection to the elastic search server self.es = es_con # WA for Cloud environment where pod can not send results to ES self.dont_check = False # make sure we have connection to the elastic search server # self.es_connect() # Creating full results dictionary self.add_key("clients", crd["spec"]["workload"]["args"]["clients"]) self.add_key("samples", crd["spec"]["workload"]["args"]["samples"]) self.add_key("threads", crd["spec"]["workload"]["args"]["threads"]) self.add_key("operations", crd["spec"]["workload"]["args"]["operation"]) self.add_key("full-res", {}) # Calculate the number of records for the test # Total threads for one sample - one operation self.records = self.results["clients"] * self.results["threads"] # Number of threads for all samples self.records *= self.results["samples"] # Number of records for all operation - cleanup does not count numofops = len(self.results["operations"]) if "cleanup" in self.results["operations"]: numofops -= 1 self.records *= numofops def read(self): """ Reading all test records from the elasticsearch server into dictionary inside this object """ query = {"query": {"match": {"uuid": f'"{self.uuid}"'}}} log.info("Reading all data from ES server") try: # Initialize the scroll page = self.es.search(index=self.index, scroll="2m", size=1000, body=query) sid = page["_scroll_id"] scroll_size = page["hits"]["total"]["value"] log.info( f"Looking for {self.records} records and found {scroll_size} records." ) self.all_results = page["hits"]["hits"] # Start scrolling while scroll_size > 0: page = self.es.scroll(scroll_id=sid, scroll="2m") # Update the scroll ID sid = page["_scroll_id"] self.all_results += page["hits"]["hits"] # Get the number of results that we returned in the last scroll scroll_size = len(page["hits"]["hits"]) log.debug(f"{scroll_size} records was read") log.info(f"The total record that was read : {len(self.all_results)}") log.debug(self.all_results) total_rec_found = len(self.all_results) if total_rec_found < 1: log.warning("No data in ES server, disabling results calculation") self.dont_check = True if total_rec_found < self.records: log.error("Not all data read from ES server") self.dont_check = True if total_rec_found > self.records: log.warning("More records then expected was read, check the results!") except ESExp.NotFoundError: log.warning("No data in ES server, disabling results calculation") self.dont_check = True def thread_read(self, host, op, snum): """ This method read all threads record of one host / operation and sample Args: host (str): the name of the pod that ran the test op (str): the operation that is tested snum (int): sample of test as string Returns: dict : dictionary of results records """ res = {} log.debug(f"Reading all threads for {op} / {snum} / {host}") for hit in self.all_results: if ( hit["_source"]["host"] == host and hit["_source"]["optype"] == op and hit["_source"]["sample"] == snum ): for key in self.managed_keys.keys(): # not all operation have all values, so i am using try try: val = float("{:.2f}".format(hit["_source"][key])) if self.managed_keys[key]["name"] in res.keys(): res[self.managed_keys[key]["name"]].append(val) else: res[self.managed_keys[key]["name"]] = [val] except Exception: pass res = self.aggregate_threads_results(res) return res def aggregate_threads_results(self, res): """ Aggregation of one section of results, this can be threads in host, hosts in sample, samples in test Args: res (dict) : dictionary of results Returns: dict : dictionary with the aggregate results. """ results = {} for key in self.managed_keys.keys(): if self.managed_keys[key]["name"] in res.keys(): results[key] = self.managed_keys[key]["op"]( res[self.managed_keys[key]["name"]] ) # This is the place to check in host (treads) deviation. return results def combine_results(self, results, clear): """ Combine 2 or more results (hosts in sample / samples in test) to one result. Args: results (dict): dictionary of results to combine clear (bool): return only combined results or not. True - return only combined results False - add the combine results to originals results Returns: dict : dictionary of results records """ res = {} log.debug(f"The results to combine {json.dumps(results, indent=2)}") for rec in results.keys(): record = results[rec] for key in self.managed_keys.keys(): # not all operation have all values, so i am using try try: val = float("{:.2f}".format(record[key])) if self.managed_keys[key]["name"] in res.keys(): res[self.managed_keys[key]["name"]].append(val) else: res[self.managed_keys[key]["name"]] = [val] except Exception: pass if not clear: res.update(self.aggregate_threads_results(res)) else: res = self.aggregate_threads_results(res) log.debug(f"The combines results are : {json.dumps(res, indent=2)}") return res def aggregate_host_results(self): """ Aggregation results from all hosts in single sample """ results = {} for op in self.results["operations"]: for smp in range(self.results["samples"]): sample = smp + 1 if op in self.results["full-res"].keys(): self.results["full-res"][op][sample] = self.combine_results( self.results["full-res"][op][sample], True ) return results def aggregate_samples_results(self): """ Aggregation results from all hosts in single sample, and compare between samples. Returns: bool: True if results deviation (between samples) is les or equal to 20%, otherwise False """ test_pass = True for op in self.results["operations"]: log.debug( f'Aggregating {op} - {json.dumps(self.results["full-res"][op], indent=3)}' ) results = self.combine_results(self.results["full-res"][op], False) log.info(f"Check IOPS {op} samples deviation") for key in self.managed_keys.keys(): if self.managed_keys[key]["name"] in results.keys(): results[key] = self.managed_keys[key]["op"]( results[self.managed_keys[key]["name"]] ) if isinstance(results[self.managed_keys[key]["name"]], list): results[key] = np.average( results[self.managed_keys[key]["name"]] ) results[key] = float("{:.2f}".format(results[key])) if key == "IOPS": st_deviation = np.std(results[self.managed_keys[key]["name"]]) mean = np.mean(results[self.managed_keys[key]["name"]]) pct_dev = (st_deviation / mean) * 100 if pct_dev > 20: log.error( f"Deviation for {op} IOPS is more the 20% ({pct_dev})" ) # TODO: unmarked next line after implementing data cleansing # test_pass = False self.results["full-res"][op] = results return test_pass def get_clients_list(self): """ Finding and creating a list of all hosts that was used in this test Returns: list: a list of pods name """ res = [] for hit in self.all_results: host = hit["_source"]["host"] if host not in res: res.append(host) log.info(f"The pods names used in this test are {res}") return res def init_full_results(self): """ Initialize the full results Internal DB as dictionary. """ log.info("Initialising results DB") # High level of internal results DB is operation for op in self.results["operations"]: self.results["full-res"][op] = {} # second level is sample for smp in range(self.results["samples"]): sample = smp + 1 self.results["full-res"][op][sample] = {} # last level is host (all threads will be in the host) for host in self.results["hosts"]: self.results["full-res"][op][sample][host] = self.thread_read( host, op, sample ) log.debug(f"The Initial DB is : {self.results['full-res']}") @performance class TestSmallFileWorkload(PASTest): """ Deploy benchmark operator and run SmallFile workload SmallFile workload using https://github.com/distributed-system-analysis/smallfile smallfile is a python-based distributed POSIX workload generator which can be used to quickly measure performance for a variety of metadata-intensive workloads """ def setup(self): """ Setting up test parameters """ log.info("Starting the test setup") self.benchmark_name = "SmallFiles" self.client_pod_name = "smallfile-client" if config.PERF.get("deploy_internal_es"): self.es = ElasticSearch() else: if config.PERF.get("internal_es_server") == "": self.es = None return else: self.es = { "server": config.PERF.get("internal_es_server"), "port": config.PERF.get("internal_es_port"), "url": f"http://{config.PERF.get('internal_es_server')}:{config.PERF.get('internal_es_port')}", } # verify that the connection to the elasticsearch server is OK if not super(TestSmallFileWorkload, self).es_connect(): self.es = None return super(TestSmallFileWorkload, self).setup() # deploy the benchmark-operator self.deploy_benchmark_operator() def setting_storage_usage(self, file_size, files, threads, samples, clients): """ Getting the storage capacity, calculate the usage of the storage and setting the workload CR rile parameters. Args: file_size (int) : the size of the file to be used files (int) : number of files to use threads (int) : number of threads to be use in the test samples (int) : how meany samples to run for each test clients (int) : number of clients (pods) to use in the test """ self.crd_data["spec"]["workload"]["args"]["file_size"] = file_size self.crd_data["spec"]["workload"]["args"]["files"] = files self.crd_data["spec"]["workload"]["args"]["threads"] = threads self.crd_data["spec"]["workload"]["args"]["samples"] = samples self.crd_data["spec"]["workload"]["args"]["clients"] = clients # Calculating the size of the volume that need to be test, it should # be at least twice in the size then the size of the files, and at # least 100Gi. # Since the file_size is in Kb and the vol_size need to be in Gb, more # calculation is needed. vol_size = int(files * threads * file_size * 3) vol_size = int(vol_size / constants.GB2KB) if vol_size < 100: vol_size = 100 self.crd_data["spec"]["workload"]["args"]["storagesize"] = f"{vol_size}Gi" def init_full_results(self, full_results): """ Initialize the full results object which will send to the ES server Args: full_results (obj): an empty SmallFileResultsAnalyse object Returns: SmallFileResultsAnalyse (obj): the input object fill with data """ for key in self.environment: full_results.add_key(key, self.environment[key]) # Calculating the total size of the working data set - in GB full_results.add_key( "dataset", self.crd_data["spec"]["workload"]["args"]["file_size"] * self.crd_data["spec"]["workload"]["args"]["files"] * self.crd_data["spec"]["workload"]["args"]["threads"] * full_results.results["clients"] / constants.GB2KB, ) full_results.add_key( "global_options", { "files": self.crd_data["spec"]["workload"]["args"]["files"], "file_size": self.crd_data["spec"]["workload"]["args"]["file_size"], "storageclass": self.crd_data["spec"]["workload"]["args"][ "storageclass" ], "vol_size": self.crd_data["spec"]["workload"]["args"]["storagesize"], }, ) return full_results def generate_kibana_link(self, index, columns): """ Generating full link to the Kibana server with full test results information Args: index (str): the kibana index name (results, response time, etc.) columns (str): list of all columns to display Return: str : an http link to the appropriate kibana report """ stime = self.start_time.replace("GMT", ".000Z") etime = self.end_time.replace("GMT", ".000Z") log.info(json.dumps(self.crd_data.get("spec").get("elasticsearch"), indent=2)) host = self.crd_data.get("spec").get("elasticsearch").get("url") try: host = host.split(":")[1].replace("//", "") except Exception: log.error("No ES configuretion") return "" kibana_id = self.get_kibana_indexid(host, index) app = "app/kibana#/discover" if self.dev_mode: app = "app/discover#/" result = ( f"http://{host}:5601/{app}" f"?_a=(columns:!({columns}),filters:!(),index:'{kibana_id}',interval:auto," f"query:(language:kuery,query:'uuid:{self.uuid}'),sort:!())" f"&_g=(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:'{stime}',to:'{etime}'))" ) return result def collect_benchmark_logs(self): """ Collecting the test log from all benchmark pods """ # Getting full list of benchmark clients self.full_client_list = get_pod_name_by_pattern( self.client_pod_name, benchmark_operator.BMO_NAME ) # Collecting logs from each pod for clpod in self.full_client_list: test_logs = self.pod_obj.exec_oc_cmd(f"logs {clpod}", out_yaml_format=False) log_file_name = f"{self.full_log_path}/{clpod}-pod.log" try: with open(log_file_name, "w") as f: f.write(test_logs) log.info(f"The Test log can be found at : {log_file_name}") except Exception: log.warning(f"Cannot write the log to the file {log_file_name}") log.info("Logs from all client pods got successfully") def run(self): log.info("Running SmallFile bench") self.deploy_and_wait_for_wl_to_start(timeout=240, sleep=10) # Getting the UUID from inside the benchmark pod self.uuid = self.operator.get_uuid(self.client_pod) self.wait_for_wl_to_finish(sleep=30) self.collect_benchmark_logs() try: if "RUN STATUS DONE" in self.test_logs: log.info("SmallFiles has completed successfully") return True except IOError: log.warning("SmallFiles failed to complete") return False def teardown(self): """ The teardown of the test environment in the end. """ log.info("cleanup the environment") if isinstance(self.es, ElasticSearch): self.es.cleanup() self.operator.cleanup() # wait up to 45 min for the ceph cluster be health OK after backend # operation completed. log.info("Verify (and wait if needed) that ceph health is OK") ceph_health_check(tries=45, delay=60) # Let the background operation (delete backed images) to finish time.sleep(120) @pytest.mark.parametrize( argnames=["file_size", "files", "threads", "samples", "clients", "interface"], argvalues=[ pytest.param(*[4, 5000, 22, 5, 33, constants.CEPHBLOCKPOOL]), pytest.param(*[16, 5000, 8, 5, 21, constants.CEPHBLOCKPOOL]), pytest.param(*[4, 2500, 4, 5, 9, constants.CEPHFILESYSTEM]), pytest.param(*[16, 1500, 4, 5, 9, constants.CEPHFILESYSTEM]), ], ) @pytest.mark.polarion_id("OCS-1295") def test_smallfile_workload( self, file_size, files, threads, samples, clients, interface ): """ Run SmallFile Workload Args: file_size (int) : the size of the file to be used files (int) : number of files to use threads (int) : number of threads to be use in the test samples (int) : how meany samples to run for each test interface (str) : the volume type (rbd / cephfs) """ # verify that there is an elasticsearch server for the benchmark if not self.es: log.error("This test must have an Elasticsearch server") return False # Getting the full path for the test logs self.full_log_path = get_full_test_logs_path(cname=self) self.results_path = get_full_test_logs_path(cname=self) self.full_log_path += ( f"-{file_size}-{files}-{threads}-{samples}-{clients}-{interface}" ) log.info(f"Logs file path name is : {self.full_log_path}") # Loading the main template yaml file for the benchmark log.info("Create resource file for small_files workload") self.crd_data = templating.load_yaml(constants.SMALLFILE_BENCHMARK_YAML) # Saving the Original elastic-search IP and PORT - if defined in yaml self.es_info_backup(self.es) self.set_storageclass(interface=interface) # Setting the data set to 40% of the total storage capacity self.setting_storage_usage(file_size, files, threads, samples, clients) self.get_env_info() if not self.run(): log.error("The benchmark failed to run !") return # Setting back the original elastic-search information if self.backup_es: self.crd_data["spec"]["elasticsearch"] = self.backup_es # Initialize the results doc file. full_results = self.init_full_results( SmallFileResultsAnalyse( self.uuid, self.crd_data, self.full_log_path, self.main_es ) ) log.info(f"Full results is : {full_results.results}") if isinstance(self.es, ElasticSearch): # Using internal deployed elasticsearch log.info("Getting data from internal ES") if self.main_es: self.copy_es_data(self.es) full_results.read() else: log.info("Dumping data from the Internal ES to tar ball file") self.es.dumping_all_data(self.full_log_path) else: log.info(self.es) self.es = Elasticsearch( hosts=[{"host": self.es["server"], "port": self.es["port"]}] ) full_results.read() full_results.add_key( "test_time", {"start": self.start_time, "end": self.end_time} ) if self.main_es: full_results.es = self.main_es if not full_results.dont_check: full_results.add_key("hosts", full_results.get_clients_list()) full_results.init_full_results() full_results.aggregate_host_results() test_status = full_results.aggregate_samples_results() # Generate link for the all data in the kibana columens = "optype,files,filesPerSec,elapsed,sample,tid" klink = self.generate_kibana_link("ripsaw-smallfile-results", columens) # Generate link for the all response-time data in the kibana columens = "optype,sample,iops,max,min,mean,'90%25','95%25','99%25'" rtlink = self.generate_kibana_link("ripsaw-smallfile-rsptimes", columens) full_results.all_results = {"kibana_all": klink, "kibana_rsptime": rtlink} if full_results.es_write(): res_link = full_results.results_link() log.info(f"The Result can be found at : {res_link}") # Create text file with results of all subtest (4 - according to the parameters) self.write_result_to_file(res_link) else: test_status = True assert test_status, "Test Failed !" def test_smallfile_results(self): """ This is not a test - it is only check that previous test ran and finish as expected and reporting the full results (links in the ES) of previous tests (4) """ # TODO : This function will push the results (if exists) to the performance dashboard. self.results_path = get_full_test_logs_path( cname=self, fname="test_smallfile_workload" ) self.results_file = os.path.join(self.results_path, "all_results.txt") log.info(f"Check results in {self.results_file}") try: input_file = open(self.results_file, "r") data = input_file.read().split("\n") data.pop() # remove the last empty element input_file.close() if len(data) != 4: log.error("Not all tests finished") raise exceptions.BenchmarkTestFailed() else: log.info("All test finished OK, and the results can be found at :") for res in data: log.info(res) except OSError as err: log.error(f"OS error: {err}") raise err
{"hexsha": "8a2b1262bae497c57d706316ce12859ffd71835d", "size": 26594, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/e2e/performance/io_workload/test_small_file_workload.py", "max_stars_repo_name": "annagitel/ocs-ci", "max_stars_repo_head_hexsha": "284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-12T09:01:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-12T09:01:36.000Z", "max_issues_repo_path": "tests/e2e/performance/io_workload/test_small_file_workload.py", "max_issues_repo_name": "annagitel/ocs-ci", "max_issues_repo_head_hexsha": "284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-30T20:06:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-30T20:05:46.000Z", "max_forks_repo_path": "tests/e2e/performance/io_workload/test_small_file_workload.py", "max_forks_repo_name": "annagitel/ocs-ci", "max_forks_repo_head_hexsha": "284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8293029872, "max_line_length": 115, "alphanum_fraction": 0.5744528841, "include": true, "reason": "import numpy", "num_tokens": 5697}
import pandas as pd import numpy as np import random import sys import pathlib import string from datetime import datetime # TODO: # Ensure generated company names are unique # OverflowError: int too large to convert to float test_data = pd.DataFrame() def string_generator(size): chars = string.ascii_uppercase + string.ascii_lowercase return ''.join(random.choice(chars) for _ in range(size)) def word_generator(num, max_size=10): """Return a string of random length (1-10) num times""" words = [] for num in range(num): r_number = random.randint(1,max_size) words.append(string_generator(r_number)) return words def save_file(file_name, file_type='xlsx'): """Save xlsx file as file_name""" if file_type is 'xlsx': save_name = (sys.path[0] + '\\'+ file_name + '.xlsx') save_name_path = pathlib.Path(save_name) if save_name_path.is_file(): print('File already exists.') else: test_data.to_excel(save_name) elif file_type is 'csv': save_name = (sys.path[0] + '\\'+ file_name + '.csv') save_name_path = pathlib.Path(save_name) if save_name_path.is_file(): print('File already exists.') else: test_data.to_csv(save_name) else: print('File not saved. Invalid filetype.') def linear_graph(size, positive=True): """Take a size and return either a positive or negative linear set, adjusted by a random number""" graph_frame = [] if positive: for i in range(size): graph_frame.append(i + 1 + np.round(np.random.uniform(low=0.5, high=5), decimals=2)) else: for i in range(size): graph_frame.append(i - 1 - np.round(np.random.uniform(low=0.5, high=5), decimals=2)) return graph_frame def exponential_graph(size, positive=True): """Take a size and return either a positive or negative exponential set, adjusted by a random number""" graph_frame = [] if positive: for i in range(size): graph_frame.append(i*i + np.round(np.random.uniform(low=0.5, high=5), decimals=2)) else: for i in range(size): graph_frame.append(-i*i - np.round(np.random.uniform(low=0.5, high=5), decimals=2)) return graph_frame def cubic_graph(size, positive=True): """Take a size and return either a positive or negative cubic set, adjusted by a random number""" graph_frame = [] if positive: for i in range(size): graph_frame.append(i*i*i + np.round(np.random.uniform(low=0.5, high=5), decimals=2)) else: for i in range(size): graph_frame.append(-i*i*i - np.round(np.random.uniform(low=0.5, high=5), decimals=2)) return graph_frame def expo_graph(size, positive=True): """Take a size and return either a positive or negative expo set, adjusted by a random number""" graph_frame = [] if positive: for i in range(size): graph_frame.append(5**i + np.round(np.random.uniform(low=0.0, high=0.5), decimals=2)) else: for i in range(size): graph_frame.append(5**(-i) - np.round(np.random.uniform(low=0.0, high=0.5), decimals=2)) return graph_frame def random_dates(n, unit='D', seed=None): """Return random dates between a year n times""" start_time = pd.to_datetime('2019-01-01', infer_datetime_format=True ) end_time = pd.to_datetime('2019-12-31', infer_datetime_format=True ) time_frame = [] if not seed: np.random.seed(0) ndays = (end_time - start_time).days + 1 time_frame.append(start_time + pd.to_timedelta( np.random.randint(0, ndays, n), unit=unit )) returned_time = pd.DataFrame(time_frame).transpose() returned_time.columns = ['Dates'] ordered_time = returned_time.sort_values('Dates').reset_index(drop=True) return ordered_time list_of_graphs = [cubic_graph, exponential_graph, linear_graph, expo_graph] list_of_companies = word_generator(500) def build_dataframe(): """Return a dataframe made of several companies with random data""" my_frame_data = pd.DataFrame() for company in list_of_companies: mini_data = pd.DataFrame() mini_data['Data'] = random.choice(list_of_graphs)(200, positive=bool(random.choice([True, False]))) mini_data['Company'] = company mini_data['Dates'] = random_dates(200) my_frame_data = my_frame_data.append(mini_data, ignore_index=True) # Append doesn't happen in-place, so we have to store it.. my_frame_data = my_frame_data[['Dates', 'Company', 'Data']] # Reorder the columns return my_frame_data example = build_dataframe() #build_dataframe().to_csv('savederp3.csv') print(example)
{"hexsha": "43126adaba652688d76bf85401491c34bc676e50", "size": 4877, "ext": "py", "lang": "Python", "max_stars_repo_path": "RandoData.py", "max_stars_repo_name": "Midnitte/RandoData", "max_stars_repo_head_hexsha": "715cfbab261b0247b66b2131b226ee179973781d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "RandoData.py", "max_issues_repo_name": "Midnitte/RandoData", "max_issues_repo_head_hexsha": "715cfbab261b0247b66b2131b226ee179973781d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-10-09T12:21:06.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T12:32:13.000Z", "max_forks_repo_path": "RandoData.py", "max_forks_repo_name": "Midnitte/RandoData", "max_forks_repo_head_hexsha": "715cfbab261b0247b66b2131b226ee179973781d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1259259259, "max_line_length": 134, "alphanum_fraction": 0.6448636457, "include": true, "reason": "import numpy", "num_tokens": 1143}
[STATEMENT] lemma list_member_conv_member [simp]: "equal_base.list_member (=) = List.member" [PROOF STATE] proof (prove) goal (1 subgoal): 1. equal_base.list_member (=) = List.member [PROOF STEP] proof(intro ext) [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>x xa. equal_base.list_member (=) x xa = List.member x xa [PROOF STEP] fix xs and x :: 'a [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>x xa. equal_base.list_member (=) x xa = List.member x xa [PROOF STEP] show "equal_base.list_member (=) xs x = List.member xs x" [PROOF STATE] proof (prove) goal (1 subgoal): 1. equal_base.list_member (=) xs x = List.member xs x [PROOF STEP] by(induct xs)(auto simp add: List.member_def) [PROOF STATE] proof (state) this: equal_base.list_member (=) xs x = List.member xs x goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 348, "file": "Containers_DList_Set", "length": 5}
import sys import json import time import array import struct import logging import numpy as np from copy import deepcopy from pybleno import * import wasatch from wasatch.WasatchDevice import WasatchDevice from wasatch.WasatchBus import WasatchBus from wasatch import applog logger = logging.getLogger(__name__) ################################################################################ # # # Characteristics # # # ################################################################################ class Battery_Status(Characteristic): def __init__(self, uuid, device, msg_queue, msg_func): Characteristic.__init__(self, {'uuid': uuid, 'properties': [ 'read', 'notify'], 'value': None}) self._value = array.array('B', [0] * 0) self.page = None self.subpage = None self.device = device self.guid = deepcopy(uuid) self.msg_num = 0 self.msg_queue = msg_queue self.msg_func = msg_func def onReadRequest(self, offset, callback): logger.debug("Bluetooth: Central requested battery status.") msg_id = self.guid + str(self.msg_num) msg = {'Command': 'HAS_BATTERY', 'Value': None} has_battery = self.msg_func(msg_id, msg ,5)["Res_Value"] if has_battery: self.msg_num += 1 msg_id = self.guid + str(self.msg_num) msg = {'Command': 'BATTERY', 'Value': None} dev_battery = self.msg_func(msg_id, msg , 5)["Res_Value"] logger.debug(f"Bluetooth: Device has battery. Returning state of {dev_battery}%.") self.msg_num += 1 self.msg_num %= 8000 dev_battery = int(dev_battery) callback(Characteristic.RESULT_SUCCESS, dev_battery.to_bytes(2,"big")) else: logger.debug("Bluetooth: Device does not have battery. Returning 100.%") full_battery = 100 callback(Characteristic.RESULT_SUCCESS,full_battery.to_bytes(2,"big")) class Acquire_Spectrum(Characteristic): def __init__(self, uuid, device, msg_queue, msg_func): Characteristic.__init__(self, {'uuid': uuid, 'properties': ['write'], 'value': None}) self._value = array.array('B',[0] * 0) self.current_spec = None self.device = device self.guid = deepcopy(uuid) self.msg_num = 0 self.msg_queue = msg_queue self.msg_func = msg_func def onWriteRequest(self,data,offset,withoutResponse,callback): logger.debug("Bluetooth: Received command to acquire spectrum. Acquiring spectrum...") msg_id = self.guid + str(self.msg_num) msg = {"Command": "GET_SPECTRA", "Value": None} res = self.msg_func(msg_id, msg, 5)["Res_Value"] if res is not None: self.current_spec = res self.msg_num += 1 self.msg_num %= 8000 callback(Characteristic.RESULT_SUCCESS) def get_current_spectra(self): return self.current_spec def reset_current_spectra(self): self.current_spec = None class Spectrum_Request(Characteristic): def __init__(self, uuid, device): Characteristic.__init__(self, {'uuid': uuid, 'properties': [ 'write'], 'value': None}) self._value = array.array('B',[0] * 0) self.pixel_offset = None self.device = device def onWriteRequest(self, data, offset, withoutResponse, callback): pixel_start_value = int.from_bytes(data, "big") logger.debug(f"Bluetooth: Received request to set pixel offset for spectra {pixel_start_value}.") self.pixel_offset = pixel_start_value callback(Characteristic.RESULT_SUCCESS) def get_current_offset(self): return self.pixel_offset def reset_current_offset(self): self.pixel_offset = None class EEPROM_Cmd(Characteristic): def __init__(self, uuid, device, msg_queue, msg_func): Characteristic.__init__(self, {'uuid': uuid, 'properties': [ 'write'], 'value': None}) self._value = array.array('B', [0] * 0) self.page = None self.subpage = None self.device = device self.msg_queue = msg_queue self.write_buffers = None self.msg_num = 0 self.guid = deepcopy(uuid) self.msg_func = msg_func def onWriteRequest(self, data, offset, withoutResponse, callback): # data comes in as a byte array so it is easy to manipulate page = int(data[0]) subpage = int(data[1]) if page == 0 and subpage == 0: msg_id = self.guid + str(self.msg_num) msg = {'Command': 'EEPROM', 'Value': None} self.write_buffers = self.msg_func(msg_id, msg, 5)["Res_Value"] self.msg_num += 1 self.msg_num %= 8000 self.page = page self.subpage = subpage callback(Characteristic.RESULT_SUCCESS) def get_page(self): return self.page def get_subpage(self): return self.subpage class EEPROM_Data(Characteristic): def __init__(self, uuid, cmd_status, device): Characteristic.__init__(self, {'uuid': uuid, 'properties': ['read', 'notify'], 'value': None}) self.eeprom_cmd = cmd_status self._value = array.array('B', [0] * 0) self._updateValueCallback = None self.device = device def onReadRequest(self, offset, callback): page = self.eeprom_cmd.get_page() subpage = self.eeprom_cmd.get_subpage() logger.debug(f"Bluetooth: Central requested EEPROM read of page {page} and subpage {subpage}") self._value = bytearray(self.eeprom_cmd.write_buffers[page])[(0+16*subpage):(16+16*subpage)] callback(Characteristic.RESULT_SUCCESS, self._value) def onSubscribe(self, maxValueSize, updateValueCallback): logger.debug('Bluetooth: EEPROM Data subscribed to.') slef._updateValueCallback = updateValueCallback class IntegrationTime(Characteristic): def __init__(self, uuid, device, msg_func): Characteristic.__init__(self, {'uuid': uuid, 'properties': ['read', 'write'], 'value': None}) self._value = array.array('B', [0] * 0) self._updateValueCallback = None self.device = device self.msg_num = 0 self.guid = deepcopy(uuid) self.msg_func = msg_func def onReadRequest(self, offset, callback): #logger.debug(offset, callback, self._value) msg_id = self.guid + str(self.msg_num) msg = {"Command": "GET_INT_TIME", "Value": None} self._value = self.msg_func(msg_id, msg ,5)["Res_Value"] self.msg_num += 1 self.msg_num %= 8000 logger.debug(f"Bluetooth: Got integration time of {self._value}") callback(Characteristic.RESULT_SUCCESS, self._value.to_bytes(2, "big")) def onWriteRequest(self, data, offset, withoutResponse, callback): self._value = int.from_bytes(data,"big") msg_id = self.guid + str(self.msg_num) int_value = {"Command": "SET_INT_TIME", "Value": f"{self._value}"} self.msg_func(msg_id, int_value, 5) self.msg_num += 1 self.msg_num %= 8000 logger.debug("Integration time changed to %d ms" % self._value) if self._updateValueCallback: self._updateValueCallback(self._value) callback(Characteristic.RESULT_SUCCESS) def onSubscribe(self, maxValueSize, updateValueCallback): logger.debug("onSubscribe") self._updateValueCallback = updatevalueCallback def onUnsubscribe(self): logger.debug("on unsubscribe") self._updateValueCallback = None class Scans_to_average(Characteristic): def __init__(self, uuid, device): Characteristic.__init__(self, {'uuid': uuid, 'properties': ['read', 'write'], 'value': None}) self._value = array.array('B', [0] * 0) self._updateValueCallback = None self.device = device def onReadRequest(self, offset, callback): logger.debug("Scans to average read called") callback(Characteristic.RESULT_SUCCESS, self._value) def onWriteRequest(self, data, offset, withoutResponse, callback): self._value = data device.change_setting("scans_to_average", data) logger.debug("Scans average changed to %d" %int(data)) if self._updateValueCallback: self._updateValueCallback(self._value) callback(Characteristic.RESULT_SUCCESS) def onSubscribe(self, maxValueSize, updateValueCallback): logger.debug("onSubscribe") self._updateValueCallback = updatevalueCallback def onUnsubscribe(self): logger.debug("on unsubscribe") self._updateValueCallback = None class Read_Spectrum(Characteristic): def __init__(self, uuid, spec_acquire, spec_cmd, device, laser_state): Characteristic.__init__(self, {'uuid': uuid, 'properties': ['read'], 'value': None}) self._value = array.array('B', [0] * 0) self._updateValueCallback = None self.spec_acquire = spec_acquire self.spec_cmd = spec_cmd self.device = device def onReadRequest(self, offset, callback): #logger.debug(self._value, self.value, callback, offset) logger.debug("Bluetooth: Received request to return spectrum that has been taken.") spec_read = self.spec_acquire.get_current_spectra() pixel_offset = self.spec_cmd.get_current_offset() reading = spec_read logger.debug(f"Creating return bytes from reading. Starting at pixel {pixel_offset}.") return_bytes = bytes() if reading is not None: while len(return_bytes) < 180 and pixel_offset < len(reading): pixel_byte_value = int(reading[pixel_offset]).to_bytes(2,"little") return_bytes += pixel_byte_value pixel_offset += 1 return_bytes = pixel_offset.to_bytes(2,"big") + return_bytes else: return_bytes = pixel_offset.to_bytes(2,"big") + return_bytes logger.error(f"Reading was None, so returning null bytes value") logger.debug(f"Finished building return bytes of length {len(return_bytes)} containing up to pixel {pixel_offset}.") callback(Characteristic.RESULT_SUCCESS, return_bytes) def onSubscribe(self, maxValueSize, updateValueCallback): logger.debug("onSubscribe") self._updateValueCallback = updatevalueCallback def onUnsubscribe(self): logger.debug("on unsubscribe") self._updateValueCallback = None class Gain(Characteristic): def __init__(self, uuid, device, msg_func): Characteristic.__init__(self, {'uuid': uuid, 'properties': ['read', 'write'], 'value': None}) self._value = array.array('B', [0] * 0) self._updateValueCallback = None self.device = device self.msg_func = msg_func self.guid = deepcopy(uuid) self.msg_num = 0 def onReadRequest(self, offset, callback): msg_id = self.guid + str(self.msg_num) msg = {"Command": "GET_GAIN", "Value": None} gain = self.msg_func(msg_id, msg, 5)["Res_Value"] self.msg_num += 1 self.msg_num %= 8000 gain = int(gain) logger.debug("Bluetooth: Received device response for gain of {gain}") callback(Characteristic.RESULT_SUCCESS, gain.to_bytes(2, "big")) def onWriteRequest(self, data, offset, withoutResponse, callback): data = bytearray(data) lsb = data[1] msb = data[0] gain = msb + lsb / 256.0 msg_id = self.guid + str(self.msg_num) logger.debug(f"Bluetooth: Updating gain value to {gain}") msg = {"Command": "SET_GAIN", "Value": f"{gain}"} self.msg_func(msg_id, msg, 5) self.msg_num += 1 self.msg_num %= 8000 callback(Characteristic.RESULT_SUCCESS) class Laser_State(Characteristic): def __init__(self, uuid, device, msg_queue, msg_func): Characteristic.__init__(self, {'uuid': uuid, 'properties': ['read', 'write', 'notify'], 'value': None}) self._value = array.array('B', [0] * 0) self._updateValueCallback = None self.raman_mode = False self.device = device self.laser_type = 0 self.laser_enable = False self.laser_watchdog = False self.watchdog_time = 5 self.laser_delay = 300 self.msg_func = msg_func self.guid = deepcopy(uuid) self.msg_num = 0 def disable_laser_error_byte(self): msg_id = self.guid + str(self.msg_num) self.device.hardware.set_laser_enable(False) msg = {"Command": "SET_LASER", "Value": "0"} self.msg_func(msg_id, msg, 0) self.msg_num += 1 self.msg_num %= 8000 logger.warn("Bluetooth: Received an incorrect byte that triggered a laser shut off.") def onReadRequest(self, offset, callback): logger.debug("Bluetooth: Received laser read request.") msg_id = self.guid + str(self.msg_num) msg = {"Command": "GET_RAMAN_MODE", "Value": None} raman_mode = self.msg_func(msg_id, msg, 2)["Res_Value"] if raman_mode == None: logger.error("Got a none value for raman_mode, returning 0") raman_mode = 0 laser_type = 0 msg = {"Command": "GET_LASER_STATE", "Value": None} laser_enable = self.msg_func(msg_id, msg, 2)["Res_Value"] if laser_enable == None: logger.error("Got a none value for laser_enable, returning 0") laser_enable = 0 msg = {"Command": "GET_WATCHDOG_DELAY", "Value": None} laser_watchdog = self.msg_func(msg_id, msg, 2)["Res_Value"] if laser_watchdog == None: logger.error("Got a none value for laser_watchdog, returning 0") laser_watchdog = 0 msg = {"Command": "GET_RAMAN_DELAY", "Value": None} laser_delay = self.msg_func(msg_id, msg, 2)["Res_Value"] if laser_delay == None: logger.error("Got a none value for laser_delay, returning 0") laser_delay = 0 return_bytes = raman_mode.to_bytes(2, "big") + laser_type.to_bytes(2, "big") + laser_enable.to_bytes(2, "big") return_bytes += laser_watchdog.to_bytes(2, "big") + laser_delay.to_bytes(2, "big") self.msg_num += 1 self.msg_num %= 8000 callback(Characteristic.RESULT_SUCCESS, return_bytes) def onWriteRequest(self, data, offset, withoutResponse, callback): logger.debug(f"Bluetooth: Received laser write request with data {data}") msg_id = self.guid + str(self.msg_num) msg_raman = int(data[0]) msg_laser_type = int(data[1]) msg_laser_enable = int(data[2]) msg_laser_watch = int(data[3]) msg_laser_delay = int.from_bytes(data[4:6], "big") logger.debug(f"Bluetooth: Laser message values were Raman mode {msg_raman}, Laser type {msg_laser_type}, Laser enable {msg_laser_enable}, Laser watchdog {msg_laser_watch}, and Laser delay {msg_laser_delay}.") if msg_raman == 0: self.raman_mode = False elif msg_raman == 1: self.raman_mode = True elif msg_raman != 255: self.disable_laser_error_byte() if msg_laser_type == 0: self.laser_type = 0 elif msg_laser_type != 255: self.disable_laser_error_byte() if msg_laser_enable == 0: msg = {"Command": "SET_LASER", "Value": "0"} self.msg_func(msg_id, msg, 0) elif msg_laser_enable == 1: msg = {"Command": "SET_LASER", "Value": "1"} self.msg_func(msg_id, msg, 0) elif msg_laser_enable != 255: self.diable_laser_error_byte() if msg_laser_watch != 255: msg = {"Command": "SET_WATCHDOG", "Value": f"{msg_laser_watch}"} self.msg_func(msg_id, msg, 1) msg = {"Command": "SET_RAMAN_DELAY", "Value": f"{msg_laser_delay}"} self.msg_func(msg_id, msg, 1) self.msg_num += 1 self.msg_num %= 8000 callback(Characteristic.RESULT_SUCCESS) def onSubscribe(self, maxValueSize, updateValueCallback): logger.debug("onSubscribe") self._updateValueCallback = updatevalueCallback def onUnsubscribe(self): logger.debug("on unsubscribe") self._updateValueCallback = None class Detector_ROI(Characteristic): def __init__(self, uuid, device, msg_queue, msg_func): Characteristic.__init__(self, {'uuid': uuid, 'properties': ['read', 'write'], 'value': None}) self._value = array.array('B', [0] * 0) self._updateValueCallback = None self.device = device self.guid = deepcopy(uuid) self.msg_num = 0 self.msg_queue = msg_queue self.msg_func = msg_func def onReadRequest(self, offset, callback): logger.debug("Bluetooth: Received request for detector roi") msg_id = self.guid + str(self.msg_num) msg = {"Command": "GET_ROI", "Value": None} start_roi, end_roi = self.msg_func(msg_id, msg, 5)["Res_Value"] self.msg_num += 1 self.msg_num %= 8000 return_bytes = start_roi.to_bytes(2, "big") + end_roi.to_bytes(2, "big") logger.debug("Bluetooth: returning roi values of {start_roi} and {end_roi}") callback(Characteristic.RESULT_SUCCESS, return_bytes) def onWriteRequest(self, data, offset, withoutResponse, callback): # For enlighten mobile the bytes are coming in cropped # This pads the bytes to the ENG-120 specific 4 in order to get the correct value msg_id = self.guid + str(self.msg_num) while len(data) < 4: data += bytes([0]) start_roi = int.from_bytes(data[0:2], "big") end_roi = int.from_bytes(data[2:4], "big") logger.debug(f"Bluetooth: Received command of data {data} to set roi to {start_roi} and {end_roi}") msg = {"Command": "SET_ROI", "Value": f"{start_roi},{end_roi}"} self.msg_func(msg_id, msg, 5) self.msg_num += 1 self.msg_num %= 8000 callback(Characteristic.RESULT_SUCCESS)
{"hexsha": "d1040f3c26c7ee1433adf949d96591306a74f63b", "size": 18456, "ext": "py", "lang": "Python", "max_stars_repo_path": "Bluetooth/Characteristics.py", "max_stars_repo_name": "WasatchPhotonics/RPi-Communication", "max_stars_repo_head_hexsha": "3dfb695e75f4a5fd84c4a00f8fc5c57519a7884e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Bluetooth/Characteristics.py", "max_issues_repo_name": "WasatchPhotonics/RPi-Communication", "max_issues_repo_head_hexsha": "3dfb695e75f4a5fd84c4a00f8fc5c57519a7884e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Bluetooth/Characteristics.py", "max_forks_repo_name": "WasatchPhotonics/RPi-Communication", "max_forks_repo_head_hexsha": "3dfb695e75f4a5fd84c4a00f8fc5c57519a7884e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0410022779, "max_line_length": 216, "alphanum_fraction": 0.6192566103, "include": true, "reason": "import numpy", "num_tokens": 4375}
#!/usr/bin/python # -*- coding: utf-8 -*- from PIL import Image import numpy as np #Returns numpy image at size imageSize*imageSize def getProcessedData(img,imageSize): img = img.resize((imageSize,imageSize), resample=Image.ANTIALIAS) imgData = np.asarray(img, dtype=np.uint8).reshape(imageSize,imageSize,1) imgData = imgData/255. return imgData #Returns numpy image at size imageSize*imageSize def getImageData(filename,imageSize): img = Image.open(filename) imgData = getProcessedData(img, imageSize) return imgData
{"hexsha": "35914257719d308fdec3ee722fcfc4a8472cff09", "size": 547, "ext": "py", "lang": "Python", "max_stars_repo_path": "CNN/imageFilesTools.py", "max_stars_repo_name": "jleapeMIT/danceable", "max_stars_repo_head_hexsha": "ddc5584214a334d38532c5a3d0160a5ba4edf118", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CNN/imageFilesTools.py", "max_issues_repo_name": "jleapeMIT/danceable", "max_issues_repo_head_hexsha": "ddc5584214a334d38532c5a3d0160a5ba4edf118", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-05-07T08:59:31.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-07T08:59:31.000Z", "max_forks_repo_path": "CNN/imageFilesTools.py", "max_forks_repo_name": "jleapeMIT/danceable", "max_forks_repo_head_hexsha": "ddc5584214a334d38532c5a3d0160a5ba4edf118", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-17T23:11:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-17T23:11:25.000Z", "avg_line_length": 32.1764705882, "max_line_length": 76, "alphanum_fraction": 0.7440585009, "include": true, "reason": "import numpy", "num_tokens": 137}
[STATEMENT] lemma reach_reach\<^sub>t_fst: "reach \<Sigma> \<delta> q\<^sub>0 = fst ` reach\<^sub>t \<Sigma> \<delta> q\<^sub>0" [PROOF STATE] proof (prove) goal (1 subgoal): 1. reach \<Sigma> \<delta> q\<^sub>0 = fst ` reach\<^sub>t \<Sigma> \<delta> q\<^sub>0 [PROOF STEP] unfolding reach\<^sub>t_def reach_def image_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. {run \<delta> q\<^sub>0 w n |w n. {y. \<exists>x\<in>UNIV. y = w x} \<subseteq> \<Sigma>} = {y. \<exists>x\<in>{run\<^sub>t \<delta> q\<^sub>0 w n |w n. {y. \<exists>x\<in>UNIV. y = w x} \<subseteq> \<Sigma>}. y = fst x} [PROOF STEP] by fastforce
{"llama_tokens": 274, "file": "LTL_to_DRA_DTS", "length": 2}
import pandas as pd import numpy as np from pandas import Series from pandas import DataFrame from statsmodels import regression def init(context): context.hs300 = "000300.XSHG" # window must larger than 64 context.WINDOW = 400 def handle_bar(context, bar_dict): time_series = history_bars(context.hs300, context.WINDOW, '1d', 'close') hurstex = hurst(time_series) #hurstex = Hurst(time_series, 318) plot("hurst", hurstex) ''' if abs(hurstex-0.5)<0.05: order_target_value(context.s,0) elif hurstex>0.5 and : order_target_percent(context.s,1) elif hurst.ex<0.5: ''' ''' # 买入卖出条件需要再次调试 curPosition = context.portfolio.positions[context.hs300].quantity if hurstex > 0.55: if curPosition == 0: order_target_percent(context.hs300, 1) elif hurstex < 0.45: if curPosition > 0: order_target_value(context.hs300, 0) ''' # def hurst(ts): # # if not isinstance(ts, Iterable): # print 'error' # return # # n_min, n_max = 2, len(ts) // 3 # RSlist = [] # for cut in range(n_min, n_max): # children = len(ts) // cut # children_list = [ts[i * children:(i + 1) * children] for i in range(cut)] # L = [] # for a_children in children_list: # Ma = np.mean(a_children) # Xta = Series(map(lambda x: x - Ma, a_children)).cumsum() # Ra = max(Xta) - min(Xta) # Sa = np.std(a_children) # rs = Ra / Sa # L.append(rs) # RS = np.mean(L) # RSlist.append(RS) # return np.polyfit(np.log(range(2 + len(RSlist), 2, -1)), np.log(RSlist), 1)[0] def hurst(history): daily_return = list(Series(history).pct_change())[1:] ranges = ['1', '2', '4', '8', '16', '32'] lag = Series(index=ranges) for i in range(len(ranges)): if i == 0: lag[i] = len(daily_return) else: lag[i] = lag[0] // (2 ** i) ARS = Series(index=ranges) for r in ranges: # RS用来存储每一种分割方式中各个片段的R/S值 RS = list() # 第i个片段 for i in range(int(r)): # 用Range存储每一个片段数据 Range = daily_return[int(i * lag[r]):int((i + 1) * lag[r])] mean = np.mean(Range) Deviation = np.cumsum(Range - mean,axis=0) #Deviation = Range - mean maxi = max(Deviation) mini = min(Deviation) RS.append(maxi - mini) sigma = np.std(Range) RS[i] = RS[i] / sigma ARS[r] = np.mean(RS) lag = np.log10(lag) ARS = np.log10(ARS) hurst_exponent = np.polyfit(lag, ARS, 1) hurst = hurst_exponent[0] return hurst # def Hurst(XX,T): # XX = np.array(XX) #读入时间序列,一维矩阵 # Lenth = XX.shape[0] #读取时间序列的总长度, # hurst = np.zeros(Lenth) # for i in xrange(T,Lenth): # X=XX[i-T:i+1] # RS = np.zeros(T) #array # logRS = np.zeros(T) #array # logn = np.zeros(T) #array # for n in xrange(10,T): #每个长度计算一次 # a = int(T/n) # x1 = X[0:n*a].reshape(n,a) #正向分段,n行a列,每一列为一个子序列 # x2 = X[(T - a * n): T].reshape(n,a) #反向分段,n行a列,每一列为一个子序列 # m1 = np.mean(x1,axis=0) #按列取均值,1行a列 # m2 = np.mean(x2,axis=0) #按列取均值,1行a列 # p = np.ones((n,1)) #n行,1列 # y1 = x1 - p * m1 #n行,a列,对每一列求离差 # y2 = x2 - p * m2 #n行,a列,对每一列求离差 # sig1 = np.std(x1,axis=0) #1行,a列,对每一列求标准差 # sig2 = np.std(x2,axis=0) #1行,a列,对每一列求标准差 # sum1 = np.cumsum(y1,axis=0) #n行,a列,求累计离差 # sum2 = np.cumsum(y2,axis=0) #n行,a列,求累计离差 # r1 = np.max(sum1,axis=0) - np.min(sum1,axis=0) #%1行,a列 # r2 = np.max(sum2,axis=0) - np.min(sum2,axis=0) #%1行,a列 # RS1[n] = np.mean(r1 / sig1,axis=0) #1行,1列 # RS2[n] = np.mean(r2 / sig2,axis=0) #1行,1列 # RS[n] = 0.5*RS1[n] + 0.5*RS2[n] # logRS[n] = np.log(RS[n]) # logn[n] = np.log(n) # R = regression.linear_model.OLS(logRS[10:T],logn[10:T]).fit() # hurst[i] = R.params[0] # return hurst
{"hexsha": "b62f2909b3fe4d1a903e38e979eb54591e394b75", "size": 4307, "ext": "py", "lang": "Python", "max_stars_repo_path": "rqalpha/strategy/hurst.py", "max_stars_repo_name": "quantModel/Rqalpha-myquant-learning", "max_stars_repo_head_hexsha": "5dc39c6d8f6d89bb89350ef64c860cb53c369c9f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 44, "max_stars_repo_stars_event_min_datetime": "2017-06-17T09:43:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T11:08:03.000Z", "max_issues_repo_path": "rqalpha/strategy/hurst.py", "max_issues_repo_name": "quantModel/Rqalpha-myquant-learning", "max_issues_repo_head_hexsha": "5dc39c6d8f6d89bb89350ef64c860cb53c369c9f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-08-26T13:10:17.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-14T12:28:08.000Z", "max_forks_repo_path": "rqalpha/strategy/hurst.py", "max_forks_repo_name": "DingTobest/Rqalpha-myquant-learning", "max_forks_repo_head_hexsha": "5dc39c6d8f6d89bb89350ef64c860cb53c369c9f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2017-06-26T10:06:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-17T04:26:04.000Z", "avg_line_length": 32.8778625954, "max_line_length": 84, "alphanum_fraction": 0.5045275134, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 1518}
""" MinOver algorithm to find a point inside a polytope. Francesc Font-Clos Oct 2018 """ import numpy as np class MinOver(object): """MinOver solver.""" def __init__(self, polytope, ): """ Create a MinOver solver. Parameters ---------- polytope: hitandrun.polytope Polytope in H-representation """ self.polytope = polytope def run(self, speed=1, starting_point=None, max_iters=100, verbose=False): """ Run the MinOver algorithm. Parameters ---------- speed: float Distance moved at each learning step max_iters: int Maximum number of iterations (per hyperplan). starting_poin: np.array Initial condition. Returns ------- current: np.array The final point. convergence: bool True if the algorithm converged, False, otherwise. """ self.max_iters = max_iters * self.polytope.nplanes self.speed = speed if starting_point is None: self.current = np.zeros(self.polytope.dim) else: self.current = starting_point # compute step 0 worst planes # this is a trick to handle first steps self.worst_indexes = [-1, -2] self.worst_distances = [-1, -2] self._set_worst_constraint() for i in range(self.max_iters): convergence = self._step() self.iter = i self._check_speed() if verbose: self._print_worst() if convergence: break return self.current, convergence def _step(self): self._move_towards_worst_plane() self._set_worst_constraint() return np.all(self.distances < 0) def _check_speed(self): i0, i1, i2 = self.worst_indexes[::-1][:3] d0, d1, d2 = self.worst_distances[::-1][:3] if i0 != i1 and i0 == i2 and d0 >= d2: self.speed *= 0.9 def _set_worst_constraint(self): self.distances = self.polytope.A @ self.current - self.polytope.b self.worst = np.argmax(self.distances) self.worst_indexes.append(self.worst) self.worst_distances.append(self.distances[self.worst]) def _move_towards_worst_plane(self): self.current = self.current - self.speed * self.polytope.A[self.worst] def _print_worst(self): worst_distance = self.distances[self.worst] print("iter", self.iter, "index:", self.worst, "distance:", worst_distance, "speed:", self.speed)
{"hexsha": "4c00ffcb169d0a7e43cb26f29e2ecbd87c2be0c3", "size": 2665, "ext": "py", "lang": "Python", "max_stars_repo_path": "hitandrun/minover.py", "max_stars_repo_name": "fontclos/hitandrun", "max_stars_repo_head_hexsha": "00c29424acfee685208301e5f16d2782325733ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-11-04T23:45:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T10:22:32.000Z", "max_issues_repo_path": "hitandrun/minover.py", "max_issues_repo_name": "fontclos/hitandrun", "max_issues_repo_head_hexsha": "00c29424acfee685208301e5f16d2782325733ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-11-05T00:35:02.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-14T14:07:42.000Z", "max_forks_repo_path": "hitandrun/minover.py", "max_forks_repo_name": "fontclos/hitandrun", "max_forks_repo_head_hexsha": "00c29424acfee685208301e5f16d2782325733ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-11-04T23:45:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-19T12:09:03.000Z", "avg_line_length": 28.6559139785, "max_line_length": 78, "alphanum_fraction": 0.5722326454, "include": true, "reason": "import numpy", "num_tokens": 618}
################################### # Script : # 1) Contains class to generate XL-MS # plots # 2) Inherits from CX class # # ganesans - Salilab - UCSF # ganesans@salilab.org ################################### import pandas as pd import glob import sys,os,math,itertools import numpy as np import pandas as pd from validation import sas, get_input_information,cx from bokeh.io import output_file, show, curdoc, export_png, export_svgs from bokeh.models import Span,ColumnDataSource, LinearAxis, Legend, FactorRange from bokeh.palettes import GnBu3, RdBu,OrRd3,Blues,YlOrBr, Spectral6, Set1 from bokeh.plotting import figure, output_file, save from bokeh.models.widgets import Tabs, Panel from bokeh.layouts import row,column,gridplot class cx_validation_plots(cx.cx_validation): def __init__(self,mmcif_file): super().__init__(mmcif_file) self.ID=str(get_input_information.get_id(self)) self.xl_df=cx.cx_validation.get_xl_data(self) self.model_df=cx.cx_validation.get_df_for_models(self) self.filename = os.path.join('Output/images//') self.filename_add = os.path.join('static/images//') def plot_linker_dist_I(self,df,intra=1,key='Intra'): ''' plot distance distribution per linker based on inter and intra links ''' for i in df['Linker'].unique(): df_c=df[df['Linker']==i] if i=='DSS': loc=30 elif i=='EDC': loc=20 else: loc=30 output_file(self.ID+i+"linker.html",mode="inline") measured=df_c[df_c['Intra']==intra]['dist'] hist, edges = np.histogram(measured, density=False, bins=50) #hist_l, edges_l = np.histogram(measured, density=False, bins=25) p = figure(title=key+'-molecular distances/Linker '+i, plot_height=400, plot_width=400) p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], fill_color="navy", line_color="white", alpha=0.3) #p.line(edges, hist, line_color="navy", line_width=4, alpha=0.7, legend_label=key+"/"+i) vline = Span(location=loc, dimension='height', line_color='red', line_width=3,line_dash='dashed') p.renderers.extend([vline]) p.xaxis.major_label_text_font_size="14pt" p.yaxis.major_label_text_font_size="14pt" p.title.text_font_size='12pt' p.title.align="center" p.title.vertical_align='top' p.xaxis.axis_label = 'Distance \u212B' p.xaxis.axis_label_text_font_size='14pt' p.yaxis.axis_label = 'Number of cross-links' p.yaxis.axis_label_text_font_size='14pt' p.output_backend="svg" save(p,filename=self.filename+'/'+self.ID+i+key+"linker.html") export_svgs(p,filename=self.filename+'/'+self.ID+i+key+"linker.svg") save(p,filename=self.filename_add+'/'+self.ID+i+key+"linker.html") export_svgs(p,filename=self.filename_add+'/'+self.ID+i+key+"linker.svg") def plot_linker_dist_S(self,df,struc=1,key='Structured'): ''' plot distance distribution per linker based on structured/unstrcutured/between struc&unstruc ''' for i in df['Linker'].unique(): df_c=df[df['Linker']==i] if i=='DSS': loc=30 elif i=='EDC': loc=20 else: loc=30 output_file(self.ID+i+"linker.html",mode="inline") measured=df_c[df_c['Structured']==struc]['dist'] hist, edges = np.histogram(measured, density=False, bins=50) #hist_l, edges_l = np.histogram(measured, density=False, bins=25) p = figure(title=key+ ' regions/Linker '+i, plot_height=350, plot_width=350) p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], fill_color="navy", line_color="white", alpha=0.3) #p.line(edges, hist, line_color="navy", line_width=4, alpha=0.7, legend_label=key+"/"+i) vline = Span(location=loc, dimension='height', line_color='red', line_width=3,line_dash='dashed') p.renderers.extend([vline]) p.xaxis.major_label_text_font_size="14pt" p.yaxis.major_label_text_font_size="14pt" p.title.text_font_size='12pt' p.title.align="center" p.title.vertical_align='top' p.xaxis.axis_label = 'Distance \u212B' p.xaxis.axis_label_text_font_size='14pt' p.yaxis.axis_label = 'Number of cross-links' p.yaxis.axis_label_text_font_size='14pt' p.output_backend="svg" save(p,filename=self.filename+'/'+self.ID+i+key+"linker.html") export_svgs(p,filename=self.filename+'/'+self.ID+i+key+"linker.svg") save(p,filename=self.filename_add+'/'+self.ID+i+key+"linker.html") export_svgs(p,filename=self.filename_add+'/'+self.ID+i+key+"linker.svg") def plot_intra_summary_deprecated(self): ''' plot summary of intra/inter xl-ms satisfaction ''' for model_id,df in self.model_df.items(): factors=self.get_factors(df) regions = ['Satisfied', 'Violated'] source = ColumnDataSource(data=dict( x=factors, Satisfied=self.get_satisfied(df,factors), Violated=self.get_violated(df,factors), )) fig_id = figure(x_range=FactorRange(*factors), plot_height=400,plot_width=500, title='CX-MS Satisfaction for model:'+str(model_id)) fig_id.vbar_stack(regions, x='x', width=0.9, alpha=0.5, color=["blue", "red"], source=source, legend_label=regions) fig_id.xaxis.major_label_text_font_size="14pt" fig_id.yaxis.major_label_text_font_size="14pt" fig_id.yaxis.axis_label_text_font_size='14pt' fig_id.title.text_font_size='12pt' fig_id.title.align="center" fig_id.title.vertical_align='top' fig_id.yaxis.axis_label = 'Number of cross-links' fig_id.y_range.start = 0 fig_id.y_range.end = df.shape[0] fig_id.x_range.range_padding = 0.1 fig_id.xaxis.major_label_orientation = 1 fig_id.xgrid.grid_line_color = None fig_id.legend.location = "top_center" fig_id.legend.orientation = "horizontal" fig_id.output_backend="svg" save(fig_id,filename=self.filename+'/'+self.ID+str(model_id)+"IS.html") export_svgs(fig_id,filename=self.filename+'/'+self.ID+str(model_id)+"IS.svg") save(fig_id,filename=self.filename_add+'/'+self.ID+str(model_id)+"IS.html") export_svgs(fig_id,filename=self.filename_add+'/'+self.ID+str(model_id)+"IS.svg") def plot_distributions(self): ''' plot inter and intra distance distributions ''' for model_id,df in self.model_df.items(): self.plot_linker_dist_I(df,intra=1,key='Intra') self.plot_linker_dist_I(df,intra=0,key='Inter') self.plot_linker_dist_S(df,struc=1,key='Structured') self.plot_linker_dist_S(df,struc=0,key='Unstructured') self.plot_linker_dist_S(df,struc=2,key='Intermediate') def get_factors(self,df): ''' get grouped inter/intra factors for stacked bar plot ''' link=df['Linker'].unique() xl=['Inter','Intra'] factors=list(itertools.product(link, xl)) return factors def get_factors_struc(self,df): ''' get grouped struc factors for stacked bar plot ''' link=df['Linker'].unique() xl=['Structured','Unstructured','Intermediate'] factors=list(itertools.product(link, xl)) return factors def get_satisfied(self,df,factors): ''' get satisfied list for stacked bar plot;inter/intra info ''' Satisfied=[] for i in factors: df_1=df[df['Linker']==i[0]] df_2=df_1[df_1[i[1]]==1] Satisfied.append(df_2[df_2['Satisfied']==1].shape[0]) return Satisfied def get_satisfied_struc(self,df,factors): ''' get satisfied list for stacked bar plot;struc info ''' Satisfied=[] struc_dict={'Structured':1,'Unstructured':0,'Intermediate':2} for i in factors: df_1=df[df['Linker']==i[0]] df_2=df_1[df_1['Structured']==struc_dict[i[1]]] Satisfied.append(df_2[df_2['Satisfied']==1].shape[0]) return Satisfied def get_violated(self,df,factors): ''' get violated list for stacked bar plot;inter/intra info ''' Violated=[] for i in factors: df_1=df[df['Linker']==i[0]] df_2=df_1[df_1[i[1]]==1] Violated.append(df_2[df_2['Satisfied']==0].shape[0]) return Violated def get_violated_struc(self,df,factors): ''' get violated list for stacked bar plot;struc info ''' struc_dict={'Structured':1,'Unstructured':0,'Intermediate':2} Violated=[] for i in factors: df_1=df[df['Linker']==i[0]] df_2=df_1[df_1['Structured']==struc_dict[i[1]]] Violated.append(df_2[df_2['Satisfied']==0].shape[0]) return Violated def make_gridplot_intra(self): ''' make gridplot;inter/intra info ''' grid=[] for model_id,df in self.model_df.items(): grid.append(self.plot_intra_summary(df,model_id)) gridP=gridplot(grid, ncols=len(grid)) save(gridP,filename=self.filename+'/'+self.ID+"IS.html") export_png(gridP,filename=self.filename+'/'+self.ID+"IS.png") save(gridP,filename=self.filename_add+'/'+self.ID+"IS.html") export_png(gridP,filename=self.filename_add+'/'+self.ID+"IS.png") def plot_intra_summary(self,df,model_id): ''' plot summary stats for inter/intra data ''' factors=self.get_factors(df) regions = ['Satisfied', 'Violated'] source = ColumnDataSource(data=dict( x=factors, Satisfied=self.get_satisfied(df,factors), Violated=self.get_violated(df,factors), )) fig_id = figure(x_range=FactorRange(*factors), plot_height=300,plot_width=350, title='Model:'+str(model_id)) fig_id.vbar_stack(regions, x='x', width=0.9, alpha=0.5, color=["blue", "red"], source=source, legend_label=regions) fig_id.xaxis.major_label_text_font_size="12pt" fig_id.yaxis.major_label_text_font_size="12pt" fig_id.yaxis.axis_label_text_font_size='12pt' fig_id.title.text_font_size='12pt' fig_id.title.align="center" fig_id.title.vertical_align='top' fig_id.yaxis.axis_label = 'Number of cross-links' #fig_id.y_range.start = 0 #fig_id.y_range.end = df.shape[0] fig_id.x_range.range_padding = 0.1 fig_id.xaxis.major_label_orientation = 1 fig_id.xgrid.grid_line_color = None fig_id.legend.location = "top_center" fig_id.legend.orientation = "horizontal" return fig_id def make_gridplot_struc(self): ''' plot grid plot for struc info ''' grid=[] for model_id,df in self.model_df.items(): grid.append(self.plot_struc_summary(df,model_id)) gridP=gridplot(grid, ncols=len(grid)) save(gridP,filename=self.filename+'/'+self.ID+"SS.html") export_png(gridP,filename=self.filename+'/'+self.ID+"SS.png") save(gridP,filename=self.filename_add+'/'+self.ID+"SS.html") export_png(gridP,filename=self.filename_add+'/'+self.ID+"SS.png") def plot_struc_summary(self,df,model_id): ''' plot summary stats: struc/unstruc/intermediate ''' factors=self.get_factors_struc(df) regions = ['Satisfied', 'Violated'] source = ColumnDataSource(data=dict( x=factors, Satisfied=self.get_satisfied_struc(df,factors), Violated=self.get_violated_struc(df,factors), )) fig_id = figure(x_range=FactorRange(*factors), plot_height=350, plot_width=400, title='Model:'+str(model_id)) fig_id.vbar_stack(regions, x='x', width=0.9, alpha=0.5, color=["blue", "red"], source=source, legend_label=regions) fig_id.xaxis.major_label_text_font_size="12pt" fig_id.yaxis.major_label_text_font_size="12pt" fig_id.yaxis.axis_label_text_font_size='12pt' fig_id.title.text_font_size='12pt' fig_id.title.align="center" fig_id.title.vertical_align='top' fig_id.yaxis.axis_label = 'Number of cross-links' #fig_id.y_range.start = 0 #fig_id.y_range.end = df.shape[0] fig_id.x_range.range_padding = 0.1 fig_id.xaxis.major_label_orientation = 1 fig_id.xgrid.grid_line_color = None fig_id.legend.location = "top_center" fig_id.legend.orientation = "horizontal" return fig_id
{"hexsha": "72334d33c606b6c1b4c00f77e141ced1f500fd6f", "size": 11434, "ext": "py", "lang": "Python", "max_stars_repo_path": "master/pyext/src/validation/cx_plots.py", "max_stars_repo_name": "salilab/IHMValidation", "max_stars_repo_head_hexsha": "ddf1a080a4b7f66c2f067312f5f4a5c6584848d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "master/pyext/src/validation/cx_plots.py", "max_issues_repo_name": "salilab/IHMValidation", "max_issues_repo_head_hexsha": "ddf1a080a4b7f66c2f067312f5f4a5c6584848d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2020-12-09T22:27:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T18:01:43.000Z", "max_forks_repo_path": "master/pyext/src/validation/cx_plots.py", "max_forks_repo_name": "salilab/IHMValidation", "max_forks_repo_head_hexsha": "ddf1a080a4b7f66c2f067312f5f4a5c6584848d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-21T22:55:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T22:55:24.000Z", "avg_line_length": 36.4140127389, "max_line_length": 100, "alphanum_fraction": 0.705439916, "include": true, "reason": "import numpy", "num_tokens": 3335}
import os,sys,glob,time import obspy import scipy import pycwt import pyasdf import datetime import numpy as np import pandas as pd from obspy.signal.invsim import cosine_taper from obspy.signal.regression import linear_regression from scipy.fftpack import fft,ifft,next_fast_len from seisgo import stacking as stack from seisgo.types import CorrData, FFTData from seisgo import utils ##### ######################################################## ################ CROSS-CORRELATE FUNCTIONS ################## ######################################################## def cc_memory(inc_hours,sps,nsta,ncomp,cc_len,cc_step): """ Estimates the memory usage with given correlation parameters, assuming float 32. """ nseg_chunk = int(np.floor((3600*inc_hours-cc_len)/cc_step))+1 npts_chunk = int(nseg_chunk*cc_len*sps) memory_size = nsta*npts_chunk*4/1024/1024/1024**ncomp return memory_size def compute_fft(trace,win_len,step,stainv=None, freqmin=None,freqmax=None,time_norm='no',freq_norm='no', smooth=20,smooth_spec=None,misc=dict(),taper_frac=0.05,df=None): """ Call FFTData to build the object. This is an alternative of directly call FFTData(). The motivation of this function is to provide an user interface to build FFTData object. """ return FFTData(trace=trace,win_len=win_len,step=step, stainv=stainv,freqmin=freqmin,freqmax=freqmax,time_norm=time_norm, freq_norm=freq_norm,smooth=smooth,smooth_spec=smooth_spec,misc=misc, taper_frac=taper_frac,df=df) #assemble FFT with given asdf file name def assemble_fft(sfile,win_len,step,freqmin=None,freqmax=None, time_norm='no',freq_norm='no',smooth=20,smooth_spec=20, taper_frac=0.05,df=None,exclude_chan=[None],v=True): #only deal with ASDF format for now. # retrive station information ds=pyasdf.ASDFDataSet(sfile,mpi=False,mode='r') sta_list = ds.waveforms.list() nsta=len(sta_list) print('found %d stations in total'%nsta) fftdata_all=[] if nsta==0: print('no data in %s'%sfile); return fftdata_all # loop through all stations print('working on file: '+sfile.split('/')[-1]) for ista in sta_list: # get station and inventory try: inv1 = ds.waveforms[ista]['StationXML'] except Exception as e: print('abort! no stationxml for %s in file %s'%(ista,sfile)) continue # get days information: works better than just list the tags all_tags = ds.waveforms[ista].get_waveform_tags() if len(all_tags)==0:continue #----loop through each stream---- for itag in all_tags: if v:print("FFT for station %s and trace %s" % (ista,itag)) # read waveform data source = ds.waveforms[ista][itag] if len(source)==0:continue # channel info comp = source[0].stats.channel if comp[-1] =='U': comp.replace('U','Z') #exclude some channels in the exclude_chan list. if comp in exclude_chan: print(comp+" is in the exclude_chan list. Skip it!") continue fftdata=FFTData(source,win_len,step,stainv=inv1, time_norm=time_norm,freq_norm=freq_norm, smooth=smooth,freqmin=freqmin,freqmax=freqmax, smooth_spec=smooth_spec,taper_frac=taper_frac,df=df) if fftdata.data is not None: fftdata_all.append(fftdata) #### return fftdata_all def smooth_source_spect(fft1,cc_method,sn): ''' this function smoothes amplitude spectrum of the 2D spectral matrix. (used in S1) PARAMETERS: --------------------- cc_para: dictionary containing useful cc parameters fft1: source spectrum matrix RETURNS: --------------------- sfft1: complex numpy array with normalized spectrum ''' smoothspect_N = sn #cc_para['smoothspect_N'] N=fft1.shape[0] Nfft2=fft1.shape[1] fft1=fft1.reshape(fft1.size) if cc_method == 'deconv': #-----normalize single-station cc to z component----- temp = utils.moving_ave(np.abs(fft1),smoothspect_N) try: sfft1 = fft1/temp**2 except Exception: raise ValueError('smoothed spectrum has zero values') elif cc_method == 'coherency': temp = utils.moving_ave(np.abs(fft1),smoothspect_N) try: sfft1 = fft1/temp except Exception: raise ValueError('smoothed spectrum has zero values') elif cc_method == 'xcorr': sfft1 = fft1 else: raise ValueError('no correction correlation method is selected at L59') return sfft1.reshape(N,Nfft2) # def do_correlation(sfile,win_len,step,maxlag,cc_method='xcorr',acorr_only=False, xcorr_only=False,substack=False,substack_len=None,smoothspect_N=20, maxstd=10,freqmin=None,freqmax=None,time_norm='no',freq_norm='no', smooth_N=20,exclude_chan=[None],outdir='.',v=True): """ Wrapper for computing correlation functions. It includes two key steps: 1) compute and assemble the FFT of all data in the sfile, into a list of FFTData objects; 2) loop through the FFTData object list and do correlation (auto or xcorr) for each source-receiver pair. ====RETURNS==== ndata: the number of station-component pairs in the sfile, that have been processed. """ if win_len in [1,2,3]: print("!!!WARNING: you may call do_correlation() in the old way with the 2nd argument as the ncomp info.") print(" This may cause errors with arguments getting the wrong values. In this version and later,") print(" ncomp is deprecated. No change for other arguments. This warning will be removed in") print(" versions v0.7.x and later.") if acorr_only and xcorr_only: raise ValueError('acorr_only and xcorr_only CAN NOT all be True.') tname = sfile.split('/')[-1] tmpfile = os.path.join(outdir,tname.split('.')[0]+'.tmp') if not os.path.isdir(outdir):os.makedirs(outdir) #file to store CC results. outfile=os.path.join(outdir,tname) # check whether time chunk been processed or not if os.path.isfile(tmpfile): ftemp = open(tmpfile,'r') alines = ftemp.readlines() if len(alines) and alines[-1] == 'done': return 0 else: ftemp.close() os.remove(tmpfile) if os.path.isfile(outfile): os.remove(outfile) ftmp = open(tmpfile,'w') ##############compute FFT############# fftdata=assemble_fft(sfile,win_len,step,freqmin=freqmin,freqmax=freqmax, time_norm=time_norm,freq_norm=freq_norm,smooth=smooth_N,exclude_chan=exclude_chan) ndata=len(fftdata) #############PERFORM CROSS-CORRELATION################## if v: print(tname) iend=ndata for iiS in range(ndata): # get index right for auto/cross correlation istart=iiS; src=fftdata[iiS].net+"."+fftdata[iiS].sta # if acorr_only:iend=np.minimum(iiS+ncomp,ndata) # if xcorr_only:istart=np.minimum(iiS+ncomp,ndata) #-----------now loop III for each receiver B---------- for iiR in range(istart,iend): # if v:print('receiver: %s %s' % (fftdata[iiR].net,fftdata[iiR].sta)) rcv=fftdata[iiR].net+"."+fftdata[iiR].sta if (acorr_only and src==rcv) or (xcorr_only and src != rcv) or (not acorr_only and not xcorr_only): if fftdata[iiS].data is not None and fftdata[iiR].data is not None: if v:print('receiver: %s %s' % (fftdata[iiR].net,fftdata[iiR].sta)) corrdata=correlate(fftdata[iiS],fftdata[iiR],maxlag,method=cc_method,substack=substack, smoothspect_N=smoothspect_N,substack_len=substack_len, maxstd=maxstd) if corrdata.data is not None: corrdata.to_asdf(file=outfile) # create a stamp to show time chunk being done ftmp.write('done') ftmp.close() return ndata def correlate(fftdata1,fftdata2,maxlag,method='xcorr',substack=False, substack_len=None,smoothspect_N=20,maxstd=10,terror=0.01): ''' this function does the cross-correlation in freq domain and has the option to keep sub-stacks of the cross-correlation if needed. it takes advantage of the linear relationship of ifft, so that stacking is performed in spectrum domain first to reduce the total number of ifft. PARAMETERS: --------------------- fftdata1: FFTData for the source station fftdata2: FFTData of the receiver station maxlag: maximum lags to keep in the cross correlation method: cross-correlation methods selected by the user terror: 0-1 fraction of timing error in searching for overlapping. The timing error = terror*dt RETURNS: --------------------- corrdata: CorrData object of cross-correlation functions in time domain ''' corrdata=CorrData() #check overlapping timestamps before any other processing #this step is required when there are gaps in the data. ind1,ind2=utils.check_overlap(fftdata1.time,fftdata2.time,error=terror*fftdata1.dt) if not len(ind1): print('no overlapped timestamps in the data.') return corrdata #---------- check the existence of earthquakes by std of the data.---------- source_std = fftdata1.std[ind1] sou_ind = np.where((source_std<maxstd)&(source_std>0)&(np.isnan(source_std)==0))[0] if not len(sou_ind): return corrdata receiver_std = fftdata2.std[ind2] rec_ind = np.where((receiver_std<maxstd)&(receiver_std>0)&(np.isnan(receiver_std)==0))[0] if not len(rec_ind): return corrdata bb=np.intersect1d(sou_ind,rec_ind) if len(bb)==0:return corrdata bb_data1=[ind1[i] for i in bb] bb_data2=[ind2[i] for i in bb] #----load paramters---- dt = fftdata1.dt cc_len = fftdata1.win_len cc_step = fftdata1.step if substack_len is None: substack_len=cc_len Nfft = fftdata1.Nfft Nfft2 = Nfft//2 fft1=np.conj(fftdata1.data[bb_data1,:Nfft2]) #get the conjugate of fft1 nwin = fft1.shape[0] fft2=fftdata2.data[bb_data2,:Nfft2] timestamp=fftdata1.time[bb_data1] if method != "xcorr": fft1 = smooth_source_spect(fft1,method,smoothspect_N) #------convert all 2D arrays into 1D to speed up-------- corr = np.zeros(nwin*Nfft2,dtype=np.complex64) corr = fft1.reshape(fft1.size,)*fft2.reshape(fft2.size,) if method == "coherency": temp = utils.moving_ave(np.abs(fft2.reshape(fft2.size,)),smoothspect_N) corr /= temp corr = corr.reshape(nwin,Nfft2) if substack: if substack_len == cc_len: # choose to keep all fft data for a day s_corr = np.zeros(shape=(nwin,Nfft),dtype=np.float32) # stacked correlation ampmax = np.zeros(nwin,dtype=np.float32) n_corr = np.zeros(nwin,dtype=np.int16) # number of correlations for each substack t_corr = timestamp # timestamp crap = np.zeros(Nfft,dtype=np.complex64) for i in range(nwin): n_corr[i]= 1 crap[:Nfft2] = corr[i,:] crap[:Nfft2] = crap[:Nfft2]-np.mean(crap[:Nfft2]) # remove the mean in freq domain (spike at t=0) crap[-(Nfft2)+1:] = np.flip(np.conj(crap[1:(Nfft2)]),axis=0) crap[0]=complex(0,0) s_corr[i,:] = np.real(np.fft.ifftshift(scipy.fftpack.ifft(crap, Nfft, axis=0))) # remove abnormal data ampmax = np.max(s_corr,axis=1) tindx = np.where( (ampmax<20*np.median(ampmax)) & (ampmax>0))[0] s_corr = s_corr[tindx,:] t_corr = t_corr[tindx] n_corr = n_corr[tindx] else: # get time information Ttotal = timestamp[-1]-timestamp[0] # total duration of what we have now tstart = timestamp[0] nstack = int(np.round(Ttotal/substack_len)) ampmax = np.zeros(nstack,dtype=np.float32) s_corr = np.zeros(shape=(nstack,Nfft),dtype=np.float32) n_corr = np.zeros(nstack,dtype=np.int) t_corr = np.zeros(nstack,dtype=np.float) crap = np.zeros(Nfft,dtype=np.complex64) for istack in range(nstack): # find the indexes of all of the windows that start or end within itime = np.where( (timestamp >= tstart) & (timestamp < tstart+substack_len) )[0] if len(itime)==0:tstart+=substack_len;continue crap[:Nfft2] = np.mean(corr[itime,:],axis=0) # linear average of the correlation crap[:Nfft2] = crap[:Nfft2]-np.mean(crap[:Nfft2]) # remove the mean in freq domain (spike at t=0) crap[-(Nfft2)+1:]=np.flip(np.conj(crap[1:(Nfft2)]),axis=0) crap[0]=complex(0,0) s_corr[istack,:] = np.real(np.fft.ifftshift(scipy.fftpack.ifft(crap, Nfft, axis=0))) n_corr[istack] = len(itime) # number of windows stacks t_corr[istack] = tstart # save the time stamps tstart += substack_len #print('correlation done and stacked at time %s' % str(t_corr[istack])) # remove abnormal data ampmax = np.max(s_corr,axis=1) tindx = np.where( (ampmax<20*np.median(ampmax)) & (ampmax>0))[0] s_corr = s_corr[tindx,:] t_corr = t_corr[tindx] n_corr = n_corr[tindx] else: # average daily cross correlation functions ampmax = np.max(corr,axis=1) tindx = np.where( (ampmax<20*np.median(ampmax)) & (ampmax>0))[0] n_corr = nwin s_corr = np.zeros(Nfft,dtype=np.float32) t_corr = timestamp[0] crap = np.zeros(Nfft,dtype=np.complex64) crap[:Nfft2] = np.mean(corr[tindx],axis=0) crap[:Nfft2] = crap[:Nfft2]-np.mean(crap[:Nfft2],axis=0) crap[-(Nfft2)+1:]=np.flip(np.conj(crap[1:(Nfft2)]),axis=0) s_corr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(crap, Nfft, axis=0))) # trim the CCFs in [-maxlag maxlag] t = np.arange(-Nfft2+1, Nfft2)*dt ind = np.where(np.abs(t) <= maxlag)[0] if s_corr.ndim==1: s_corr = s_corr[ind] elif s_corr.ndim==2: s_corr = s_corr[:,ind] ### call CorrData to build the object cc_comp= fftdata1.chan[-1]+fftdata2.chan[-1] dist,azi,baz = obspy.geodetics.base.gps2dist_azimuth(fftdata1.lat,fftdata1.lon,fftdata2.lat,fftdata2.lon) corrdata=CorrData(net=[fftdata1.net,fftdata2.net],sta=[fftdata1.sta,fftdata2.sta],\ loc=[fftdata1.loc,fftdata2.loc],chan=[fftdata1.chan,fftdata2.chan],\ lon=[fftdata1.lon,fftdata2.lon],lat=[fftdata1.lat,fftdata2.lat],\ ele=[fftdata1.ele,fftdata2.ele],cc_comp=cc_comp,lag=maxlag,\ dt=fftdata1.dt,cc_len=cc_len,cc_step=cc_step,dist=dist/1000,az=azi,\ baz=baz,time=t_corr,data=s_corr,substack=substack,\ side="A",misc={"cc_method":method,"dist_unit":"km"}) return corrdata def do_stacking(ccfiles,pairlist=None,outdir='./STACK',method=['linear'], rotation=False,correctionfile=None,flag=False,keep_substack=False, to_egf=False): # source folder if pairlist is None: pairlist,netsta_all=get_stationpairs(ccfiles,False) if len(ccfiles)==0: raise IOError('Abort! no available CCF data for stacking') for s in netsta_all: tmp = os.path.join(outdir,s) if not os.path.isdir(tmp):os.mkdir(tmp) if isinstance(pairlist,str):pairlist=[pairlist] if not os.path.isdir(outdir):os.makedirs(outdir) if rotation: enz_system = ['EE','EN','EZ','NE','NN','NZ','ZE','ZN','ZZ'] rtz_components = ['ZR','ZT','ZZ','RR','RT','RZ','TR','TT','TZ'] for pair in pairlist: ttr = pair.split('_') snet,ssta = ttr[0].split('.') rnet,rsta = ttr[1].split('.') idir = ttr[0] # continue when file is done toutfn = os.path.join(outdir,idir+'/'+pair+'.tmp') if os.path.isfile(toutfn):continue if flag:print('assembling all corrdata ...') t0=time.time() corrdict_all=dict() #all components for the single station pair txtract=np.zeros(len(ccfiles),dtype=np.float32) tmerge=np.zeros(len(ccfiles),dtype=np.float32) tparameters=None for i,ifile in enumerate(ccfiles): # tt00=time.time() corrdict=extract_corrdata(ifile,pair=pair) # txtract[i]=time.time()-tt00 if len(list(corrdict.keys()))>0: comp_list=list(corrdict[pair].keys()) if len(comp_list)==0: continue elif len(comp_list) >9: print(comp_list) raise ValueError('more than 9 cross-component exists for %s %s! please double check'%(ifile,pair)) ### merge same component corrdata. # tt11=time.time() for c in comp_list: #convert corrdata to empirical Green's functions by #taking the negative time derivative. See types.CorrData.to_egf() for details. if to_egf: corrdict[pair][c].to_egf() if tparameters is None:tparameters=corrdict[pair][c].misc if c in list(corrdict_all.keys()): corrdict_all[c].merge(corrdict[pair][c]) else:corrdict_all[c]=corrdict[pair][c] # tmerge[i]=time.time()-tt11 # # if flag:print('extract time:'+str(np.sum(txtract))) # if flag:print('merge time:'+str(np.sum(tmerge))) t1=time.time() if flag:print('finished assembling in %6.2fs ...'%(t1-t0)) #get length info from anyone of the corrdata, assuming all corrdata having the same length. cc_comp=list(corrdict_all.keys()) #final check on number of keys after merging all data. if len(cc_comp)==0: if flag:print('continue! no cross components for %s'%(pair)) continue elif len(cc_comp)<9 and rotation: if flag:print('continue! not enough cross components for %s to do rotation'%(pair)) continue elif len(cc_comp) >9: print(cc_comp) raise ValueError('more than 9 cross-component exists for %s! please double check'%(pair)) #save data. outfn = pair+'.h5' if flag:print('ready to output to %s'%(outfn)) t2=time.time() # loop through cross-component for stacking if isinstance(method,str):method=[method] tparameters['station_source']=ssta tparameters['station_receiver']=rsta if rotation: #need to order the components according to enz_system list. if corrdict_all[cc_comp[0]].substack: npts_segmt = corrdict_all[cc_comp[0]].data.shape[1] else: npts_segmt = corrdict_all[cc_comp[0]].data.shape[0] bigstack=np.zeros(shape=(9,npts_segmt),dtype=np.float32) if flag:print('applying stacking and rotation ...') stack_h5 = os.path.join(outdir,idir+'/'+outfn) ds=pyasdf.ASDFDataSet(stack_h5,mpi=False) #codes for ratation option. for m in method: data_type = 'Allstack_'+m bigstack=np.zeros(shape=(9,npts_segmt),dtype=np.float32) for icomp in range(9): comp = enz_system[icomp] indx = np.where(cc_comp==comp)[0] # jump if there are not enough data dstack,stamps_final=stacking(corrdict_all[cc_comp[indx[0]]],method=m) bigstack[icomp]=dstack tparameters['time'] = stamps_final[0] ds.add_auxiliary_data(data=dstack, data_type=data_type, path=comp, parameters=tparameters) # start rotation if np.all(bigstack==0):continue bigstack_rotated = rotation(bigstack,tparameters,correctionfile,flag) # write to file data_type = 'Allstack_'+m for icomp2 in range(9): rcomp = rtz_components[icomp2] if rcomp != 'ZZ': ds.add_auxiliary_data(data=bigstack_rotated[icomp2], data_type=data_type, path=rcomp, parameters=tparameters) if keep_substack: for ic in cc_comp: for ii in range(corrdict_all[ic].data.shape[0]): tparameters2=tparameters tparameters2['time'] = corrdict_all[ic].time[ii] data_type = 'T'+str(int(corrdict_all[ic].time[ii])) ds.add_auxiliary_data(data=corrdict_all[ic].data[ii], data_type=data_type, path=ic, parameters=tparameters2) else: #no need to care about the order of components. stack_h5 = os.path.join(outdir,idir+'/'+outfn) ds=pyasdf.ASDFDataSet(stack_h5,mpi=False) if flag:print('applying stacking ...') for ic in cc_comp: # write stacked data into ASDF file dstack,stamps_final=stacking(corrdict_all[ic],method=method) tparameters['time'] = stamps_final[0] for i in range(len(method)): m=method[i] ds.add_auxiliary_data(data=dstack[i,:], data_type='Allstack_'+m, path=ic, parameters=tparameters) if keep_substack: for ii in range(corrdict_all[ic].data.shape[0]): tparameters2=tparameters tparameters2['time'] = corrdict_all[ic].time[ii] data_type = 'T'+str(int(corrdict_all[ic].time[ii])) ds.add_auxiliary_data(data=corrdict_all[ic].data[ii], data_type=data_type, path=ic, parameters=tparameters2) # if flag: print('stacking and saving took %6.2fs'%(time.time()-t2)) # write file stamps ftmp = open(toutfn,'w');ftmp.write('done');ftmp.close() del corrdict_all #### def stacking(corrdata,method='linear',par=None): ''' this function stacks the cross correlation data PARAMETERS: ---------------------- corrdata: CorrData object. method: stacking method, could be: linear, robust, pws, acf, or nroot. par: stacking parameters in a dictionary. See stacking.seisstack() for details. RETURNS: ---------------------- dstack: 1D matrix of stacked cross-correlation functions over all the segments cc_time: timestamps of the traces for the stack ''' if isinstance(method,str):method=[method] # remove abnormal data if corrdata.data.ndim==1: cc_time = [corrdata.time] # do stacking dstack = np.zeros((len(method),corrdata.data.shape[0]),dtype=np.float32) for i in range(len(method)): m =method[i] dstack[i,:]=corrdata.data[:] else: ampmax = np.max(corrdata.data,axis=1) tindx = np.where( (ampmax<20*np.median(ampmax)) & (ampmax>0))[0] nstacks=len(tindx) dstack=[] cc_time=[] if nstacks >0: # remove ones with bad amplitude cc_array = corrdata.data[tindx,:] cc_time = corrdata.time[tindx] # do stacking dstack = np.zeros((len(method),corrdata.data.shape[1]),dtype=np.float32) for i in range(len(method)): m =method[i] if nstacks==1: dstack[i,:]=cc_array else: dstack[i,:] = stack.seisstack(cc_array,method=method,par=par) # good to return return dstack,cc_time def rotation(bigstack,parameters,locs,flag): ''' this function transfers the Green's tensor from a E-N-Z system into a R-T-Z one PARAMETERS: ------------------- bigstack: 9 component Green's tensor in E-N-Z system parameters: dict containing all parameters saved in ASDF file locs: dict containing station angle info for correction purpose RETURNS: ------------------- tcorr: 9 component Green's tensor in R-T-Z system ''' # load parameter dic pi = np.pi azi = parameters['azi'] baz = parameters['baz'] ncomp,npts = bigstack.shape if ncomp<9: print('crap did not get enough components') tcorr=[] return tcorr staS = parameters['station_source'] staR = parameters['station_receiver'] if locs is not None: sta_list = list(locs['station']) angles = list(locs['angle']) # get station info from the name of ASDF file ind = sta_list.index(staS) acorr = angles[ind] ind = sta_list.index(staR) bcorr = angles[ind] #---angles to be corrected---- cosa = np.cos((azi+acorr)*pi/180) sina = np.sin((azi+acorr)*pi/180) cosb = np.cos((baz+bcorr)*pi/180) sinb = np.sin((baz+bcorr)*pi/180) else: cosa = np.cos(azi*pi/180) sina = np.sin(azi*pi/180) cosb = np.cos(baz*pi/180) sinb = np.sin(baz*pi/180) # rtz_components = ['ZR','ZT','ZZ','RR','RT','RZ','TR','TT','TZ'] tcorr = np.zeros(shape=(9,npts),dtype=np.float32) tcorr[0] = -cosb*bigstack[7]-sinb*bigstack[6] tcorr[1] = sinb*bigstack[7]-cosb*bigstack[6] tcorr[2] = bigstack[8] tcorr[3] = -cosa*cosb*bigstack[4]-cosa*sinb*bigstack[3]-sina*cosb*bigstack[1]-sina*sinb*bigstack[0] tcorr[4] = cosa*sinb*bigstack[4]-cosa*cosb*bigstack[3]+sina*sinb*bigstack[1]-sina*cosb*bigstack[0] tcorr[5] = cosa*bigstack[5]+sina*bigstack[2] tcorr[6] = sina*cosb*bigstack[4]+sina*sinb*bigstack[3]-cosa*cosb*bigstack[1]-cosa*sinb*bigstack[0] tcorr[7] = -sina*sinb*bigstack[4]+sina*cosb*bigstack[3]+cosa*sinb*bigstack[1]-cosa*cosb*bigstack[0] tcorr[8] = -sina*bigstack[5]+cosa*bigstack[2] return tcorr #### def merging(ccfiles,pairlist=None,outdir='./MERGED_PAIRS',verbose=False,to_egf=False, stack=False,stack_method='linear',stack_win_len=None): print("WARNING: Old function call, will be deprecated in v0.7.x. Function has been renamed to: merge_pairs() with the same options.") merge_pairs(ccfiles,pairlist=pairlist,outdir=outdir,verbose=verbose,to_egf=to_egf, stack=stack,stack_method=stack_method,stack_win_len=stack_win_len) ### def merge_pairs(ccfiles,pairlist=None,outdir='./MERGED_PAIRS',verbose=False,to_egf=False, stack=False,stack_method='linear',stack_win_len=None): """ This is a wrapper function that merges all data for the same station pair to a single CorrData object. It calls CorrData.merge() to assemble all CorrData. PARAMETERS ---------------------- ccfiles: a list of correlation functions in ASDF format, saved to *.h5 file. pairlist: a list of station pairs to merge. If None (default), it will merge all station pairs. outdir: directory to save the data. Defautl is ./MERGED_PAIRS. verbose: verbose flag. Default is False. to_egf: whether to convert the data to empirical Green's functions (EGF) before saving. Default is False. stack: whether to stack all merged data before saving. Default: False. stack_method: when stack is True, this is the method for stacking. stack_win_len: window length in seconds for stacking, only used when stack is True. When stack_win_len is not None, the stacking will be done over the specified windown lengths, instead of the entire data set. """ # source folder if pairlist is None: pairlist,netsta_all=get_stationpairs(ccfiles,False) if len(ccfiles)==0: raise IOError('Abort! no available CCF data for merging') for s in netsta_all: tmp = os.path.join(outdir,s) if not os.path.isdir(tmp):os.mkdir(tmp) if isinstance(pairlist,str):pairlist=[pairlist] if not os.path.isdir(outdir):os.makedirs(outdir) for pair in pairlist: ttr = pair.split('_') snet,ssta = ttr[0].split('.') rnet,rsta = ttr[1].split('.') idir = ttr[0] # continue when file is done ioutdir=os.path.join(outdir,idir) if not os.path.isdir(ioutdir):os.makedirs(ioutdir) if verbose:print('assembling all corrdata ...') t0=time.time() corrdict_all=dict() #all components for the single station pair # txtract=np.zeros(len(ccfiles),dtype=np.float32) # tmerge=np.zeros(len(ccfiles),dtype=np.float32) tparameters=None for i,ifile in enumerate(ccfiles): # tt00=time.time() corrdict=extract_corrdata(ifile,pair=pair) # txtract[i]=time.time()-tt00 if len(list(corrdict.keys()))>0: comp_list=list(corrdict[pair].keys()) if len(comp_list)==0: continue ### merge same component corrdata. # tt11=time.time() for c in comp_list: if c in list(corrdict_all.keys()): corrdict_all[c].merge(corrdict[pair][c]) else:corrdict_all[c]=corrdict[pair][c] del corrdict # tmerge[i]=time.time()-tt11 # # if flag:print('extract time:'+str(np.sum(txtract))) # if flag:print('merge time:'+str(np.sum(tmerge))) t1=time.time() if verbose:print('finished assembling in %6.2fs ...'%(t1-t0)) #get length info from anyone of the corrdata, assuming all corrdata having the same length. cc_comp=list(corrdict_all.keys()) #final check on number of keys after merging all data. if len(cc_comp)==0: if verbose:print('continue! no cross components for %s'%(pair)) continue #save data. outfn = pair+'.h5' if verbose:print('save to %s'%(outfn)) merged_h5 = os.path.join(ioutdir,outfn) for ic in cc_comp: #save components. #convert corrdata to empirical Green's functions by #taking the negative time derivative. See types.CorrData.to_egf() for details. try: if stack: corrdict_all[ic].stack(method=stack_method,win_len=stack_win_len) if to_egf: corrdict_all[ic].to_egf() corrdict_all[ic].to_asdf(file=merged_h5) except Exception as e: print(str(e)+"--> skipped: "+corrdict_all[ic].id) del corrdict_all ### def merge_chunks(ccfiles,outdir='./MERGED_CHUNKS',verbose=False,to_egf=False, stack=False,stack_method='linear',stack_win_len=None): """ This is a wrapper function that merges all data in the given list of correlation files. It calls CorrData.merge() to assemble all CorrData for the same station and component pairs. The functionality is similar with noise.merge_pairs(). This is particularly useful when the number of chunks is too large to be handled. At the same time, it provides the option to further reduce the data size by stacking. Please note that the stacking here works for the given list of files. PARAMETERS ---------------------- ccfiles: a list of correlation functions in ASDF format, saved to *.h5 file. outdir: directory to save the data. Defautl is ./MERGED_PAIRS. verbose: verbose flag. Default is False. to_egf: whether to convert the data to empirical Green's functions (EGF) before saving. Default is False. stack: whether to stack all merged data before saving. Default: False. stack_method: when stack is True, this is the method for stacking. stack_win_len: window length in seconds for stacking, only used when stack is True. When stack_win_len is not None, the stacking will be done over the specified windown lengths, instead of the entire data set. The function stacks all data if "stack_win_len" > the time duration of the whole list of correlation files. """ pairs_all,netsta,trange=get_stationpairs(ccfiles,getcclist=False,gettimerange=True) ts,te=trange outfile = os.path.join(outdir,str(obspy.UTCDateTime(ts)).replace(':', '-') + \ 'T' + str(obspy.UTCDateTime(te)).replace(':', '-') + '.h5') for p in pairs_all: corrdict_all=dict() for f in ccfiles: # print("---> "+ifile) corrdict=extract_corrdata(f,pair=p) # txtract[i]=time.time()-tt00 if len(list(corrdict.keys()))>0: comp_list=list(corrdict[p].keys()) if len(comp_list)==0: continue ### merge same pair and component corrdata. # tt11=time.time() if p not in list(corrdict_all.keys()): corrdict_all[p]=corrdict[p] for c in comp_list: if c in list(corrdict_all[p].keys()): corrdict_all[p][c].merge(corrdict[p][c]) else: corrdict_all[p][c]=corrdict[p][c] del corrdict # if p in list(corrdict_all.keys()): comp_list=list(corrdict_all[p].keys()) if len(comp_list)>0: for c in comp_list: if corrdict_all[p][c].data is not None: if stack: corrdict_all[p][c].stack(method=stack_method,win_len=stack_win_len) if to_egf: corrdict_all[p][c].to_egf() corrdict_all[p][c].to_asdf(file=outfile,v=False) del corrdict_all ######################################################## ################ XCORR ANALYSIS FUNCTIONS ################## ######################################################## def save_xcorr_amplitudes(dict_in,filenamebase=None): """ This function saves the amplitude data for both negative and positive lags, for each xcorr component pair, to csv files. PARAMETERS: ---------------------------- dict_in: dictionary containing peak amplitude information from one virtual source to all other receivers. This can be the output of get_xcorr_peakamplitudes(). filenamebase: file name base of the csv file, default is source_component_peakamp.txt in the current dir. """ source=dict_in['source']['name'] lonS0,latS0,eleS0=dict_in['source']['location'] # if filenamebase is None: filenamebase = source cc_comp=list(dict_in['cc_comp'].keys()) for ic in range(len(cc_comp)): comp = cc_comp[ic] receivers=list(dict_in['cc_comp'][comp].keys()) lonS=lonS0*np.ones((len(receivers),)) latS=latS0*np.ones((len(receivers),)) eleS=eleS0*np.ones((len(receivers),)) comp_out=len(receivers)*[comp] source_out=len(receivers)*[source] lonR=[] latR=[] eleR=[] dist=[] peakamp_neg=[] peakamp_pos=[] peaktt_neg=[] peaktt_pos=[] az=[] baz=[] for ir in range(len(receivers)): receiver=receivers[ir] dist0=dict_in['cc_comp'][comp][receiver]['dist'] dist.append(dist0) lonR.append(dict_in['cc_comp'][comp][receiver]['location'][0]) latR.append(dict_in['cc_comp'][comp][receiver]['location'][1]) eleR.append(0.0) az.append(dict_in['cc_comp'][comp][receiver]['az']) baz.append(dict_in['cc_comp'][comp][receiver]['baz']) peakamp_neg.append(np.array(dict_in['cc_comp'][comp][receiver]['peak_amplitude'])[0]) peakamp_pos.append(np.array(dict_in['cc_comp'][comp][receiver]['peak_amplitude'])[1]) peaktt_neg.append(np.array(dict_in['cc_comp'][comp][receiver]['peak_amplitude_time'])[0]) peaktt_pos.append(np.array(dict_in['cc_comp'][comp][receiver]['peak_amplitude_time'])[1]) outDF=pd.DataFrame({'source':source_out,'lonS':lonS,'latS':latS,'eleS':eleS, 'receiver':receivers,'lonR':lonR,'latR':latR,'eleR':eleR, 'az':az,'baz':baz,'dist':dist,'peakamp_neg':peakamp_neg, 'peakamp_pos':peakamp_pos,'peaktt_neg':peaktt_neg, 'peaktt_pos':peaktt_pos,'comp':comp_out}) fname=filenamebase+'_'+comp+'_peakamp.txt' outDF.to_csv(fname,index=False) print('data was saved to: '+fname) def get_stationpairs(ccfiles,getcclist=False,verbose=False,gettimerange=False): """ Extract unique station pairs from all cc files in ASDF format. ====PARAMETERS=== ccfiles: a list of cc files. getcclist: get cc component list. default False. verbose: verbose flag; default False. ====RETURNS=== pairs_all: all netstaion pairs in the format of NET1.STA1_NET2.STA2 netsta_all: all net.sta (unique list) ccomp_all: all unique list of cc components. """ if isinstance(ccfiles,str):ccfiles=[ccfiles] pairs_all = [] ccomp_all=[] if gettimerange: ts=[] te=[] for f in ccfiles: # load the data from daily compilation try: ds=pyasdf.ASDFDataSet(f,mpi=False,mode='r') except Exception as e: raise IOError("error openning "+f+":"+str(e)) try: pairlist = ds.auxiliary_data.list() if getcclist: for p in pairlist: chanlist=ds.auxiliary_data[p].list() for c in chanlist: if gettimerange: para=ds.auxiliary_data[p][c].parameters ttime=para['time'] if 'time_mean' in list(para.keys()): ttime += para['time_mean'] ts.append(np.min(ttime)) te.append(np.max(ttime)) c1,c2=c.split('_') ccomp_all.extend(c1[-1]+c2[-1]) ccomp_all=sorted(set(ccomp_all)) elif gettimerange: for p in pairlist: chanlist=ds.auxiliary_data[p].list() for c in chanlist: para=ds.auxiliary_data[p][c].parameters ttime=para['time'] if 'time_mean' in list(para.keys()): ttime += para['time_mean'] ts.append(np.min(ttime)) te.append(np.max(ttime)) pairs_all.extend(pairlist) pairs_all=sorted(set(pairs_all)) except Exception: if verbose:print('continue! no data in %s'%(f)) continue netsta_all=[] for p in pairs_all: netsta=p.split('_') netsta_all.extend(netsta) netsta_all=sorted(set(netsta_all)) if getcclist: if gettimerange: trange=[np.min(ts),np.max(te)] return pairs_all,netsta_all,ccomp_all,trange else: return pairs_all,netsta_all,ccomp_all else: if gettimerange: trange=[np.min(ts),np.max(te)] return pairs_all,netsta_all,trange else: return pairs_all,netsta_all def get_cctimerange(ccfiles,verbose=False): """ Extract time range from all cc files in ASDF format. ====PARAMETERS=== ccfiles: a list of cc files. verbose: verbose flag; default False. ====RETURNS=== ts,te: start and end time of all ccdata. """ if isinstance(ccfiles,str):ccfiles=[ccfiles] ts_all = [] te_all = [] for f in ccfiles: # load the data from daily compilation corrdata=extract_corrdata(f,dataless=True) plist=list(corrdata.keys()) for p in plist: clist=list(corrdata[p].keys()) c=clist[0] if corrdata[p][c].substack: ts_all.append(corrdata[p][c].time[0]) te_all.append(corrdata[p][c].time[-1]) else: ts_all.append(corrdata[p][c].time) te_all.append(corrdata[p][c].time) del corrdata ts=np.array(ts_all).min() te=np.array(te_all).max() return ts,te def extract_corrdata(sfile,pair=None,comp=['all'],dataless=False): ''' extract the 2D matrix of the cross-correlation functions and the metadata for a certain time-chunck. PARAMETERS: -------------------------- sfile: cross-correlation functions outputed by SeisGo cross-correlation workflow pair: net1.sta1-net2.sta2 pair to extract, default is to extract all pairs. comp: cross-correlation component or a list of components to extract, default is all components. RETURN: -------------------------- corrdict: a dictionary that contains all extracted correlations, which each key as the station pair name. for each station pair, the correlaitons are saved as a list of CorrData objects. USAGE: -------------------------- extract_corrdata('temp.h5',comp='ZZ') ''' #check help or not at the very beginning # open data for read if isinstance(pair,str): pair=[pair] if isinstance(comp,str): comp=[comp] corrdict=dict() try: ds = pyasdf.ASDFDataSet(sfile,mpi=False,mode='r') # extract common variables spairs_all = ds.auxiliary_data.list() except Exception: print("exit! cannot open %s to read"%sfile);sys.exit() if pair is None: pair=spairs_all overlap_pair=list(set(pair) & set(spairs_all)) if len(overlap_pair)<1: print(str(pair)+" not found. Return empty.") return corrdict for spair in overlap_pair: ttr = spair.split('_') snet,ssta = ttr[0].split('.') rnet,rsta = ttr[1].split('.') path_lists = ds.auxiliary_data[spair].list() corrdict[spair]=dict() for ipath in path_lists: schan,rchan = ipath.split('_') cc_comp=schan[-1]+rchan[-1] if cc_comp in comp or comp == ['all'] or comp ==['ALL']: try: para=ds.auxiliary_data[spair][ipath].parameters substack,ttime,dt,maxlag,az,baz,cc_method,dist,slat,slon,rlat,rlon = \ [para['substack'],para['time'],\ para['dt'],para['maxlag'],para['azi'],para['baz'],\ para['cc_method'],para['dist'],para['latS'],para['lonS'],\ para['latR'],para['lonR']] if "eleS" in list(para.keys()): sele = para['eleS'] else: sele = 0.0 if "eleR" in list(para.keys()): rele = para['eleR'] else: rele = 0.0 if "cc_len" in list(para.keys()): cc_len = para['cc_len'] else: cc_len = None if "cc_step" in list(para.keys()): cc_step = para['cc_step'] else: cc_step = None if "side" in list(para.keys()): side = para['side'] else: side = "A" ##special handling of time, in case time_mean is saved to reduce the attribute memory_size if "time_mean" in list(para.keys()): tmean=para["time_mean"] ttime = np.float64(ttime) + tmean if not dataless: data = np.array(ds.auxiliary_data[spair][ipath].data) else: data = None except Exception: print('continue! something wrong with %s %s'%(spair,ipath)) continue corrdict[spair][cc_comp]=CorrData(net=[snet,rnet],sta=[ssta,rsta],loc=['',''],\ chan=[schan,rchan],lon=[slon,rlon],lat=[slat,rlat], ele=[sele,rele],cc_comp=cc_comp,dt=dt,lag=maxlag, cc_len=cc_len,cc_step=cc_step,dist=dist,az=az, baz=baz,time=ttime,data=data, substack=substack,side=side,misc=para) if "type" in list(para.keys()): corrdict[spair][cc_comp].type=para['type'] return corrdict def save_corrfile_to_sac(cfile,rootdir='.',pair=None,comp=['all'],v=True): """ Save correlation files in ASDF to sac files. === PARAMETERS === cfile: correlation file from SeisGo workflow. It could be a list of files. rootdir: folder to save the converted sac files. this is the root folder, not the folder for individual sources/receivers, which will be created by this function. Default is the current directory. pair: net1.sta1_net2.sta2 pair to extract, default is to extract all pairs. comp: cross-correlation component or a list of components to extract, default is 'all'. v: verbose or not, default is True. """ if isinstance(cfile,str):cfile=[cfile] if isinstance(pair,str): pair=[pair] nfile=len(cfile) for cf in cfile: if v: print('working on file: '+cf.split('/')[-1]) corrdict=extract_corrdata(cf) pairs_all=list(corrdict.keys()) if pair is None: extract_pair=pairs_all else: extract_pair=pair for p in extract_pair: if p in pairs_all: netsta1,netsta2=p.split('_') outdir=os.path.join(rootdir,netsta1,netsta2) comp_all=list(corrdict[p].keys()) for c in comp_all: if c in comp or comp == ['all'] or comp ==['ALL']: corrdict[p][c].to_sac(outdir=outdir) else: print('Pair %s not found. Skip.'%(p)) continue
{"hexsha": "7d477ad53ca5dbf6849bd87968236bc309f33d40", "size": 47272, "ext": "py", "lang": "Python", "max_stars_repo_path": "seisgo/noise.py", "max_stars_repo_name": "xtyangpsp/SeisGo", "max_stars_repo_head_hexsha": "c445cdc7e760de957559af3e33e3a26489e3ee55", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2021-06-06T01:27:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T02:39:49.000Z", "max_issues_repo_path": "seisgo/noise.py", "max_issues_repo_name": "xtyangpsp/SeisGo", "max_issues_repo_head_hexsha": "c445cdc7e760de957559af3e33e3a26489e3ee55", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2021-06-08T13:33:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-01T12:46:42.000Z", "max_forks_repo_path": "seisgo/noise.py", "max_forks_repo_name": "xtyangpsp/SeisGo", "max_forks_repo_head_hexsha": "c445cdc7e760de957559af3e33e3a26489e3ee55", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-08-21T02:23:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T01:37:14.000Z", "avg_line_length": 42.434470377, "max_line_length": 137, "alphanum_fraction": 0.5768742596, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 11767}
#include <boost/make_shared.hpp> #include <boost/thread/locks.hpp> #include <boost/thread/mutex.hpp> #include <string> #include <vector> #include "caffe/array/array.hpp" #include "caffe/array/math.hpp" namespace caffe { template<typename T> Array<T>::Array(const Array & o) : ArrayMemory(o), ArrayBase<T>(o) { } template<typename T> Array<T>::Array(ArrayMode mode) : ArrayMemory(), ArrayBase<T>(mode) { } template<typename T> Array<T>::Array(const ArrayShape &shape, ArrayMode mode): ArrayMemory(count(shape)*sizeof(T)), ArrayBase<T>(shape, mode) { } template<typename T> Array<T>::Array(SyncedMemory *memory, const ArrayShape &shape, ArrayMode mode):ArrayMemory(memory, count(shape)), ArrayBase<T>(shape, mode) { CHECK_GE(memory->size(), count(shape) *sizeof(T)) << "SyncedMemory size '" << memory->size() << "' is smaller than shape " << shapeToString(shape) << " with element size " << sizeof(T); } template<typename T> Array<T>::Array(shared_ptr<SyncedMemory> memory, const ArrayShape &shape, ArrayMode mode):ArrayMemory(memory, count(shape)), ArrayBase<T>(shape, mode) { CHECK_GE(memory->size(), count(shape)*sizeof(T)) << "SyncedMemory size '" << memory->size() << "' is smaller than shape " << shapeToString(shape) << " with element size " << sizeof(T); } template<typename T> Array<T>::Array(shared_ptr<SyncedMemory> m, size_t o, const ArrayShape &s, ArrayMode mode):ArrayMemory(m, o*sizeof(T), count(s)*sizeof(T)), ArrayBase<T>(s, mode) { CHECK_GE(m->size(), (o+count(s))*sizeof(T)) << "SyncedMemory size '" << m->size() << "' is smaller than shape " << shapeToString(s) << " with element size " << sizeof(T) << " and offset " << o; } template<typename T> Array<T>::~Array() {} template<typename T> void Array<T>::initialize(const ArrayShape &shape) { CHECK_EQ(count(this->shape_), 0) << "Array already initialized!"; this->shape_ = shape; ArrayMemory::initializeMemory(count(shape) * sizeof(T)); } template<typename T> void Array<T>::setMode(ArrayMode mode) { this->mode_ = mode; } template <typename T> void Array<T>::FromProto(const BlobProto& proto, bool reshape) { ArrayShape shape; if (proto.has_num() || proto.has_channels() || proto.has_height() || proto.has_width()) { // Using deprecated 4D Blob dimensions -- // shape is (num, channels, height, width). shape.resize(4); shape[0] = proto.num(); shape[1] = proto.channels(); shape[2] = proto.height(); shape[3] = proto.width(); } else { shape.resize(proto.shape().dim_size()); for (int i = 0; i < proto.shape().dim_size(); ++i) { shape[i] = proto.shape().dim(i); } } if (reshape) initialize(shape); else CHECK_EQ(shape, this->shape_) << "shape mismatch (reshape not set)"; // copy data T* data_vec = mutable_cpu_data(); for (int i = 0; i < count(this->shape_); i++) data_vec[i] = proto.data(i); CHECK_EQ(proto.diff_size(), 0) << "Cannot read BlobProto diff"; } template <typename T> void Array<T>::ToProto(BlobProto* proto) const { proto->clear_shape(); for (int i = 0; i < this->shape_.size(); i++) { proto->mutable_shape()->add_dim(this->shape_[i]); } proto->clear_data(); proto->clear_diff(); const T* data_vec = cpu_data(); for (int i = 0; i < count(this->shape_); i++) proto->add_data(data_vec[i]); } template<typename T> Array<T> Array<T>::eval() const { return *this; } template<typename T> shared_ptr<SyncedMemory> Array<T>::memory() const { return memory_; } template<typename T> const T *Array<T>::cpu_data() const { return static_cast<const T *>(ArrayMemory::cpu_data_()); } template<typename T> const T *Array<T>::gpu_data() const { return static_cast<const T *>(ArrayMemory::gpu_data_()); } template<typename T> T *Array<T>::mutable_cpu_data() { return static_cast<T *>(ArrayMemory::mutable_cpu_data_()); } template<typename T> T *Array<T>::mutable_gpu_data() { return static_cast<T *>(ArrayMemory::mutable_gpu_data_()); } template<typename T> Array<T> &Array<T>::operator=(const Expression<T> & other) { if (!memory_) { initialize(other.shape()); setMode(other.mode()); } CHECK_EQ(this->shape(), other.shape()) << "Array shape missmatches"; other.evaluate(this); return *this; } template<typename T> Array<T> &Array<T>::operator=(const T &v) { CHECK(memory_) << "Array not initialized"; #ifndef CPU_ONLY if (this->effectiveMode() == AR_GPU) caffe_gpu_set(count(this->shape()), v, this->mutable_gpu_data()); else #endif caffe_set(count(this->shape()), v, this->mutable_cpu_data()); return *this; } template<typename T> Array<T> &Array<T>::operator=(const Array<T> &other) { if (!memory_) { initialize(other.shape()); setMode(other.mode()); } CHECK_EQ(this->shape(), other.shape()) << "Array shape missmatches"; #ifndef CPU_ONLY if (this->effectiveMode() == AR_GPU) // NOLINT_NEXT_LINE(caffe/alt_fn) CUDA_CHECK(cudaMemcpy(this->mutable_gpu_data(), other.gpu_data(), sizeof(T) * count(this->shape()), cudaMemcpyDefault)); else #endif // NOLINT_NEXT_LINE(caffe/alt_fn) memcpy(this->mutable_cpu_data(), other.cpu_data(), sizeof(T) * count(this->shape())); return *this; } template<typename T> Array<T> Array<T>::reshape(ArrayShape shape) const { size_t p = 1; int md = -1; for (int d = 0; d < shape.size(); d++) if (shape[d] == -1) { CHECK_EQ(md, -1) << "Only one missing dimension supported"; md = d; } else { p *= shape[d]; } if (md >= 0) shape[md] = count(this->shape()) / p; CHECK_EQ(count(this->shape()), count(shape)) << "reshape cannot change array size"; return Array<T>(memory_, offset_/sizeof(T), shape, this->mode()); } template<typename T> Array<T> Array<T>::operator[](size_t d) { CHECK_GT(this->shape().size(), 0) << "At least one dimension required"; CHECK_LT(d, this->shape()[0]) << "Index out of range"; ArrayShape s(this->shape().begin()+1, this->shape().end()); return Array<T>(memory_, d*count(s), s, this->mode()); } template<typename T> const Array<T> Array<T>::operator[](size_t d) const { CHECK_GT(this->shape().size(), 0) << "At least one dimension required"; CHECK_LT(d, this->shape()[0]) << "Index out of range"; ArrayShape s(this->shape().begin()+1, this->shape().end()); return Array<T>(memory_, d*count(s), s, this->mode()); } INSTANTIATE_CLASS(Array); } // namespace caffe
{"hexsha": "8ecbdec11e421bd9f689e62ec808589d53223b7f", "size": 6442, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "caffe/src/caffe/array/array.cpp", "max_stars_repo_name": "tinghuiz/learn-reflectance", "max_stars_repo_head_hexsha": "31ab326d344834e9cd8bb042551176bcf3114a9c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33.0, "max_stars_repo_stars_event_min_datetime": "2016-02-08T21:07:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-09T10:35:04.000Z", "max_issues_repo_path": "caffe/src/caffe/array/array.cpp", "max_issues_repo_name": "tinghuiz/learn-reflectance", "max_issues_repo_head_hexsha": "31ab326d344834e9cd8bb042551176bcf3114a9c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-05-08T06:56:46.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-08T06:56:46.000Z", "max_forks_repo_path": "caffe/src/caffe/array/array.cpp", "max_forks_repo_name": "tinghuiz/learn-reflectance", "max_forks_repo_head_hexsha": "31ab326d344834e9cd8bb042551176bcf3114a9c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15.0, "max_forks_repo_forks_event_min_datetime": "2016-02-10T19:17:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-06T22:53:08.000Z", "avg_line_length": 32.3718592965, "max_line_length": 80, "alphanum_fraction": 0.6507295871, "num_tokens": 1730}
# This file is part of the Open Data Cube, see https://opendatacube.org for more information # # Copyright (c) 2015-2020 ODC Contributors # SPDX-License-Identifier: Apache-2.0 import numpy as np import toolz from ..model import Dataset from ..storage import reproject_and_fuse, BandInfo from ..storage._rio import RasterioDataSource, RasterDatasetDataSource from ..utils.geometry._warp import resampling_s2rio from ..storage._read import rdr_geobox from ..utils.geometry import GeoBox from ..utils.geometry import gbox as gbx from ..index.eo3 import is_doc_eo3, _norm_grid # type: ignore[attr-defined] from types import SimpleNamespace class RasterFileDataSource(RasterioDataSource): """ This is only used in test code """ def __init__(self, filename, bandnumber, nodata=None, crs=None, transform=None, lock=None): super(RasterFileDataSource, self).__init__(filename, nodata, lock=lock) self.bandnumber = bandnumber self.crs = crs self.transform = transform def get_bandnumber(self, src): return self.bandnumber def get_transform(self, shape): if self.transform is None: raise RuntimeError('No transform in the data and no fallback') return self.transform def get_crs(self): if self.crs is None: raise RuntimeError('No CRS in the data and no fallback') return self.crs def _raster_metadata(band): source = RasterDatasetDataSource(band) with source.open() as rdr: return SimpleNamespace(dtype=rdr.dtype.name, nodata=rdr.nodata, geobox=rdr_geobox(rdr)) def get_raster_info(ds: Dataset, measurements=None): """ :param ds: Dataset :param measurements: List of band names to load """ if measurements is None: measurements = list(ds.type.measurements) return {n: _raster_metadata(BandInfo(ds, n)) for n in measurements} def eo3_geobox(ds: Dataset, band: str) -> GeoBox: mm = ds.measurements.get(ds.type.canonical_measurement(band), None) if mm is None: raise ValueError(f"No such band: {band}") crs = ds.crs doc_path = ('grids', mm.get('grid', 'default')) grid = toolz.get_in(doc_path, ds.metadata_doc) if crs is None or grid is None: raise ValueError('Not a valid EO3 dataset') grid = _norm_grid(grid) h, w = grid.shape return GeoBox(w, h, grid.transform, crs) def native_geobox(ds, measurements=None, basis=None): """Compute native GeoBox for a set of bands for a given dataset :param ds: Dataset :param measurements: List of band names to consider :param basis: Name of the band to use for computing reference frame, other bands might be reprojected if they use different pixel grid :return: GeoBox describing native storage coordinates. """ gs = ds.type.grid_spec if gs is not None: # Dataset is from ingested product, figure out GeoBox of the tile this dataset covers bb = [gbox for _, gbox in gs.tiles(ds.bounds)] if len(bb) != 1: # Ingested product but dataset overlaps several/none tiles -- no good raise ValueError('Broken GridSpec detected') return bb[0] if measurements is None and basis is None: measurements = list(ds.type.measurements) if is_doc_eo3(ds.metadata_doc): if basis is not None: return eo3_geobox(ds, basis) gboxes = [eo3_geobox(ds, band) for band in measurements] else: if basis is not None: return get_raster_info(ds, [basis])[basis].geobox ii = get_raster_info(ds, measurements) gboxes = [info.geobox for info in ii.values()] geobox = gboxes[0] consistent = all(geobox == gbox for gbox in gboxes) if not consistent: raise ValueError('Not all bands share the same pixel grid') return geobox def native_load(ds, measurements=None, basis=None, **kw): """Load single dataset in native resolution. :param ds: Dataset :param measurements: List of band names to load :param basis: Name of the band to use for computing reference frame, other bands might be reprojected if they use different pixel grid :param **kw: Any other parameter load_data accepts :return: Xarray dataset """ from datacube import Datacube geobox = native_geobox(ds, measurements, basis) # early exit via exception if no compatible grid exists if measurements is not None: mm = ds.type.lookup_measurements(measurements) else: mm = ds.type.measurements return Datacube.load_data(Datacube.group_datasets([ds], 'time'), geobox, measurements=mm, **kw) def dc_read(path, band=1, gbox=None, resampling='nearest', dtype=None, dst_nodata=None, fallback_nodata=None): """ Use default io driver to read file without constructing Dataset object. """ source = RasterFileDataSource(path, band, nodata=fallback_nodata) with source.open() as rdr: dtype = rdr.dtype if dtype is None else dtype if gbox is None: gbox = rdr_geobox(rdr) if dst_nodata is None: dst_nodata = rdr.nodata # currently dst_nodata = None case is not supported. So if fallback_nodata # was None and file had none set, then use 0 as default output fill value if dst_nodata is None: dst_nodata = 0 im = np.full(gbox.shape, dst_nodata, dtype=dtype) reproject_and_fuse([source], im, gbox, dst_nodata, resampling=resampling) return im def write_gtiff(fname, pix, crs='epsg:3857', resolution=(10, -10), offset=(0.0, 0.0), nodata=None, overwrite=False, blocksize=None, gbox=None, **extra_rio_opts): """ Write ndarray to GeoTiff file. Geospatial info can be supplied either via - resolution, offset, crs or - gbox (takes precedence if supplied) """ # pylint: disable=too-many-locals from affine import Affine import rasterio from pathlib import Path if pix.ndim == 2: h, w = pix.shape nbands = 1 band = 1 elif pix.ndim == 3: nbands, h, w = pix.shape band = tuple(i for i in range(1, nbands+1)) else: raise ValueError('Need 2d or 3d ndarray on input') if not isinstance(fname, Path): fname = Path(fname) if fname.exists(): if overwrite: fname.unlink() else: raise IOError("File exists") if gbox is not None: assert gbox.shape == (h, w) A = gbox.transform crs = str(gbox.crs) else: sx, sy = resolution tx, ty = offset A = Affine(sx, 0, tx, 0, sy, ty) rio_opts = dict(width=w, height=h, count=nbands, dtype=pix.dtype.name, crs=crs, transform=A, predictor=2, compress='DEFLATE') if blocksize is not None: rio_opts.update(tiled=True, blockxsize=min(blocksize, w), blockysize=min(blocksize, h)) if nodata is not None: rio_opts.update(nodata=nodata) rio_opts.update(extra_rio_opts) with rasterio.open(str(fname), 'w', driver='GTiff', **rio_opts) as dst: dst.write(pix, band) meta = dst.meta meta['gbox'] = gbox if gbox is not None else rio_geobox(meta) meta['path'] = fname return SimpleNamespace(**meta) def dc_crs_from_rio(crs): from datacube.utils.geometry import CRS if crs.is_epsg_code: return CRS('EPSG:{}'.format(crs.to_epsg())) return CRS(crs.wkt) def rio_geobox(meta): """ Construct geobox from src.meta of opened rasterio dataset """ if 'crs' not in meta or 'transform' not in meta: return None h, w = (meta['height'], meta['width']) crs = dc_crs_from_rio(meta['crs']) transform = meta['transform'] return GeoBox(w, h, transform, crs) def _fix_resampling(kw): r = kw.get('resampling', None) if isinstance(r, str): kw['resampling'] = resampling_s2rio(r) def rio_slurp_reproject(fname, gbox, dtype=None, dst_nodata=None, **kw): """ Read image with reprojection """ import rasterio from rasterio.warp import reproject _fix_resampling(kw) with rasterio.open(str(fname), 'r') as src: if src.count == 1: shape = gbox.shape src_band = rasterio.band(src, 1) else: shape = (src.count, *gbox.shape) src_band = rasterio.band(src, tuple(range(1, src.count+1))) if dtype is None: dtype = src.dtypes[0] if dst_nodata is None: dst_nodata = src.nodata if dst_nodata is None: dst_nodata = 0 pix = np.full(shape, dst_nodata, dtype=dtype) reproject(src_band, pix, dst_nodata=dst_nodata, dst_transform=gbox.transform, dst_crs=str(gbox.crs), **kw) meta = src.meta meta['src_gbox'] = rio_geobox(meta) meta['path'] = fname meta['gbox'] = gbox return pix, SimpleNamespace(**meta) def rio_slurp_read(fname, out_shape=None, **kw): """ Read whole image file using rasterio. :returns: ndarray (2d or 3d if multi-band), dict (rasterio meta) """ import rasterio _fix_resampling(kw) if out_shape is not None: kw.update(out_shape=out_shape) with rasterio.open(str(fname), 'r') as src: data = src.read(1, **kw) if src.count == 1 else src.read(**kw) meta = src.meta src_gbox = rio_geobox(meta) same_gbox = out_shape is None or out_shape == src_gbox.shape gbox = src_gbox if same_gbox else gbx.zoom_to(src_gbox, out_shape) meta['src_gbox'] = src_gbox meta['gbox'] = gbox meta['path'] = fname return data, SimpleNamespace(**meta) def rio_slurp(fname, *args, **kw): """ Dispatches to either: rio_slurp_read(fname, out_shape, ..) rio_slurp_reproject(fname, gbox, ...) """ if len(args) == 0: if 'gbox' in kw: return rio_slurp_reproject(fname, **kw) else: return rio_slurp_read(fname, **kw) if isinstance(args[0], GeoBox): return rio_slurp_reproject(fname, *args, **kw) else: return rio_slurp_read(fname, *args, **kw) def rio_slurp_xarray(fname, *args, rgb='auto', **kw): """ Dispatches to either: rio_slurp_read(fname, out_shape, ..) rio_slurp_reproject(fname, gbox, ...) then wraps it all in xarray.DataArray with .crs,.nodata etc. """ from xarray import DataArray if len(args) == 0: if 'gbox' in kw: im, mm = rio_slurp_reproject(fname, **kw) else: im, mm = rio_slurp_read(fname, **kw) else: if isinstance(args[0], GeoBox): im, mm = rio_slurp_reproject(fname, *args, **kw) else: im, mm = rio_slurp_read(fname, *args, **kw) if im.ndim == 3: dims = ('band', *mm.gbox.dims) if rgb and im.shape[0] in (3, 4): im = im.transpose([1, 2, 0]) dims = tuple(dims[i] for i in [1, 2, 0]) else: dims = mm.gbox.dims return DataArray(im, dims=dims, coords=mm.gbox.xr_coords(with_crs=True), attrs=dict( nodata=mm.nodata))
{"hexsha": "7cc852e1183d26d9cb3b3f80a93a76374673b188", "size": 11862, "ext": "py", "lang": "Python", "max_stars_repo_path": "datacube/testutils/io.py", "max_stars_repo_name": "agdc-research-trial/gdf", "max_stars_repo_head_hexsha": "82ed29c263eaf65f5c1fbb4e9207c99e9700b85c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-06-01T01:31:44.000Z", "max_stars_repo_stars_event_max_datetime": "2015-06-01T01:31:44.000Z", "max_issues_repo_path": "datacube/testutils/io.py", "max_issues_repo_name": "agdc-research-trial/gdf", "max_issues_repo_head_hexsha": "82ed29c263eaf65f5c1fbb4e9207c99e9700b85c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datacube/testutils/io.py", "max_forks_repo_name": "agdc-research-trial/gdf", "max_forks_repo_head_hexsha": "82ed29c263eaf65f5c1fbb4e9207c99e9700b85c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2888888889, "max_line_length": 108, "alphanum_fraction": 0.6003203507, "include": true, "reason": "import numpy", "num_tokens": 3007}
""" parse_options(kwargs) Internal function. Takes the keyword arguments from the main function and parses it into a usable Dict object # Examples ```julia-repl julia> parse_options(ex::Expr) Dict{String,Any} with 2 entries: "screen_name" => "jack" ... ``` """ function parse_options(kwargs) options = Dict{String, Any}() for arg in kwargs options[string(arg[1])] = arg[2] end options end """ parse_results(cursorable, newdata::Dict, api_options, data_holder, cur_count) Internal function. parses Twitter API results by determining the type of data and organizing for cursorized processing. There are two methods, this one takes a DICT object, indicating a set of user IDs or search results. returns cursorable, newdata, api_options, cur_count. # Examples ```julia-repl julia> parse_results(cursorable, newdata::Dict, api_options, data_holder, cur_count) ... ``` """ function parse_results(cursorable, newdata::Dict, api_options, data_holder, cur_count) # Handle data type to cursor if haskey(newdata, "ids") newdata["ids"] = vcat(data_holder, newdata["ids"]) cur_count += length(newdata["ids"]) cursorable = (newdata["next_cursor"] != 0) & (cur_count < api_options["count"] ) api_options["cursor"] = newdata["next_cursor"] elseif haskey(newdata, "statuses") out = [Tweets(x) for x in newdata["statuses"]] newdata["statuses"] = vcat(data_holder, out) cur_count += length(newdata["statuses"]) cursorable = cur_count < api_options["count"] api_options["max_id"] = minimum(x.id for x in newdata["statuses"]) else cur_count += length(newdata) cursorable = cur_count < api_options["count"] end cursorable, newdata, api_options, cur_count end """ parse_results(cursorable, newdata::Array, api_options, data_holder, cur_count) Internal function. parses Twitter API results by determining the type of data and organizing appropriately. There are two methods, this one takes an ARRAY object, indicating a set of Tweets. returns cursorable, newdata, api_options, cur_count # Examples ```julia-repl julia> parse_results(cursorable, newdata::Array, api_options, data_holder, cur_count) ... ``` """ function parse_results(cursorable, newdata::Array, api_options, data_holder, cur_count) newdata = Tweets[Tweets(x) for x in newdata] length(newdata) == 0 && return false, data_holder, api_options, cur_count # tree of options for max_id or since id cur_count += length(newdata) cursorable = cur_count < api_options["count"] api_options["max_id"] = minimum([x.id for x in newdata])-1 # get min id newdata = vcat(data_holder, newdata) cursorable, newdata, api_options, cur_count end """ cursor(cursorable::Bool, newdata::Dict, options::Dict, endp::String, cur_count::Integer) Internal function method for gathering IDS. Takes a tuple and returns a tuple of equal size, calls the Twitter API until the desired count of records is recovered or until the API exhausts its limits. Note: when a DICT object is provided as the data, this function assumes you are gathering follower or friends ids. # Examples ```julia-repl julia> while cursorable & (length(newdata["ids"]) < min_records) cursorable, newdata, options, kwargs, endp = cursor(cursorable, newdata, options, kwargs, endp) end ``` """ ################# cursor when new data is a Dict object - like followers or friends IDS function cursor(cursorable::Bool, newdata::Dict, options::Dict, endp::String, cur_count::Integer) cursorable == false && return cursorable, newdata, options, endp, cur_count data_holder = haskey(newdata, "ids") ? newdata["ids"] : haskey(newdata, "statuses") ? newdata["statuses"] : [] # save existing ids api_options = copy(options) # the get_oauth overwrites options, so store the correct data here cur_alloc = reconnect("$endp") # start reconnect loop remaining_calls = cur_alloc["remaining"] @debug "$remaining_calls calls left on this endpoint." r = get_oauth("https://api.twitter.com/1.1/$endp", options) if r.status == 200 newdata = JSON.parse(String(r.body)) cursorable, newdata, api_options, cur_count = parse_results(cursorable, newdata, api_options, data_holder, cur_count) cursorable, newdata, api_options, endp, cur_count else error("Twitter API returned $(r.status) status") end end """ cursor(cursorable::Bool, newdata::Dict, options::Dict, endp::String, cur_count::Integer) Internal function for gathering . Takes a tuple and returns a tuple of equal size, calls the Twitter API until the desired count of records is recovered or until the API exhausts its limits. Note: when an ARRAY object is provided as the data, this function assumes you are gathering a tweet timeline. Note: when retrieving tweets, the API always starts with the most recent. Therefore, if you want a chunk of older tweets, you must specify both since_id, and max_id when cursoring. # Examples ```julia-repl julia> while cursorable & (length(newdata["ids"]) < count) cursorable, newdata, options, endp = cursor(cursorable, newdata, options, endp) end ``` """ function cursor(cursorable::Bool, newdata::Array, options::Dict, endp::String, cur_count::Integer) cursorable == false && return cursorable, newdata, options, endp, cur_count data_holder = copy(newdata) # save existing ids api_options = copy(options) # the get_oauth overwrites options, so store the correct data here cur_alloc = reconnect("$endp") # start reconnect loop remaining_calls = cur_alloc["remaining"] @debug "$remaining_calls calls left on this endpoint." r = get_oauth("https://api.twitter.com/1.1/$endp", options) if r.status == 200 # parse and put into proper type form newdata = JSON.parse(String(r.body)) cursorable, newdata, api_options, cur_count = parse_results(cursorable, newdata, api_options, data_holder, cur_count) cursorable, newdata, api_options, endp, cur_count else error("Twitter API returned $(r.status) status") end end ########### EXPORTED FUNCTIONS....... """ get_followers_ids(; kwargs...) Get a Dict object of follower ids from a particular Twitter user. This function will call the API as many times as allowed or until the desired `max_records` is reached, whichever comes first. # Examples ```julia-repl julia> get_followers_ids(screen_name = "jack", count = 10_000) Dict{String,Any} with 6 entries: "previous_cursor_str" => "0" ... ``` """ function get_followers_ids(; kwargs...) # Could be doing some pre-allocation here to optimize performance, # but since this is an API function that only deals with 25K records at most... endp = "followers/ids.json" options = parse_options(kwargs) if "count" ∈ keys(options) count = options["count"] else options["count"] = 1 count = 1 # default to one record end cur_count = 0 cursorable = true newdata = Dict{String,Any}() newdata["ids"] = [] #Array{String,1}[] while cursorable & (length(newdata["ids"]) < count) cursorable, newdata, options, endp, cur_count = cursor(cursorable, newdata, options, endp, cur_count) end newdata end """ get_friends_ids(; kwargs...) Get a Dict object of follower ids from a particular Twitter user. This function will call the API as until the desired `count` is reached or the API runs out, whichever comes first. # Examples ```julia-repl julia> get_friends_ids(screen_name = "barackobama", count = 1000) ``` """ function get_friends_ids(; kwargs...) # Could be doing some pre-allocation here to optimize performance, # but since this is an API function that only deals with 25K records at most... endp = "friends/ids.json" options = parse_options(kwargs) if "count" ∈ keys(options) count = options["count"] else options["count"] = 1 count = 1 # default to one record end cur_count = 0 cursorable = true newdata = Dict{String,Any}() newdata["ids"] = [] #Array{String,1}[] while cursorable & (length(newdata["ids"]) < count) cursorable, newdata, options, endp, cur_count = cursor(cursorable, newdata, options, endp, cur_count) end newdata end ########################## OTHER TYPE: """ get_mentions_timeline(; kwargs...) Get an array object of mentions for a particular Twitter user. This function will call the API until the desired `count` is reached or the API runs out, whichever comes first. # Examples ```julia-repl julia> get_mentions_timeline(screen_name = "twitter", count = 1000) ``` """ function get_mentions_timeline(; kwargs...) # Could be doing some pre-allocation here to optimize performance, # but since this is an API function that only deals with 25K records at most... endp = "statuses/mentions_timeline.json" options = parse_options(kwargs) if "count" ∈ keys(options) count = options["count"] else options["count"] = 1 count = 1 end cur_count = 0 # make the first call to the API cursorable = true newdata = Tweets[] while cursorable & (length(newdata) < count) cursorable, newdata, options, endp, cur_count = cursor(cursorable, newdata, options, endp, cur_count) end newdata end """ get_user_timeline(; kwargs...) Get an array object of timeline tweets from a particular Twitter user. This function will call the API until the desired `count` is reached or the API runs out, whichever comes first. # Examples ```julia-repl julia> get_user_timeline(screen_name = "twitter", count = 1000) ``` """ function get_user_timeline(; kwargs...) # Could be doing some pre-allocation here to optimize performance, # but since this is an API function that only deals with 25K records at most... endp = "statuses/user_timeline.json" options = parse_options(kwargs) if "count" ∈ keys(options) count = options["count"] else options["count"] = 1 count = 1 end cur_count = 0 # make the first call to the API cursorable = true newdata = Tweets[] while cursorable & (length(newdata) < count) cursorable, newdata, options, endp, cur_count = cursor(cursorable, newdata, options, endp, cur_count) end newdata end """ get_home_timeline(; kwargs...) Get an array object of timeline tweets from the owning user. This function will call the API until the desired `count` is reached or the API runs out, whichever comes first. # Examples ```julia-repl julia> get_home_timeline(count = 1000) ``` """ function get_home_timeline(; kwargs...) # Could be doing some pre-allocation here to optimize performance, # but since this is an API function that only deals with 25K records at most... endp = "statuses/home_timeline.json" options = parse_options(kwargs) if "count" ∈ keys(options) count = options["count"] else options["count"] = 1 count = 1 end cur_count = 0 # make the first call to the API cursorable = true newdata = Tweets[] while cursorable & (length(newdata) < count) cursorable, newdata, options, endp, cur_count = cursor(cursorable, newdata, options, endp, cur_count) end newdata end """ get_retweets_of_me(; kwargs...) Get an array object of retweets from the owning user. This function will call the API until the desired `count` is reached or the API runs out, whichever comes first. # Examples ```julia-repl julia> get_retweets_of_me(count = 1000) ``` """ function get_retweets_of_me(; kwargs...) # Could be doing some pre-allocation here to optimize performance, # but since this is an API function that only deals with 25K records at most... endp = "statuses/retweets_of_me.json" options = parse_options(kwargs) if "count" ∈ keys(options) count = options["count"] else options["count"] = 1 count = 1 end cur_count = 0 # make the first call to the API cursorable = true newdata = Tweets[] while cursorable & (length(newdata) < count) cursorable, newdata, options, endp, cur_count = cursor(cursorable, newdata, options, endp, cur_count) end newdata end """ get_retweets_of_me(; kwargs...) Get an array object of retweets from the owning user. This function will call the API until the desired `count` is reached or the API runs out, whichever comes first. # Examples ```julia-repl julia> get_retweets_of_me(count = 1000) ``` """ function get_search_tweets(; kwargs...) # Could be doing some pre-allocation here to optimize performance, # but since this is an API function that only deals with 25K records at most... endp = "search/tweets.json" options = parse_options(kwargs) if "count" ∈ keys(options) count = options["count"] else options["count"] = 1 count = 1 end cur_count = 0 # make the first call to the API cursorable = true newdata = Tweets[] while cursorable & (length(newdata) < count) cursorable, newdata, options, endp, cur_count = cursor(cursorable, newdata, options, endp, cur_count) end newdata end
{"hexsha": "9f5f4e34ff15fd5e28add2359b5cc43796388210", "size": 13336, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/cursoring.jl", "max_stars_repo_name": "alexpkeil1/Twitter.jl", "max_stars_repo_head_hexsha": "a2c257aae4b37ba7a37ca2cfe0cfce0071cb2ad4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 57, "max_stars_repo_stars_event_min_datetime": "2015-01-11T17:48:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-02T06:51:56.000Z", "max_issues_repo_path": "src/cursoring.jl", "max_issues_repo_name": "alexpkeil1/Twitter.jl", "max_issues_repo_head_hexsha": "a2c257aae4b37ba7a37ca2cfe0cfce0071cb2ad4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 37, "max_issues_repo_issues_event_min_datetime": "2015-02-02T18:58:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-03T02:31:09.000Z", "max_forks_repo_path": "src/cursoring.jl", "max_forks_repo_name": "alexpkeil1/Twitter.jl", "max_forks_repo_head_hexsha": "a2c257aae4b37ba7a37ca2cfe0cfce0071cb2ad4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2015-03-08T05:56:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T19:41:32.000Z", "avg_line_length": 34.5492227979, "max_line_length": 134, "alphanum_fraction": 0.6894121176, "num_tokens": 3377}
# In Pandas which is an open source BSD-licensed python library, easy to use data structures and data # analysis tools for the python PL # Pandas delase with three DS, Panel, Dataframe, series # In Pandas DataFrame, .head(n=5) return the first n rows # In Pandas DataFrame, .describe() generates descriptive statistics that summarize the central tendency, # dispersion, shape of a dataset's distribution, exluding NaN (Not a number) values. import numpy as np from keras.layers import Dense from keras.models import Sequential target = np.loadtxt('Datasets/hourly_wages.csv', dtype=float, delimiter=',', skiprows=1, usecols=0) predictors = np.loadtxt('Datasets/hourly_wages.csv', dtype=float, delimiter=',', skiprows=1, usecols=(1, 2, 3, 4, 5, 6, 7, 8, 9)) n_cols = predictors.shape[1] model = Sequential() # Add the first layer model.add(Dense(50, activation="relu", input_shape=(n_cols,))) # Add the second layer model.add(Dense(32, activation="relu")) # Add the output layer model.add(Dense(1)) # Compile the model model.compile(optimizer='adam', loss='mean_squared_error') # What is loss function of the method? # print("Loss Function: "+ model.loss) # By Printing model.loss u can access its loss function # Fitting the model model.fit(predictors, target, epochs=10)
{"hexsha": "fdde5abd13659df97f7f4a5e02887206a18ef59e", "size": 1348, "ext": "py", "lang": "Python", "max_stars_repo_path": "BuildModel-Keras.py", "max_stars_repo_name": "JoyeBright/Deep-Learning", "max_stars_repo_head_hexsha": "ba62cc8b3cbeeacc11b69f52999aac2bd0d7f018", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-04-07T12:00:33.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-30T13:57:22.000Z", "max_issues_repo_path": "BuildModel-Keras.py", "max_issues_repo_name": "JoyeBright/Deep-Learning", "max_issues_repo_head_hexsha": "ba62cc8b3cbeeacc11b69f52999aac2bd0d7f018", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BuildModel-Keras.py", "max_forks_repo_name": "JoyeBright/Deep-Learning", "max_forks_repo_head_hexsha": "ba62cc8b3cbeeacc11b69f52999aac2bd0d7f018", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-04-07T12:00:35.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-06T08:01:49.000Z", "avg_line_length": 39.6470588235, "max_line_length": 104, "alphanum_fraction": 0.7121661721, "include": true, "reason": "import numpy", "num_tokens": 337}
import numpy as np from sitator.dynamics import JumpAnalysis from sitator.util import PBCCalculator from sitator.network.merging import MergeSites from sitator.util.mcl import markov_clustering import logging logger = logging.getLogger(__name__) class MergeSitesByDynamics(MergeSites): """Merges sites using dynamical data. Given a SiteTrajectory, merges sites using Markov Clustering. :param float distance_threshold: Don't merge sites further than this in real space. Zeros out the connectivity_matrix at distances greater than this; a hard, step function style cutoff. For a more gentle cutoff, try changing `connectivity_matrix_generator` to incorporate distance. :param float post_check_thresh_factor: Throw an error if proposed merge sites are further than this * distance_threshold away. Only a sanity check; not a hard guerantee. Can be `None`; defaults to `1.5`. Can be loosely thought of as how "normally distributed" the merge sites need to be, with larger values allowing more and more oblong point clouds. :param bool check_types: If True, only sites of the same type are candidates to be merged; if false, type information is ignored. Merged sites will only be assigned types if this is True. :param int iterlimit: Maximum number of Markov Clustering iterations to run before throwing an error. :param dict markov_parameters: Parameters for underlying Markov Clustering. Valid keys are ``'inflation'``, ``'expansion'``, and ``'pruning_threshold'``. """ def __init__(self, connectivity_matrix_generator = None, distance_threshold = 1.0, post_check_thresh_factor = 1.5, check_types = True, iterlimit = 100, markov_parameters = {}): super().__init__( maximum_merge_distance = post_check_thresh_factor * distance_threshold, check_types = check_types ) if connectivity_matrix_generator is None: connectivity_matrix_generator = MergeSitesByDynamics.connectivity_n_ij assert callable(connectivity_matrix_generator) self.connectivity_matrix_generator = connectivity_matrix_generator self.distance_threshold = distance_threshold self.post_check_thresh_factor = post_check_thresh_factor self.check_types = check_types self.iterlimit = iterlimit self.markov_parameters = markov_parameters # Connectivity Matrix Generation Schemes: @staticmethod def connectivity_n_ij(sn): """Basic default connectivity scheme: uses n_ij directly as connectivity matrix. Works well for systems with sufficient statistics. """ return sn.n_ij @staticmethod def connectivity_jump_lag_biased(jump_lag_coeff = 1.0, jump_lag_sigma = 20.0, jump_lag_cutoff = np.inf, distance_coeff = 0.5, distance_sigma = 1.0): """Bias the typical connectivity matrix p_ij with jump lag and distance contributions. The jump lag and distance are processed through Gaussian functions with the given sigmas (i.e. higher jump lag/larger distance => lower connectivity value). These matrixes are then added to p_ij, with a prefactor of ``jump_lag_coeff`` and ``distance_coeff``. Site pairs with jump lags greater than ``jump_lag_cutoff`` have their bias set to zero regardless of ``jump_lag_sigma``. Defaults to ``inf``. """ def cfunc(sn): jl = sn.jump_lag.copy() jl -= 1.0 # Center it around 1 since that's the minimum lag, 1 frame jl /= jump_lag_sigma np.square(jl, out = jl) jl *= -0.5 np.exp(jl, out = jl) # exp correctly takes the -infs to 0 jl[sn.jump_lag > jump_lag_cutoff] = 0. # Distance term pbccalc = PBCCalculator(sn.structure.cell) dists = pbccalc.pairwise_distances(sn.centers) dmat = dists.copy() # We want to strongly boost the similarity of *very* close sites dmat /= distance_sigma np.square(dmat, out = dmat) dmat *= -0.5 np.exp(dmat, out = dmat) return (sn.p_ij + jump_lag_coeff * jl) * (distance_coeff * dmat + (1 - distance_coeff)) return cfunc # Real methods def _get_sites_to_merge(self, st): # -- Compute jump statistics if not st.site_network.has_attribute('n_ij'): ja = JumpAnalysis() ja.run(st) pbcc = PBCCalculator(st.site_network.structure.cell) site_centers = st.site_network.centers # -- Build connectivity_matrix connectivity_matrix = self.connectivity_matrix_generator(st.site_network).copy() n_sites_before = st.site_network.n_sites assert n_sites_before == connectivity_matrix.shape[0] centers_before = st.site_network.centers # For diagnostic purposes no_diag_graph = connectivity_matrix.astype(dtype = np.float, copy = True) np.fill_diagonal(no_diag_graph, np.nan) # Rather arbitrary, but this is really just an alarm for if things # are really, really wrong edge_threshold = np.nanmean(no_diag_graph) + 3 * np.nanstd(no_diag_graph) n_alarming_ignored_edges = 0 # Apply distance threshold for i in range(n_sites_before): dists = pbcc.distances(centers_before[i], centers_before[i + 1:]) js_too_far = np.where(dists > self.distance_threshold)[0] js_too_far += i + 1 if np.any(connectivity_matrix[i, js_too_far] > edge_threshold) or \ np.any(connectivity_matrix[js_too_far, i] > edge_threshold): n_alarming_ignored_edges += 1 connectivity_matrix[i, js_too_far] = 0 connectivity_matrix[js_too_far, i] = 0 # Symmetry if n_alarming_ignored_edges > 0: logger.warning(" At least %i site pairs with high (z-score > 3) fluxes were over the given distance cutoff.\n" " This may or may not be a problem; but if `distance_threshold` is low, consider raising it." % n_alarming_ignored_edges) # -- Do Markov Clustering clusters = markov_clustering(connectivity_matrix, **self.markov_parameters) return clusters
{"hexsha": "798f42fb2f11f814b0c4df149cb89d6485826b30", "size": 6587, "ext": "py", "lang": "Python", "max_stars_repo_path": "sitator/dynamics/MergeSitesByDynamics.py", "max_stars_repo_name": "materials-DFT/sitator", "max_stars_repo_head_hexsha": "6755a71ccd975425b0f9e9df27585b618be3433a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sitator/dynamics/MergeSitesByDynamics.py", "max_issues_repo_name": "materials-DFT/sitator", "max_issues_repo_head_hexsha": "6755a71ccd975425b0f9e9df27585b618be3433a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sitator/dynamics/MergeSitesByDynamics.py", "max_forks_repo_name": "materials-DFT/sitator", "max_forks_repo_head_hexsha": "6755a71ccd975425b0f9e9df27585b618be3433a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.4967741935, "max_line_length": 149, "alphanum_fraction": 0.6505237589, "include": true, "reason": "import numpy", "num_tokens": 1452}
#!/usr/bin/env python # coding: utf-8 from keras.models import load_model from keras.preprocessing.image import img_to_array, load_img import sys from urllib.request import urlopen import numpy as np # Base values target_height = 180 target_width = 320 channels = 3 model = load_model('../models/human_not_human.h5') url = sys.argv[1] print(url) img = load_img(urlopen(url), target_size=(target_height, target_width)) x = img_to_array(img) x = x / 255.0 size = img.size channels=3 dataset = np.ndarray(shape=(1, size[1], size[0], channels),dtype=np.float32) dataset[0] = x result = model.predict(dataset) print(result[0][0])
{"hexsha": "635d143a7438a4212077ecb0635552e8a9db6933", "size": 634, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/guess.py", "max_stars_repo_name": "jeffisadams/human-nothuman", "max_stars_repo_head_hexsha": "c67eba2b5ad5882ca0989bb17175bb6fbca19db4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/guess.py", "max_issues_repo_name": "jeffisadams/human-nothuman", "max_issues_repo_head_hexsha": "c67eba2b5ad5882ca0989bb17175bb6fbca19db4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/guess.py", "max_forks_repo_name": "jeffisadams/human-nothuman", "max_forks_repo_head_hexsha": "c67eba2b5ad5882ca0989bb17175bb6fbca19db4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.2121212121, "max_line_length": 76, "alphanum_fraction": 0.7413249211, "include": true, "reason": "import numpy", "num_tokens": 176}
subroutine UpdateBladeVel(IFLG) use configr use blade use wake use wallsoln integer :: i,ygcErr real :: Point(3), dVel(3), dUdX ! Calculate the velocity induced on the blades by wake, wall, and freestream if (iflg .eq. 0) then ! re-initialize uiwake viwake wiwake as we are beginning a new time step uiwake(:)=0.0 viwake(:)=0.0 wiwake(:)=0.0 end if do I=1,NE ! If flag is set, just recompute the velocity contiribution due to the bound vorticies on the blades. ! Otherwise, calculate all wake, wall and freestream induced velocity. if (IFLG .eq. 0) then ! Calculate freestream velocity at blade elements CALL CalcFreestream(X(NT,I),Y(NT,I),Z(NT,I),UFSB(I),VFSB(I),WFSB(I),ygcErr) ! Set freestream velocity of next shed wake elements to that calculated on the blade UFS(NT,I)=UFSB(I) VFS(NT,I)=VFSB(I) WFS(NT,I)=WFSB(I) USUM=0.0 VSUM=0.0 WSUM=0.0 if (NT > 1) then ! Calculate wake velocity at blade elements (excluding bound vorticity component) Call BladeIndVel(NT,ntTerm,NBE,NB,NE,X(NT,I),Y(NT,I),Z(NT,I),USUM,VSUM,WSUM,dUdX,2,0) ! Calculate wall induced velocities at blade locations Point=[X(NT,I),Y(NT,I),Z(NT,I)] Call WallIndVel(Point,dVel) USUM=USUM+dVel(1) VSUM=VSUM+dVel(2) WSUM=WSUM+dVel(3) end if uiwake(I)=USUM viwake(I)=VSUM wiwake(I)=WSUM else USUM=uiwake(I) VSUM=viwake(I) WSUM=wiwake(I) end if ! CALCULATE THE VELOCITY CONTRIBUTIONS DUE TO JUST THE BOUND VORTICIES ON THE BLADES ( GS(NT,:) ) Call BladeIndVel(NT,ntTerm,NBE,NB,NE,X(NT,I),Y(NT,I),Z(NT,I),UP,VP,WP,dUdX,1,0) ! Set wake and wall velocities on blade UB(I)=USUM+UP VB(I)=VSUM+VP WB(I)=WSUM+WP ! Set induced velocity of next shed wake elements if (iut .eq. -2) then ! Fix wake velocities at freestream velocity U(NT,I)=0.0 V(NT,I)=0.0 W(NT,I)=0.0 else U(NT,I)=UB(I) V(NT,I)=VB(I) W(NT,I)=WB(I) end if end do return end subroutine UpdateBladeVel
{"hexsha": "017731898877eeffb0526968056bc64a83e031b1", "size": 2458, "ext": "f95", "lang": "FORTRAN", "max_stars_repo_path": "src/UpdateBladeVel.f95", "max_stars_repo_name": "ebranlard/CACTUS", "max_stars_repo_head_hexsha": "6d89b48759fe78d1890a77656bafdbd1e703bbb2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2020-03-04T18:49:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T23:03:37.000Z", "max_issues_repo_path": "src/UpdateBladeVel.f95", "max_issues_repo_name": "ebranlard/CACTUS", "max_issues_repo_head_hexsha": "6d89b48759fe78d1890a77656bafdbd1e703bbb2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 40, "max_issues_repo_issues_event_min_datetime": "2020-03-12T21:21:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T01:56:25.000Z", "max_forks_repo_path": "src/UpdateBladeVel.f95", "max_forks_repo_name": "ebranlard/CACTUS", "max_forks_repo_head_hexsha": "6d89b48759fe78d1890a77656bafdbd1e703bbb2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-03-04T03:51:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T08:52:49.000Z", "avg_line_length": 28.2528735632, "max_line_length": 109, "alphanum_fraction": 0.5414971522, "num_tokens": 760}
/* * ==================================================================== * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * ==================================================================== */ #include <boost/test/unit_test.hpp> #include <codecvt> #include <cstdint> #include <locale> #include <random> #include <stdexcept> #include <string> #include <vector> #include "private/svn_utf_private.h" #include "../src/aprwrap.hpp" namespace { std::string to_utf8(const std::u32string& str) { static const int32_t endiancheck = 0xa5cbbc5a; static const bool arch_big_endian = (reinterpret_cast<const char*>(&endiancheck)[sizeof(endiancheck) - 1] == '\x5a'); apr::pool scratch_pool; const svn_string_t* utf8_string; auto err = svn_utf__utf32_to_utf8( &utf8_string, reinterpret_cast<const apr_int32_t*>(str.c_str()), str.size(), arch_big_endian, scratch_pool.get(), scratch_pool.get()); if (err) { svn_error_clear(err); throw std::range_error("bad unicode code point"); } return std::string(utf8_string->data, utf8_string->len); } template<typename C> struct codepoint; template<> struct codepoint<void> { using src_type = char32_t; static constexpr std::uint_least32_t min = 0; static constexpr std::uint_least32_t max = 0x10ffff; static constexpr std::uint_least32_t surrogate_min = 0xd800; static constexpr std::uint_least32_t surrogate_max = 0xdfff; }; template<> struct codepoint<char32_t> : public codepoint<void> { using dst_type = char32_t; static std::u32string convert(const std::u32string& str) { return str; }; }; template<> struct codepoint<char16_t> : public codepoint<void> { using dst_type = char16_t; static std::u16string convert(const std::u32string& str) { std::wstring_convert<std::codecvt_utf8_utf16<dst_type>, dst_type> u; return u.from_bytes(to_utf8(str)); } }; template<> struct codepoint<wchar_t> : public codepoint<void> { using dst_type = wchar_t; #ifdef WIN32 // Be conservative, use UCS-2 for wchar_t on Windows static_assert(sizeof(wchar_t) == sizeof(char16_t), "I thought we had 2-byte wide chars on Windows"); static constexpr std::uint_least32_t max = 0xffff; #endif static std::wstring convert(const std::u32string& str) { #ifdef WIN32 const auto from_utf8 = [](const std::string& sstr) { apr::pool scratch_pool; const wchar_t* result; auto err = svn_utf__win32_utf8_to_utf16( &result, sstr.c_str(), nullptr, scratch_pool.get()); if (err) { svn_error_clear(err); throw std::range_error("bad conversion to utf16"); } return std::wstring(result); } #else std::wstring_convert<std::codecvt_utf8<dst_type>, dst_type> u; const auto from_utf8 = [&u](const std::string& sstr) { return u.from_bytes(sstr); }; #endif return from_utf8(to_utf8(str)); } }; // Generate random strings. template<typename C> inline std::vector<std::basic_string<C>> generate_string_data(int count) { using cp = codepoint<C>; std::mt19937 mt{std::random_device()()}; std::uniform_int_distribution<> cgen{typename cp::src_type(cp::min), typename cp::src_type(cp::max)}; std::uniform_int_distribution<> lgen{7U, 31U}; std::vector<std::basic_string<C>> result; result.reserve(count); for (int i = 0; i < count; ++i) { const unsigned len = lgen(mt); std::u32string val; val.reserve(len); for (unsigned j = 0; j < len; ++j) { repeat: auto c = cgen(mt); if (uint_least32_t(c) >= cp::surrogate_min && uint_least32_t(c) <= cp::surrogate_max) goto repeat; val.push_back(c); } result.emplace_back(cp::convert(val)); } return result; } } // anonymous namespace #include "../src/private/strings_private.hpp" #include "fixture_init.hpp" namespace svn = ::apache::subversion::svnxx; namespace impl = ::apache::subversion::svnxx::impl; BOOST_AUTO_TEST_SUITE(strings, * boost::unit_test::fixture<init>()); BOOST_AUTO_TEST_CASE(wstring_conversion_roundtrip) { for (const auto& sample : generate_string_data<wchar_t>(100)) BOOST_TEST((sample == impl::convert<wchar_t>(impl::convert(sample)))); } BOOST_AUTO_TEST_CASE(u16string_conversion_roundtrip) { for (const auto& sample : generate_string_data<char16_t>(100)) BOOST_TEST((sample == impl::convert<char16_t>(impl::convert(sample)))); } BOOST_AUTO_TEST_CASE(u32string_conversion_roundtrip) { for (const auto& sample : generate_string_data<char32_t>(100)) BOOST_TEST((sample == impl::convert<char32_t>(impl::convert(sample)))); } BOOST_AUTO_TEST_CASE(nulchar) { const std::string nulstr("\0", 1); const std::wstring wnulstr(L"\0", 1); const std::u16string u16nulstr(u"\0", 1); const std::u32string u32nulstr(U"\0", 1); BOOST_TEST(nulstr.size() == 1); BOOST_TEST(wnulstr.size() == 1); BOOST_TEST(u16nulstr.size() == 1); BOOST_TEST(u32nulstr.size() == 1); BOOST_TEST(impl::convert<wchar_t>(nulstr).size() == 1); BOOST_TEST(impl::convert<char16_t>(nulstr).size() == 1); BOOST_TEST(impl::convert<char32_t>(nulstr).size() == 1); BOOST_TEST((impl::convert<wchar_t>(nulstr) == wnulstr)); BOOST_TEST((impl::convert<char16_t>(nulstr) == u16nulstr)); BOOST_TEST((impl::convert<char32_t>(nulstr) == u32nulstr)); BOOST_TEST(impl::convert(wnulstr).size() == 1); BOOST_TEST(impl::convert(u16nulstr).size() == 1); BOOST_TEST(impl::convert(u32nulstr).size() == 1); BOOST_TEST((impl::convert(wnulstr) == nulstr)); BOOST_TEST((impl::convert(u16nulstr) == nulstr)); BOOST_TEST((impl::convert(u32nulstr) == nulstr)); } BOOST_AUTO_TEST_SUITE_END();
{"hexsha": "87d054fcb965ee928cf57a90d75217aca6fb26b8", "size": 6767, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "subversion/bindings/cxx/tests/test_strings.cpp", "max_stars_repo_name": "timgates42/subversion", "max_stars_repo_head_hexsha": "0f088f530747140c6783c2eeb77ceff8e8613c42", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2017-01-03T03:20:56.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-24T22:05:09.000Z", "max_issues_repo_path": "subversion/bindings/cxx/tests/test_strings.cpp", "max_issues_repo_name": "timgates42/subversion", "max_issues_repo_head_hexsha": "0f088f530747140c6783c2eeb77ceff8e8613c42", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2016-06-12T17:02:25.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-03T11:08:18.000Z", "max_forks_repo_path": "subversion/bindings/cxx/tests/test_strings.cpp", "max_forks_repo_name": "timgates42/subversion", "max_forks_repo_head_hexsha": "0f088f530747140c6783c2eeb77ceff8e8613c42", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2017-01-21T00:15:13.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-04T07:23:50.000Z", "avg_line_length": 30.899543379, "max_line_length": 85, "alphanum_fraction": 0.645780996, "num_tokens": 1743}
import unittest import numpy as np import pandas as pd from apollon.tools import time_stamp from comsar.tracks import TimbreTrack class TestTimbreTrack(unittest.TestCase): def setUp(self): self.track = TimbreTrack() def test_nfeatures(self): self.assertIsInstance(self.track.n_features, int)
{"hexsha": "9dabb3279c757efac6a1a761f984e3a040bccfd6", "size": 320, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/tracks/test_timbre.py", "max_stars_repo_name": "ifsm/comsar", "max_stars_repo_head_hexsha": "aeb45d03409e223ff417d8d9345e7b128fc3a3af", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/tracks/test_timbre.py", "max_issues_repo_name": "ifsm/comsar", "max_issues_repo_head_hexsha": "aeb45d03409e223ff417d8d9345e7b128fc3a3af", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/tracks/test_timbre.py", "max_forks_repo_name": "ifsm/comsar", "max_forks_repo_head_hexsha": "aeb45d03409e223ff417d8d9345e7b128fc3a3af", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.3333333333, "max_line_length": 57, "alphanum_fraction": 0.753125, "include": true, "reason": "import numpy", "num_tokens": 69}
import numpy as np from spn.algorithms.Inference import EPSILON, add_node_likelihood from spn.structure.leaves.spmnLeaves.SPMNLeaf import Utility from spn.structure.leaves.histogram.Inference import histogram_likelihood def utility_value(node, data=None, dtype=np.float64): uVal = np.ones((data.shape[0], 1), dtype=dtype) nd = data[:, node.scope[0]] marg_ids = np.isnan(nd) uVal[~marg_ids] = nd[~marg_ids].reshape((-1,1)) uVal[uVal < EPSILON] = EPSILON return uVal def add_utility_inference_support(): add_node_likelihood(Utility, histogram_likelihood)
{"hexsha": "33e46ddc5f47ba00d0ce72c8e946d69f0df3fc82", "size": 591, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/spn/structure/leaves/spmnLeaves/Inference.py", "max_stars_repo_name": "radum2275/SPFlow", "max_stars_repo_head_hexsha": "4ba05aef644b66fc8621991c78e426cef408b985", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-27T02:07:26.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-27T02:07:26.000Z", "max_issues_repo_path": "src/spn/structure/leaves/spmnLeaves/Inference.py", "max_issues_repo_name": "radum2275/SPFlow", "max_issues_repo_head_hexsha": "4ba05aef644b66fc8621991c78e426cef408b985", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/spn/structure/leaves/spmnLeaves/Inference.py", "max_forks_repo_name": "radum2275/SPFlow", "max_forks_repo_head_hexsha": "4ba05aef644b66fc8621991c78e426cef408b985", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-08-09T15:42:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-11T10:44:10.000Z", "avg_line_length": 24.625, "max_line_length": 73, "alphanum_fraction": 0.7428087986, "include": true, "reason": "import numpy", "num_tokens": 161}
[STATEMENT] lemma rev_nth_snoc: \<open>(xs @ [x]) !. Suc v = Some y \<Longrightarrow> xs !. v = Some y\<close> [PROOF STATE] proof (prove) goal (1 subgoal): 1. (xs @ [x]) !. Suc v = Some y \<Longrightarrow> xs !. v = Some y [PROOF STEP] by (induct xs) auto
{"llama_tokens": 106, "file": "Hybrid_Logic_Hybrid_Logic", "length": 1}
/* ****************************************************************** ** ** OpenSees - Open System for Earthquake Engineering Simulation ** ** Pacific Earthquake Engineering Research Center ** ** ** ** ** ** (C) Copyright 1999, The Regents of the University of California ** ** All Rights Reserved. ** ** ** ** Commercial use of this program without express permission of the ** ** University of California, Berkeley, is strictly prohibited. See ** ** file 'COPYRIGHT' in main directory for information on usage and ** ** redistribution, and for a DISCLAIMER OF ALL WARRANTIES. ** ** ** ** Developed by: ** ** Frank McKenna (fmckenna@ce.berkeley.edu) ** ** Gregory L. Fenves (fenves@ce.berkeley.edu) ** ** Filip C. Filippou (filippou@ce.berkeley.edu) ** ** ** ** ****************************************************************** */ // $Revision: 1.1.1.1 $ // $Date: 2000-09-15 08:23:16 $ // $Source: /usr/local/cvs/OpenSees/SRC/analysis/algorithm/eigenAlgo/EigenAlgorithm.cpp,v $ // File: ~/analysis/algorithm/eigenAlgo/EigenAlgorithm.C // // Written: Jun Peng // Created: Wed Jan 27, 1999 // Revision: A // // Description: This file contains the class definition of EigenAlgorithm. // EigenAlgorithm is a class which performs a eigen solution algorithm // to solve the equations. // // This class is inheritanted from the base class of SolutionAlgorithm // which was created by fmk (Frank). #include <EigenAlgorithm.h> #include <AnalysisModel.h> #include <EigenIntegrator.h> #include <EigenSOE.h> EigenAlgorithm::EigenAlgorithm(int classTag) :SolutionAlgorithm(classTag), theModel(0), theIntegrator(0), theSOE(0) { // need do nothing here. } EigenAlgorithm::~EigenAlgorithm() { // do nothing here. } void EigenAlgorithm::setLinks(AnalysisModel &theNewModel, EigenIntegrator &theNewIntegrator, EigenSOE &theNewSOE) { theModel = &theNewModel; theIntegrator = &theNewIntegrator; theSOE = &theNewSOE; } AnalysisModel * EigenAlgorithm::getAnalysisModelPtr() const { return theModel; } EigenIntegrator * EigenAlgorithm::getEigenIntegratorPtr() const { return theIntegrator; } EigenSOE * EigenAlgorithm::getEigenSOEptr() const { return theSOE; }
{"hexsha": "e1b08c2866cd678bb79db3f3966f8fe279315fba", "size": 2971, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "OpenSees/SRC/analysis/algorithm/eigenAlgo/EigenAlgorithm.cpp", "max_stars_repo_name": "kuanshi/ductile-fracture", "max_stars_repo_head_hexsha": "ccb350564df54f5c5ec3a079100effe261b46650", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2019-03-05T16:25:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-17T14:12:03.000Z", "max_issues_repo_path": "SRC/analysis/algorithm/eigenAlgo/EigenAlgorithm.cpp", "max_issues_repo_name": "steva44/OpenSees", "max_issues_repo_head_hexsha": "417c3be117992a108c6bbbcf5c9b63806b9362ab", "max_issues_repo_licenses": ["TCL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SRC/analysis/algorithm/eigenAlgo/EigenAlgorithm.cpp", "max_forks_repo_name": "steva44/OpenSees", "max_forks_repo_head_hexsha": "417c3be117992a108c6bbbcf5c9b63806b9362ab", "max_forks_repo_licenses": ["TCL"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2019-09-21T03:11:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-19T07:29:37.000Z", "avg_line_length": 34.5465116279, "max_line_length": 91, "alphanum_fraction": 0.4971390104, "num_tokens": 577}
import pandas as pd import numpy as np from tqdm import tqdm import argparse from datetime import datetime parser = argparse.ArgumentParser() parser.add_argument("--data", default='../data_cleaned/time_evolution_10_levels_natural.csv', \ help="filename.", type=str) parser.add_argument("--maxlevel", default=10, help="Maximum level of the book to study", type=int) parser.add_argument("--time_delta", default=1, help="Time delta in minutes", type=float) parser.add_argument("--acquisition_day", default='2020-04-06', \ help="First day of data acquisition in format YYYY-MM-DD", type=str) def main(data, maxlevel, time_delta, acquisition_day, tick_size=1e-4): """ Computes the order flow imbalance for each level of the book and computes the corresponding evolution of mid price. args: data: path to the dataframe containing the data, obtained by running 'deltas_computation.py' maxlevel: maximum level of the book to study time_delta: time delta in seconds to discretize time acquisition_day: first day of data acquisition in format YYYY-MM-DD """ date_acquisition = int(datetime.fromisoformat(acquisition_day).timestamp()) df = pd.read_csv(data) df = df.groupby('time', sort=False).agg(np.mean) df['time'] = df.index df.index = range(len(df)) #clean datetime according to the date of acquisition df.drop (df[df['time']<date_acquisition].index, axis=0, inplace=True) df = df.sort_values(['time'], ignore_index=True) # clean meaningless datetime, being sure that the last elment has the right length df.drop (df[[len(str((df['time'][i])))<len(str(list(df['time'])[-1]))\ for i in range(len(df))]].index, axis=0, inplace=True) conversion = 1e9 df['time_isoformat'] = df['time'].apply(lambda x: datetime.fromtimestamp(x/conversion)) #rescaling prices for i in range(0, maxlevel): df['ask_price_{}'.format(i)] = df['ask_price_{}'.format(i)]*tick_size df['bid_price_{}'.format(i)] = df['bid_price_{}'.format(i)]*tick_size df['mid_price'] = df['mid_price']*tick_size #computing the quantities delta V delta D for i in range(maxlevel): print('level {}'.format(i)) check_bid_prices = np.diff (df['bid_price_{}'.format(i)]) check_ask_prices = np.diff (df['ask_price_{}'.format(i)]) delta_W = [0] delta_V = [0] j = 1 # for future improvement: this process can be optimized by employing .diff and masks with tqdm(total=len(df)) as pbar: for bcheck, acheck in zip(check_bid_prices, check_ask_prices): if bcheck > 0: delta_W.append(np.array(df['bid_volume_{}'.format(i)])[j]) elif bcheck == 0: delta_W.append(np.array(df['bid_volume_{}'.format(i)])[j] - \ np.array(df['bid_volume_{}'.format(i)])[j-1]) else: delta_W.append(- np.array(df['bid_volume_{}'.format(i)])[j-1]) if acheck < 0: delta_V.append(np.array(df['ask_volume_{}'.format(i)])[j]) elif acheck == 0: delta_V.append(np.array(df['ask_volume_{}'.format(i)])[j] - \ np.array(df['ask_volume_{}'.format(i)])[j-1]) else: delta_V.append(np.array(-df['ask_volume_{}'.format(i)])[j-1]) j+=1 pbar.update(1) df['delta_W_{}'.format(i)] = delta_W df['delta_V_{}'.format(i)] = delta_V df['e_{}'.format(i)] = df['delta_W_{}'.format(i)]-df['delta_V_{}'.format(i)] #drop row without a previous item, (does not make sense in .diff) df.index = range(len(df)) df.drop ([0], axis=0, inplace=True) df.to_csv('../data_cleaned/time_evolution_{}_levels_processed.csv'.format(maxlevel),index=False) # discretizig time bin_edges = [] df = df.sort_values(['time_isoformat'], ignore_index=True) t = df['time_isoformat'].iloc[0] from datetime import timedelta time_delta = timedelta(minutes=time_delta) while t <= df['time_isoformat'].iloc[-1] + time_delta: bin_edges.append(t) t = t + time_delta df['time_bin'] = pd.cut(df['time_isoformat'], bin_edges) df = df.dropna() df.index = range(len(df)) # computing mid_price_delta bins = df['time_bin'].unique() mid_price_delta = [] ofi = pd.DataFrame(bins, columns=['time_bin']) ofi['bin_label'] = np.arange(len(bins)) for l in range(maxlevel): ofi['OFI_{}'.format(l)] = np.zeros(len(ofi)) index = 0 for b in bins: grouped = df[df['time_bin']==b] mid_price_delta.append(grouped['mid_price'].iloc[-1] - grouped['mid_price'].iloc[0]) for l in range(maxlevel): ofi.iloc[index, l+2]=grouped['e_{}'.format(l)].sum() index += 1 ofi['mid_price_delta'] = mid_price_delta ofi.to_csv('../data_cleaned/ofi_{}_levels.csv'.format(maxlevel), index=False) if __name__ == "__main__": args = vars(parser.parse_args()) main(**args)
{"hexsha": "e98735b7ac0a6c46066d22750f67b6fbab5d1088", "size": 5176, "ext": "py", "lang": "Python", "max_stars_repo_path": "order_flow_imbalance/ofi_computation.py", "max_stars_repo_name": "nicolezattarin/LOB-feature-analysis", "max_stars_repo_head_hexsha": "c73735d887c146a85f24267de7789d689a6c4311", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-17T14:58:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T14:58:37.000Z", "max_issues_repo_path": "order_flow_imbalance/ofi_computation.py", "max_issues_repo_name": "nicolezattarin/LOB-feature-analysis", "max_issues_repo_head_hexsha": "c73735d887c146a85f24267de7789d689a6c4311", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "order_flow_imbalance/ofi_computation.py", "max_forks_repo_name": "nicolezattarin/LOB-feature-analysis", "max_forks_repo_head_hexsha": "c73735d887c146a85f24267de7789d689a6c4311", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-21T06:00:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T06:00:09.000Z", "avg_line_length": 42.081300813, "max_line_length": 100, "alphanum_fraction": 0.611862442, "include": true, "reason": "import numpy", "num_tokens": 1316}
import speech_recognition as sr from tkinter import * from tkinter import ttk from tkinter import filedialog import threading import time import os import numpy as np import librosa.display import copy from sklearn.externals import joblib from winsound import * from numpy import array, zeros, argmin, inf, ndim from scipy.spatial.distance import cdist import json import sounddevice as sd import soundfile as sf from pydub import AudioSegment from pydub.silence import split_on_silence import os from os import listdir from os.path import isfile, join ed = [] with open('eng_dict.json') as data_file: eng_dict = json.load(data_file) for i in eng_dict: ed.append(i) filename = 'hin_dict' hin_dict = joblib.load(filename) ###DTW def dtw(x, y, dist, warp=1): """ Computes Dynamic Time Warping (DTW) of two sequences. :param array x: N1*M array :param array y: N2*M array :param func dist: distance used as cost measure :param int warp: how many shifts are computed. Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path. """ assert len(x) assert len(y) r, c = len(x), len(y) D0 = zeros((r + 1, c + 1)) D0[0, 1:] = inf D0[1:, 0] = inf D1 = D0[1:, 1:] # view for i in range(r): for j in range(c): D1[i, j] = dist(x[i], y[j]) C = D1.copy() for i in range(r): for j in range(c): min_list = [D0[i, j]] for k in range(1, warp + 1): i_k = min(i + k, r - 1) j_k = min(j + k, c - 1) min_list += [D0[i_k, j], D0[i, j_k]] D1[i, j] += min(min_list) if len(x)==1: path = zeros(len(y)), range(len(y)) elif len(y) == 1: path = range(len(x)), zeros(len(x)) else: path = _traceback(D0) return D1[-1, -1] / sum(D1.shape), C, D1, path def accelerated_dtw(x, y, dist, warp=1): """ Computes Dynamic Time Warping (DTW) of two sequences in a faster way. Instead of iterating through each element and calculating each distance, this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html) :param array x: N1*M array :param array y: N2*M array :param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics. If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'. :param int warp: how many shifts are computed. Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path. """ assert len(x) assert len(y) if ndim(x) == 1: x = x.reshape(-1, 1) if ndim(y) == 1: y = y.reshape(-1, 1) r, c = len(x), len(y) D0 = zeros((r + 1, c + 1)) D0[0, 1:] = inf D0[1:, 0] = inf D1 = D0[1:, 1:] D0[1:, 1:] = cdist(x, y, dist) C = D1.copy() for i in range(r): for j in range(c): min_list = [D0[i, j]] for k in range(1, warp + 1): min_list += [D0[min(i + k, r - 1), j], D0[i, min(j + k, c - 1)]] D1[i, j] += min(min_list) if len(x) == 1: path = zeros(len(y)), range(len(y)) elif len(y) == 1: path = range(len(x)), zeros(len(x)) else: path = _traceback(D0) return D1[-1, -1] / sum(D1.shape), C, D1, path def _traceback(D): i, j = array(D.shape) - 2 p, q = [i], [j] while (i > 0) or (j > 0): tb = argmin((D[i, j], D[i, j+1], D[i+1, j])) if tb == 0: i -= 1 j -= 1 elif tb == 1: i -= 1 else: # (tb == 2): j -= 1 p.insert(0, i) q.insert(0, j) return array(p), array(q) ###DTW-End reply = 0 entries = {} textbs = {} textpops = "" test_files = [] current_test = "" cg_dirname = "" flag_audio_pop = 0 def language_selection_window(a): global reply e = entries["mic"] reply = int(e.get()) # print(reply) def enghin(a): # global entries # print(entries) # e = entries["mic"] # rep = int(e.get()) # print(rep) global reply rep = reply # print(rep) mic_list = sr.Microphone.list_microphone_names() j = 1 mic_name = "" sample_rate = 48000 chunk_size = 2048 for i, microphone_name in enumerate(mic_list): # print(j,microphone_name) if j == rep: mic_name = microphone_name # print("MIC",mic_name) j += 1 r = sr.Recognizer() mic_list = sr.Microphone.list_microphone_names() for i, microphone_name in enumerate(mic_list): if microphone_name == mic_name: device_id = i # print("HELL",device_id) def exitf(a): root.destroy() def status_popup(): global textpops savp = Tk() savp.iconbitmap('wait.ico') savp.wm_title("Recognition in progress...") # Label(savp, text="Please wait...").grid(row=1, column=0, sticky="ew") prog = Text(savp, height=10, width=40, bd=5, font=("Times", 20)) prog.grid(row=2, columnspan=3, sticky="ew") # print("txtpps - ", textpops) prog.insert(INSERT, " Recognition in progress, Please wait! \n") prog.insert(INSERT, " Loading! \n") start = time.time() while not textpops: if (time.time() - start) > 5: break prog.insert(INSERT, ".") savp.update_idletasks() savp.update() textpops = "" def speakeng(a): with sr.Microphone(device_index=device_id, sample_rate=sample_rate, chunk_size=chunk_size) as source: # Adjusting noise level r.adjust_for_ambient_noise(source) audio = r.listen(source) global textpops t1 = threading.Thread(target=status_popup) t1.start() try: text = r.recognize_google(audio, language='en-IN') textpops = text # print("Speakeng - ",textpops) text = text + "\n" eng.insert(INSERT, text) except sr.UnknownValueError: text = "\n---\nGoogle Speech Recognition could not understand audio\n---\n" eng.insert(INSERT, text) except sr.RequestError as e: eng.insert(INSERT, "---") eng.insert(INSERT, "Could not request results from Google Speech Recognition service; {0}".format(e)) eng.insert(INSERT, "---") t1.join() # print("\nt1 still alive - ", t1.is_alive()) def speakhin(a): with sr.Microphone(device_index=device_id, sample_rate=sample_rate, chunk_size=chunk_size) as source: # Adjusting noise level r.adjust_for_ambient_noise(source) audio = r.listen(source) global textpops t1 = threading.Thread(target=status_popup) t1.start() try: text = r.recognize_google(audio, language='hi-IN') textpops = text # print("Speakhin - ", textpops) text = text + "\n" hin.insert(INSERT, text) except sr.UnknownValueError: text = "\n---\nGoogle Speech Recognition could not understand audio\n---\n" hin.insert(INSERT, text) except sr.RequestError as e: hin.insert(INSERT, "---") hin.insert(INSERT, "Could not request results from Google Speech Recognition service; {0}".format(e)) hin.insert(INSERT, "---") t1.join() # print("\nt1 still alive - ", t1.is_alive()) def cleareng(a): eng.delete(1.0, END) def clearhin(a): hin.delete(1.0, END) def saveeng(a): location = "" def browse(a): x = filedialog.askdirectory() e = entries["save_file_location"] e.insert(0, x) location = x def savv(a): e = entries["save_file_location"] location = str(e.get()) e = entries["save_file_name"] name = str(e.get()) input = eng.get("1.0", 'end-1c') # print(name) loc = location + "/" + name + ".txt" # print("\nFinal loc\n", loc) f = open(loc, 'w') f.write(input) f.close() sav.destroy() sav = Tk() sav.iconbitmap('save.ico') sav.wm_title("Save English Transcript") Label(sav, text="Enter the file name you want: ").grid(row=0, column=0, sticky=W) e = Entry(sav, width=50) e.grid(row=1, columnspan=2, sticky="ew") entries["save_file_name"] = e Label(sav, text="Choose the location to save at: ").grid(row=2, column=0, sticky=W) folentry = Entry(sav, width=77) folentry.grid(row=3, column=0, sticky="ew") entries["save_file_location"] = folentry ch = Button(sav, text="Browse") ch.bind("<Button-1>", browse) ch.grid(row=3, column=1, sticky="ew") ttk.Separator(sav).grid(row=4, pady=2, padx=2, columnspan=3, sticky="ew") ent = Button(sav, text="Save", width=11) ent.bind("<Button-1>", savv) ent.grid(row=5, column=1, sticky="ew") sav.mainloop() def savehin(a): location = "" def browse(a): x = filedialog.askdirectory() e = entries["save_file_location"] e.insert(0, x) location = x def savv(a): e = entries["save_file_location"] location = str(e.get()) e = entries["save_file_name"] name = str(e.get()) input = hin.get("1.0", 'end-1c') # print(name) loc = location + "/" + name + ".txt" # print("\nFinal loc\n", loc) f = open(loc, 'w', encoding="utf-8") f.write(input) f.close() sav.destroy() sav = Tk() sav.iconbitmap('save.ico') sav.wm_title("Save Hindi Transcript") Label(sav, text="Enter the file name you want: ").grid(row=0, column=0, sticky=W) e = Entry(sav, width=50) e.grid(row=1, columnspan=2, sticky="ew") entries["save_file_name"] = e Label(sav, text="Choose the location to save at: ").grid(row=2, column=0, sticky=W) folentry = Entry(sav, width=77) folentry.grid(row=3, column=0, sticky="ew") entries["save_file_location"] = folentry ch = Button(sav, text="Browse") ch.bind("<Button-1>", browse) ch.grid(row=3, column=1, sticky="ew") ttk.Separator(sav).grid(row=4, pady=2, padx=2, columnspan=3, sticky="ew") ent = Button(sav, text="Save", width=11) ent.bind("<Button-1>", savv) ent.grid(row=5, column=1, sticky="ew") sav.mainloop() win.destroy() root = Tk() root.iconbitmap('icon.ico') root.title("English and Hindi Voice Typing Editor") Label(root, text="English Speech to text:").grid(row=0, column=0, sticky=W) eng = Text(root, height=12, width=72, bd=5, font=("Times", 12)) eng.grid(row=3, columnspan=3) se = Button(root, text="Speak English", width=11) se.bind("<Button-1>", speakeng) se.grid(row=6, column=0) es = Button(root, text="Clear English", width=11) es.bind("<Button-1>", cleareng) es.grid(row=6, column=1) ce = Button(root, text="Save English", width=11) ce.bind("<Button-1>", saveeng) ce.grid(row=6, column=2) Label(root, text="Hindi Speech to text:").grid(row=7, column=0, sticky=W) hin = Text(root, height=12, width=72, bd=5, font=("Times", 12)) hin.grid(row=10, columnspan=3) sh = Button(root, text="Speak Hindi", width=11) sh.bind("<Button-1>", speakhin) sh.grid(row=13, column=0) hs = Button(root, text="Clear Hindi", width=11) hs.bind("<Button-1>", clearhin) hs.grid(row=13, column=1) ch = Button(root, text="Save Hindi", width=11) ch.bind("<Button-1>", savehin) ch.grid(row=13, column=2) ttk.Separator(root).grid(row=14, pady=2, padx=2, columnspan=3, sticky="ew") ex = Button(root, text="Exit", width=11) ex.bind("<Button-1>", exitf) ex.grid(row=16, columnspan=3, sticky="ew") root.mainloop() def exitwin(a): win.destroy() def test_folder(a): def cg(a): mfcc_arr = joblib.load('Training_mfcc_arr.pkl') y = joblib.load('Training_y.pkl') def exitcg(a): cgroot.destroy() def preprocess_mfcc(mfcc): mfcc_cp = copy.deepcopy(mfcc) for i in range(mfcc.shape[1]): mfcc_cp[:, i] = mfcc[:, i] - np.mean(mfcc[:, i]) mfcc_cp[:, i] = mfcc_cp[:, i] / np.max(np.abs(mfcc_cp[:, i])) return mfcc_cp def audio_popup(): def play(a): file = cg_dirname + "/" + current_test PlaySound(file, SND_FILENAME | SND_ASYNC) def exi(a): global flag_audio_pop flag_audio_pop = 1 savp.destroy() savp = Tk() savp.iconbitmap('audio.ico') savp.wm_title("Audio Player") Label(savp, text="Click on Play to play the following Audio file:\n" + current_test + "\nClick on Exit to close this window.").grid( row=1, column=0, sticky="ew") se = Button(savp, text="Play", width=11) se.bind("<Button-1>", play) se.grid(row=2, column=0) es = Button(savp, text="Exit", width=11) es.bind("<Button-1>", exi) es.grid(row=2, column=1) savp.mainloop() def recognize_mic(a): fs = 44100 duration = 5 # seconds myrecording = sd.rec(duration * fs, samplerate=fs, channels=2, dtype='float64') print("Recording Audio") sd.wait() print("Audio recording complete , Play Audio") sf.write("temp.wav", myrecording, fs) sd.wait() print("Play Audio Complete") AudioSegment.ffmpeg = "C://ffmpeg//bin" cwd = os.getcwd() loc = cwd + "\\" + "temp.wav" sound_file = AudioSegment.from_wav(loc) audio_chunks = split_on_silence(sound_file, # must be silent for at least half a second min_silence_len=250, # consider it silent if quieter than -16 dBF silence_thresh=-38 ) print("Hello") for i, chunk in enumerate(audio_chunks): out_file = cwd + "\\" + "temp\\temp_{0}.wav".format(i) print(i) if i < 10: out_file = cwd + "\\" + "temp\\temp_0{0}.wav".format(i) print("exporting", out_file) chunk.export(out_file, format="wav") foldname = cwd + "\\" + "temp" onlyfiles = [f for f in listdir(foldname) if isfile(join(foldname, f))] answer = "" for i in onlyfiles: # start = time.perf_counter() yTest, srTest = librosa.load(foldname + "/" + i) mfccTest = librosa.feature.mfcc(yTest, srTest) mfccTest = preprocess_mfcc(mfccTest) dists = [] for i in range(len(mfcc_arr)): mfcci = mfcc_arr[i] disti = dtw(mfcci.T, mfccTest.T, dist=lambda x, y: np.exp(np.linalg.norm(x - y, ord=1)))[0] dists.append(disti) # plt.plot(dists) min_dist = min(dists) min_dist_index = dists.index(min_dist) pre = int(y[min_dist_index]) output = hin_dict[pre] answer = answer + " " + output mi.insert(INSERT, answer) def recognize_all(a): start = time.perf_counter() dirname = cg_dirname files = test_files Test_Result = [] Reult_indices = [] for j in range(len(files)): start1 = time.perf_counter() yTest, srTest = librosa.load(dirname + "/" + files[j]) mfccTest = librosa.feature.mfcc(yTest, srTest) mfccTest = preprocess_mfcc(mfccTest) dists = [] for i in range(len(mfcc_arr)): mfcci = mfcc_arr[i] disti = dtw(mfcci.T, mfccTest.T, dist=lambda x, y: np.exp(np.linalg.norm(x - y, ord=1)))[0] dists.append(disti) min_dist = min(dists) min_dist_index = dists.index(min_dist) pre = int(y[min_dist_index]) output = hin_dict[pre] tt = time.perf_counter() - start1 output = "Input File : " + current_test + ".\nThe spoken word is : " + output + ".\nTime taken for Recognition : " + str(tt) + "\n" micl.insert(INSERT, output) Test_Result.append(hin_dict[pre]) Reult_indices.append(pre) # print(hin_dict[pre]) tt = time.perf_counter() - start output = "\nTotal Time taken for Recognizing "+str(len(test_files))+" Testing files : " +str(tt) + "\n" micl.insert(INSERT, output) #Accuracy j=0 correct = 0 total_files = len(test_files) #Precision precisions = np.array([0]*58) num = [0] * 58 den = [0] * 58 for i in range(len(Test_Result)): den[Reult_indices[i]] += 1 lis = list(files[i].split('_')) # print(eng_dict) index = ed.index(str(lis[0])) # print(index) if Reult_indices[i] == index: num[Reult_indices[i]] += 1 # print("Precisions word-wise:") for i in range(58): try: precisions[i] = (num[i] / den[i]) * 100 except: precisions[i] = -1 pass prc = np.array(precisions) np.save("precisions",prc) for i in test_files: lis = list(i.split('_')) index = ed.index(str(lis[0])) true_value = hin_dict[index] if Test_Result[j]==true_value: correct+=1 j+=1 accuracy = (correct/total_files)*100 output = "\nAccuracy of the complete Recognition : " + str(correct) + " out of " + str(total_files) + ".\nAccuracy percentage : "+str(accuracy)+"\n" anarray = [0,0] anarray = np.array(anarray) np.save("accuracy",anarray) micl.insert(INSERT, output) def selected_from_dd(*args): global current_test current_test = tkvar.get() t1 = threading.Thread(target=audio_popup) t1.start() start = time.perf_counter() yTest, srTest = librosa.load(cg_dirname + "/" + current_test) mfccTest = librosa.feature.mfcc(yTest, srTest) mfccTest = preprocess_mfcc(mfccTest) dists = [] for i in range(len(mfcc_arr)): mfcci = mfcc_arr[i] disti = dtw(mfcci.T, mfccTest.T, dist=lambda x, y: np.exp(np.linalg.norm(x - y, ord=1)))[0] dists.append(disti) # plt.plot(dists) min_dist = min(dists) min_dist_index = dists.index(min_dist) pre = int(y[min_dist_index]) output = hin_dict[pre] tt = time.perf_counter()-start output = "Input File : "+str(current_test)+".\nThe spoken word is : "+str(output)+".\nTime taken for Recognition : "+str(tt)+"\n" sop.insert(INSERT, output) global flag_audio_pop if flag_audio_pop == 1: t1.join() flag_audio_pop = 0 fol.destroy() cgroot = Tk() cgroot.iconbitmap('icon.ico') tkvar = StringVar(cgroot) cgroot.title("Chhattisgarhi Small Vocabulary Speech Recognition") drop_down_menu = OptionMenu(cgroot, tkvar, *test_files) Label(cgroot, text="Recognize a single file, Choose from below: ").grid(row=0, columnspan=2, sticky="w") drop_down_menu.grid(row=2, column=1, sticky="ew") tkvar.trace('w', selected_from_dd) sop = Text(cgroot, height=6, width=60, bd=5, font=("Times", 12)) sop.grid(row=2, column=0) ttk.Separator(cgroot).grid(row=3, pady=2, padx=2, columnspan=3, sticky="ew") Label(cgroot, text="Recognize all the Audio files of Test folder: ").grid(row=4, columnspan=2, sticky="w") micl = Text(cgroot, height=6, width=60, bd=5, font=("Times", 12)) micl.grid(row=5, column=0, sticky="w") reczall = Button(cgroot, text="Recognize All", width=11) reczall.bind("<Button-1>", recognize_all) reczall.grid(row=5, column=1, sticky="ew") ttk.Separator(cgroot).grid(row=6, pady=2, padx=2, columnspan=3, sticky="ew") Label(cgroot, text="Recognize through Mic (5-second recording): ").grid(row=7, columnspan=2, sticky="w") mi = Text(cgroot, height=6, width=60, bd=5, font=("Times", 12)) mi.grid(row=8, column=0, sticky="w") recmic = Button(cgroot, text="Recognize", width=11) recmic.bind("<Button-1>", recognize_mic) recmic.grid(row=9, column=1, sticky="ew") ttk.Separator(cgroot).grid(row=10, pady=2, padx=2, columnspan=3, sticky="ew") ex = Button(cgroot, text="Exit", width=11) ex.bind("<Button-1>", exitcg) ex.grid(row=11, columnspan=3, sticky="ew") cgroot.mainloop() def askfolder(a): global cg_dirname cg_dirname = filedialog.askdirectory() folentry.insert(0, cg_dirname) global test_files test_files = [f for f in os.listdir(cg_dirname) if os.path.isfile(os.path.join(cg_dirname,f))] if "desktop.ini" in test_files: test_files.remove("desktop.ini") # print(test_files) win.destroy() fol = Tk() fol.iconbitmap('save.ico') fol.title("Testing Folder Selection") Label(fol, text="Choose the folder containing Testing Audio files:").grid(row=0, column=0, sticky=W) folentry = Entry(fol, width=77) folentry.grid(row=1, sticky=W, column=0) ch = Button(fol, text="Browse") ch.bind("<Button-1>", askfolder) ch.grid(row=1, column=1, sticky=E) ch = Button(fol, text="Next") ch.bind("<Button-1>", cg) ch.grid(row=2, columnspan=2, sticky="ew") fol.mainloop() popup.destroy() win = Tk() win.iconbitmap('icon.ico') win.title("Select the language for Recognition") Label(win, text="English/Hindi Speech to text:").grid(row=0, column=0, sticky=W) se = Button(win, text="English/Hindi", width=11) se.bind("<Button-1>", enghin) se.grid(row=0, column=1, sticky="ew") ttk.Separator(win).grid(row=2, pady=2, padx=2, columnspan=3, sticky="ew") Label(win, text="Chhattisgarhi Small Vocabulary Recognition(Words listed below):").grid(row=4, column=0, sticky=W) words = Text(win, height=4, width=60, bd=5, font=("Times", 12)) words.grid(row=6, column=0) words.insert(INSERT, "'आबे', 'बईठ', 'बेरा', 'एती', 'गोड़', 'हमर', 'हे', 'जाहूँ', 'काबर', 'कहत', 'करत', 'खाबे', 'कोति', 'लइका','मोर', 'पीरात', 'रेंगत', 'टेरत', 'टूरा', 'तुमन'") sh = Button(win, text="Chhattisgarhi", width=11) sh.bind("<Button-1>", test_folder) sh.grid(row=6, column=1, sticky="ew") ttk.Separator(win).grid(row=10, pady=2, padx=2, columnspan=3, sticky="ew") exx = Button(win, text="Exit", width=11) exx.bind("<Button-1>", exitwin) exx.grid(row=12, columnspan=3, sticky="ew") win.mainloop() def genlist(a): mic_list = sr.Microphone.list_microphone_names() j = 1 li = "" for i, microphone_name in enumerate(mic_list): temp = str(j) temp = temp + " - " + microphone_name + "\n" li = li + temp j += 1 # print("\ngenlist's --\n",li) e = textbs["miclist"] # print("\ninslist's --\n", li) e.insert(INSERT, li) popup = Tk() popup.iconbitmap('mic.ico') popup.wm_title("Microphone Confirmation") Label(popup, text="Enter the serial number of the appropriate mic from the following list").grid(row=0,column=0,sticky=W) micl = Text(popup, height=6, width=30, bd=9, font=("Times", 12)) micl.grid(row=1, columnspan=1, sticky = "ew") textbs["miclist"] = micl gl = Button(popup, text="Generate list", width=11) gl.bind("<Button-1>",genlist) gl.grid(row=1, column=1, sticky = "ew") e = Entry(popup,width = 50) e.grid(row=7,sticky = "ew") entries["mic"] = e ent = Button(popup, text="Submit", width=11) ent.bind("<Button-1>", language_selection_window) ent.grid(row=7, column=1, sticky = "ew") popup.mainloop()
{"hexsha": "aab09dc792e7265cfbfbfa5570887a4edbd5e9f1", "size": 27498, "ext": "py", "lang": "Python", "max_stars_repo_path": "Final_app_of_DTW-58-word_Recognizer_using_MFCC-DTW/Speech_Recognizer.py", "max_stars_repo_name": "mayank-kumar-giri/Automatic-Speech-Recognition-for-Chhattisgarhi", "max_stars_repo_head_hexsha": "9c4d198713afe5c66b7ad51852acbad2143c61ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Final_app_of_DTW-58-word_Recognizer_using_MFCC-DTW/Speech_Recognizer.py", "max_issues_repo_name": "mayank-kumar-giri/Automatic-Speech-Recognition-for-Chhattisgarhi", "max_issues_repo_head_hexsha": "9c4d198713afe5c66b7ad51852acbad2143c61ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Final_app_of_DTW-58-word_Recognizer_using_MFCC-DTW/Speech_Recognizer.py", "max_forks_repo_name": "mayank-kumar-giri/Automatic-Speech-Recognition-for-Chhattisgarhi", "max_forks_repo_head_hexsha": "9c4d198713afe5c66b7ad51852acbad2143c61ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0262008734, "max_line_length": 343, "alphanum_fraction": 0.5072732562, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 6967}
import os import argparse from DFLIMG import DFLIMG, DFLPNG from pathlib import Path from PIL import Image import numpy as np parser = argparse.ArgumentParser() parser.add_argument('--upscale_factor', type=int, default=1) parser.add_argument('--model_path', type=str, default='experiments/pretrained_models/GFPGANv1.pth') parser.add_argument('--input_dir', type=str, default='') parser.add_argument('--output_dir', type=str, default='') parser.add_argument('--suffix', type=str, default=None, help='Suffix of the restored faces') parser.add_argument('--only_center_face', action='store_true') parser.add_argument('--aligned', action='store_true') parser.add_argument('--paste_back', action='store_true') parser.add_argument("--gpu_id", dest='gpu_id', default=0, type=int) parser.add_argument('--data_type', type=str, dest="data_type", default='dfl', choices=['dfl', 'raw'], help='Input image type. raw input image does not have meta data for face attributes') parser.add_argument('--randomize_noise', action='store_true') args = parser.parse_args() if args.input_dir.endswith('/'): args.input_dir = args.input_dir[:-1] save_root = args.output_dir os.makedirs(save_root, exist_ok=True) os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu_id) import cv2 import glob import numpy as np import torch from facexlib.utils.face_restoration_helper import FaceRestoreHelper from torchvision.transforms.functional import normalize from archs.gfpganv1_arch import GFPGANv1 from basicsr.utils import img2tensor, imwrite, tensor2img def restoration(gfpgan, face_helper, img_path, save_root, has_aligned=False, only_center_face=True, suffix=None, paste_back=False): # read image img_name = os.path.basename(img_path) print(f'Processing {img_name} ...') basename, _ = os.path.splitext(img_name) input_img = cv2.imread(img_path, cv2.IMREAD_COLOR) face_helper.clean_all() if has_aligned: input_img = cv2.resize(input_img, (512, 512)) face_helper.cropped_faces = [input_img] else: face_helper.read_image(input_img) # get face landmarks for each face face_helper.get_face_landmarks_5(only_center_face=only_center_face, pad_blur=False) # align and warp each face face_helper.align_warp_face() # face restoration for idx, cropped_face in enumerate(face_helper.cropped_faces): # prepare data cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True) normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True) cropped_face_t = cropped_face_t.unsqueeze(0).to('cuda') try: with torch.no_grad(): output = gfpgan(cropped_face_t, return_rgb=False, randomize_noise=args.randomize_noise)[0] # convert to image restored_face = tensor2img(output.squeeze(0), rgb2bgr=True, min_max=(-1, 1)) except RuntimeError as error: print(f'\tFailed inference for GFPGAN: {error}.') restored_face = cropped_face restored_face = restored_face.astype('uint8') face_helper.add_restored_face(restored_face) if not has_aligned and paste_back: face_helper.get_inverse_affine(None) save_restore_path = os.path.join(save_root, img_name) # paste each restored face to the input image face_helper.paste_faces_to_input_image(save_restore_path) # Add DFL meta data to output image if args.data_type == 'dfl': dfl_img1 = DFLIMG.load(Path(img_path)) if dfl_img1: if save_restore_path.split('.')[-1] == 'jpg': dfl_img2 = DFLIMG.load(Path(save_restore_path)) # Add meta data to output image dfl_img2.set_face_type(dfl_img1.get_face_type()) dfl_img2.set_landmarks(dfl_img1.get_landmarks()) dfl_img2.set_source_rect(dfl_img1.get_source_rect()) dfl_img2.set_source_filename(dfl_img1.get_source_filename()) dfl_img2.set_source_landmarks(dfl_img1.get_source_landmarks()) dfl_img2.set_image_to_face_mat(dfl_img1.get_image_to_face_mat()) dfl_img2.save() elif save_restore_path.split('.')[-1] == 'png': DFLPNG.DFLPNG.embed_data( filename = save_restore_path, face_type = dfl_img1.get_face_type(), landmarks = dfl_img1.get_landmarks(), source_filename = dfl_img1.get_source_filename(), source_rect = dfl_img1.get_source_rect(), source_landmarks = dfl_img1.get_source_landmarks(), image_to_face_mat = dfl_img1.get_image_to_face_mat(), pitch_yaw_roll = None, eyebrows_expand_mod = dfl_img1.get_eyebrows_expand_mod(), cfg = None, model_data = None ) else: print('unknown output format: ' + save_restore_path.split('.')[-1]) if __name__ == '__main__': device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # initialize the GFP-GAN gfpgan = GFPGANv1( out_size=512, num_style_feat=512, channel_multiplier=1, decoder_load_path=None, fix_decoder=True, # for stylegan decoder num_mlp=8, input_is_latent=True, different_w=True, narrow=1, sft_half=True) gfpgan.to(device) checkpoint = torch.load(args.model_path, map_location=lambda storage, loc: storage) gfpgan.load_state_dict(checkpoint['params_ema']) gfpgan.eval() types = ('*.png', '*.jpg', '*.jpeg') files_grabbed = [] for files in types: files_grabbed.extend(glob.glob(os.path.join(args.input_dir, files))) img_list = sorted(files_grabbed) # initialize face helper face_helper = FaceRestoreHelper( upscale_factor=args.upscale_factor, face_size=512, crop_ratio=(1, 1), det_model='retinaface_resnet50', save_ext=img_list[0].split('.')[-1]) for img_path in img_list: restoration( gfpgan, face_helper, img_path, save_root, has_aligned=args.aligned, only_center_face=args.only_center_face, suffix=args.suffix, paste_back=args.paste_back) print('Results are in the ' + args.output_dir + ' folder.')
{"hexsha": "1af2dcd7991e91e4af1e874d081278611a039fba", "size": 6776, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference_gfpgan_full.py", "max_stars_repo_name": "chuanli11/GFPGAN", "max_stars_repo_head_hexsha": "4adbf820cef782c7d33113be35e5f1a49f2a3793", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inference_gfpgan_full.py", "max_issues_repo_name": "chuanli11/GFPGAN", "max_issues_repo_head_hexsha": "4adbf820cef782c7d33113be35e5f1a49f2a3793", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference_gfpgan_full.py", "max_forks_repo_name": "chuanli11/GFPGAN", "max_forks_repo_head_hexsha": "4adbf820cef782c7d33113be35e5f1a49f2a3793", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3953488372, "max_line_length": 147, "alphanum_fraction": 0.6375442739, "include": true, "reason": "import numpy", "num_tokens": 1537}
""" Luis Eduardo Sánchez González Universidad Autonoma de Coahuila Facultad de Ciencias Físico Matemáticas mié 03 feb 2021 13:10:46 CST """ import numpy as np class Difference: def __init__(self, f): if callable(f): self.f = f else: raise ValueError("La derivada es igual a cero.") def InitialConditions(self, x0, h): if isinstance(x0, (int, float)): self.x0 = x0 else: self.x0 = np.asarray(x0) self.h = h class Forward(Difference): def Solve(self): f, x0, h = self.f, self.x0, self.h return (f(x0 + h) - f(x0))/h class Central(Difference): def Solve(self): f, x0, h = self.f, self.x0, self.h return (f(x0 + h) - f(x0 - h))/(2*h) class Backward(Difference): def Solve(self): f, x0, h = self.f, self.x0, self.h return (f(x0) - f(x0 - h))/h
{"hexsha": "216af2fc6dad9fff071d014fca00bf1ec068ff34", "size": 814, "ext": "py", "lang": "Python", "max_stars_repo_path": "PhysicsPy/Derivation.py", "max_stars_repo_name": "Luis2501/physicspy", "max_stars_repo_head_hexsha": "003069affc641726ebb4e167f6603033b98919b0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PhysicsPy/Derivation.py", "max_issues_repo_name": "Luis2501/physicspy", "max_issues_repo_head_hexsha": "003069affc641726ebb4e167f6603033b98919b0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PhysicsPy/Derivation.py", "max_forks_repo_name": "Luis2501/physicspy", "max_forks_repo_head_hexsha": "003069affc641726ebb4e167f6603033b98919b0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.0344827586, "max_line_length": 51, "alphanum_fraction": 0.6216216216, "include": true, "reason": "import numpy", "num_tokens": 279}
#pragma once #include <boost/filesystem.hpp> namespace rai { boost::filesystem::path AppPath(); void SetStdinEcho(bool); std::string PemPath(); }
{"hexsha": "b408c81ea59023bff400124cfdf1089919e0d97e", "size": 147, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "rai/secure/plat.hpp", "max_stars_repo_name": "gokoo/Raicoin", "max_stars_repo_head_hexsha": "494be83a1e29106d268f71e613fac1e4033a82f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 94.0, "max_stars_repo_stars_event_min_datetime": "2019-09-25T05:57:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T09:08:06.000Z", "max_issues_repo_path": "rai/secure/plat.hpp", "max_issues_repo_name": "AltonMatrix/Raicoin", "max_issues_repo_head_hexsha": "90bbde4369cb9aa9d1b2cba7c3b82554d8e736a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2020-05-06T10:10:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-26T09:35:16.000Z", "max_forks_repo_path": "rai/secure/plat.hpp", "max_forks_repo_name": "AltonMatrix/Raicoin", "max_forks_repo_head_hexsha": "90bbde4369cb9aa9d1b2cba7c3b82554d8e736a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13.0, "max_forks_repo_forks_event_min_datetime": "2019-09-25T05:57:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T02:09:03.000Z", "avg_line_length": 14.7, "max_line_length": 34, "alphanum_fraction": 0.7414965986, "num_tokens": 38}
""" Simulated devices for documentation and testing """ import collections import itertools import os import tempfile import threading import time from bluesky.utils import short_uid import numpy as np from ophyd import Signal, Device, Component, DeviceStatus, Staged from ophyd.sim import new_uid import scipy.special x, y = np.mgrid[-100:100, -100:100] * 1/200 r = np.hypot(x, y) r *= 20 r -= 15 diffraction_pattern = scipy.special.airy(r)[0] diffraction_pattern -= diffraction_pattern.min() diffraction_pattern *= np.ptp(diffraction_pattern) * 0.5 * (2 ** 16) diffraction_pattern = diffraction_pattern.astype('uint16') shutter_state = {'state': 'open'} class Shutter(Signal): def put(self, value): shutter_state['state'] = value super().put(value) def generate_dark_frame(): values = (np.random.RandomState(0).randint(0, 2**16, 10) * 0.2).astype('uint16') # Tile values into bands. return np.broadcast_to(np.repeat(values, 20), (200, 200)).copy() def generate_image(dark=False): # TODO Add noise, zingers, and other nondeterministic things. output = generate_dark_frame() if not dark: output += diffraction_pattern return output class TimerStatus(DeviceStatus): """Simulate the time it takes for a detector to acquire an image.""" def __init__(self, device, delay): super().__init__(device) self.delay = delay # for introspection purposes threading.Timer(delay, self._finished).start() class DiffractionDetector(Device): exposure_time = Component(Signal, value=1) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._resource_uid = None self._datum_counter = None self._asset_docs_cache = collections.deque() self.save_path = tempfile.mkdtemp() self._path_stem = None self._stashed_image_reading = None self._stashed_image_data_key = None def stage(self): file_stem = short_uid() self._datum_counter = itertools.count() self._path_stem = os.path.join(self.save_path, file_stem) self._resource_uid = new_uid() resource = {'spec': 'NPY_SEQ', 'root': self.save_path, 'resource_path': file_stem, 'resource_kwargs': {}, 'uid': self._resource_uid, 'path_semantics': {'posix': 'posix', 'nt': 'windows'}[os.name]} self._asset_docs_cache.append(('resource', resource)) return super().stage() def trigger(self): if not self._staged == Staged.yes: raise RuntimeError("Device must be staged before it is triggered.") image = generate_image(dark=shutter_state['state'] == 'closed') # Save the actual reading['value'] to disk. For a real detector, # this part would be done by the detector IOC, not by ophyd. data_counter = next(self._datum_counter) np.save(f'{self._path_stem}_{data_counter}.npy', image, allow_pickle=False) # Generate a stash and Datum document. datum_id = '{}/{}'.format(self._resource_uid, data_counter) datum = {'resource': self._resource_uid, 'datum_kwargs': dict(index=data_counter), 'datum_id': datum_id} self._asset_docs_cache.append(('datum', datum)) self._stashed_image_reading = {'value': datum_id, 'timestamp': time.time()} self._stashed_image_data_key = {'source': 'SIM:image', 'shape': image.shape, 'dtype': 'array', 'external': 'FILESTORE'} return TimerStatus(self, self.exposure_time.get()) def read(self): ret = super().read() ret[f'{self.name}_image'] = self._stashed_image_reading return ret def describe(self): ret = super().describe() ret[f'{self.name}_image'] = self._stashed_image_data_key return ret def collect_asset_docs(self): items = list(self._asset_docs_cache) self._asset_docs_cache.clear() for item in items: yield item def unstage(self): self._resource_uid = None self._datum_counter = None self._asset_docs_cache.clear() self._path_stem = None return super().unstage()
{"hexsha": "39872afd408df462523d187202f163af4348e208", "size": 4447, "ext": "py", "lang": "Python", "max_stars_repo_path": "bluesky_darkframes/sim.py", "max_stars_repo_name": "tacaswell/bluesky-darkframes", "max_stars_repo_head_hexsha": "8922eacd7316b3b93112e969376268f2772523a7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-06T22:06:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T17:09:05.000Z", "max_issues_repo_path": "bluesky_darkframes/sim.py", "max_issues_repo_name": "tacaswell/bluesky-darkframes", "max_issues_repo_head_hexsha": "8922eacd7316b3b93112e969376268f2772523a7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2019-07-31T18:17:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T00:15:41.000Z", "max_forks_repo_path": "bluesky_darkframes/sim.py", "max_forks_repo_name": "tacaswell/bluesky-darkframes", "max_forks_repo_head_hexsha": "8922eacd7316b3b93112e969376268f2772523a7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-07-30T14:18:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-18T17:46:00.000Z", "avg_line_length": 33.6893939394, "max_line_length": 84, "alphanum_fraction": 0.6197436474, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1028}
''' Record Linkage Testing Script using Logistic Regression Method over Graph Embeddings generated using TransH ''' import numpy as np import pandas as pd import random import re import recordlinkage import unittest import xml.etree.ElementTree from common import get_logger, log_quality_results, InformationRetrievalMetrics, export_embeddings, export_result_prob from data.cora import Cora from data.febrl import FEBRL from data.census import Census from ER.transh import TransH class TestLogisticTransH(unittest.TestCase): def _test_logistic_transh_erer(self, dataset, params): model = dataset() logger = get_logger('RL.Test.erer.LogisticTransH.ERER.' + str(model)) entA, entB, relA, relB, triA, triB, entity_pairs, prior_pairs, true_pairs = model.get_erer_model() self.assertTrue(all([(tp in entity_pairs) for tp in true_pairs])) #Generate embeddings for datasetA transh = TransH(entA, relA, triA, prior_pairs, dimension=params['dimension'], learning_rate=params['learning_rate'], margin=params['margin'], regularizer_scale=params['regularizer_scale'], batchSize=params['batchSize']) loss = transh.train(max_epochs=params['epochs']) logger.info("Training Complete with loss: %f", loss) ent_embeddingsA = transh.get_ent_embeddings() transh.close_tf_session() del transh #Generate embeddings for datasetB transh = TransH(entB, relB, triB, entity_pairs, dimension=params['dimension'], learning_rate=params['learning_rate'], margin=params['margin'], regularizer_scale=params['regularizer_scale'], batchSize=params['batchSize']) loss = transh.train(max_epochs=params['epochs']) logger.info("Training Complete with loss: %f", loss) ent_embeddingsB = transh.get_ent_embeddings() transh.close_tf_session() ent_embeddingsA = [np.array(ent_embeddingsA[i]) for i in range(ent_embeddingsA.shape[0])] ent_embeddingsB = [np.array(ent_embeddingsB[i]) for i in range(ent_embeddingsB.shape[0])] trainDataA = pd.DataFrame(data=ent_embeddingsA) trainDataB = pd.DataFrame(data=ent_embeddingsB) #Define comparision Class compare_cl = recordlinkage.Compare() for i in range(0, params['dimension']): compare_cl.numeric(i, i, label=str(i)) #method='exp') #sample negative pairs train_pairs = [] tuple_pp = set(map(tuple, prior_pairs)) logger.info("Number of prior_pairs: %d", len(prior_pairs)) for e1, e2 in prior_pairs: train_pairs.append((e1, e2)) while True: neg_e2 = random.choice(xrange(0, len(entB))) if neg_e2 == e2 or (e1, neg_e2) in tuple_pp: continue else: train_pairs.append((e1, neg_e2)) break logger.info("Number of Train Pairs: %d", len(train_pairs)) candidate_links = pd.MultiIndex.from_tuples(train_pairs) features = compare_cl.compute(candidate_links, trainDataA, trainDataB) logger.info("Train Features %s", str(features.describe())) #Train Logistic Regression Model logrg = recordlinkage.LogisticRegressionClassifier() candidate_links = pd.MultiIndex.from_tuples(prior_pairs) logrg.fit(features, candidate_links) #Test Classifier compare_cl = recordlinkage.Compare() for i in range(0, params['dimension']): compare_cl.numeric(i, i, label=str(i)) candidate_links = pd.MultiIndex.from_tuples(entity_pairs) features = compare_cl.compute(candidate_links, trainDataA, trainDataB) logger.info("Test Features %s", str(features.describe())) result = logrg.predict(features) log_quality_results(logger, result, true_pairs, len(entity_pairs)) prob_series = logrg.prob(features) prob = [(1 - p) for p in prob_series.tolist()] result_prob = [(entity_pairs[i][0], entity_pairs[i][1], prob[i]) for i in range(0, len(prob))] ir_metrics = InformationRetrievalMetrics(result_prob, true_pairs) ir_metrics.log_metrics(logger, params, params) #Export results export_embeddings('erer', str(model), 'LogTransH', entA, ent_embeddingsA) export_embeddings('erer', str(model), 'LogTransH', entB, ent_embeddingsB) export_result_prob(dataset, 'erer', str(model), 'LogTransH', entA, result_prob, true_pairs, entB) def _test_logistic_transh(self, dataset, params): """Note: Zero aligned pairs are returned, require fixation.""" model = dataset() logger = get_logger('RL.Test.LogisticTransH.' + str(model)) entity, relation, triples, entity_pairs, true_pairs = model.get_er_model() transh = TransH(entity, relation, triples, entity_pairs, dimension=params['dimension'], learning_rate=params['learning_rate'], margin=params['margin'], regularizer_scale=params['regularizer_scale'], batchSize=params['batchSize']) loss = transh.train(max_epochs=params['epochs']) logger.info("Training Complete with loss: %f", loss) ent_embeddings = transh.get_ent_embeddings() ent_embeddings = [np.array(ent_embeddings[i]) for i in range(ent_embeddings.shape[0])] trainDataA = pd.DataFrame(data=ent_embeddings) trainDataB = pd.DataFrame(data=ent_embeddings) compare_cl = recordlinkage.Compare() for i in range(0, params['dimension']): compare_cl.numeric(i, i, label=str(i), method='gauss') candidate_links = pd.MultiIndex.from_tuples(entity_pairs) features = compare_cl.compute(candidate_links, trainDataA, trainDataB) logger.info("Features %s", str(features.describe())) logrg = recordlinkage.LogisticRegressionClassifier() logrg.fit(features, true_pairs) result = logrg.predict(features) log_quality_results(logger, result, true_pairs, len(entity_pairs)) prob_series = logrg.prob(features) prob = [(1 - p) for p in prob_series.tolist()] result_prob = [(entity_pairs[i][0], entity_pairs[i][1], prob[i]) for i in range(0, len(prob))] ir_metrics = InformationRetrievalMetrics(result_prob, true_pairs) ir_metrics.log_metrics(logger, params) def get_default_params(self): return {'learning_rate': 0.1, 'margin': 1, 'dimension': 80, 'epochs': 100, 'regularizer_scale' : 0.1, 'batchSize' : 100} def test_cora(self): self._test_logistic_transh(Cora, self.get_default_params()) def test_febrl(self): self._test_logistic_transh(FEBRL, self.get_default_params()) def test_census(self): self._test_logistic_transh(Census, self.get_default_params()) def test_cora_erer(self): self._test_logistic_transh_erer(Cora, self.get_default_params()) def test_febrl_erer(self): self._test_logistic_transh_erer(FEBRL, self.get_default_params()) def test_census_erer(self): self._test_logistic_transh_erer(Census, self.get_default_params())
{"hexsha": "634479ced5b2780f56d8bdd8e38ffedbb1a1b99a", "size": 7466, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/erer/test_logistic_transh.py", "max_stars_repo_name": "bhaskargautam/record-linkage", "max_stars_repo_head_hexsha": "01eb29f8b7fb4dd1625187232f2dafe47f24cddf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-06-07T08:33:40.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-07T08:33:40.000Z", "max_issues_repo_path": "tests/erer/test_logistic_transh.py", "max_issues_repo_name": "bhaskargautam/record-linkage", "max_issues_repo_head_hexsha": "01eb29f8b7fb4dd1625187232f2dafe47f24cddf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-09-19T23:30:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:07:09.000Z", "max_forks_repo_path": "tests/erer/test_logistic_transh.py", "max_forks_repo_name": "bhaskargautam/record-linkage", "max_forks_repo_head_hexsha": "01eb29f8b7fb4dd1625187232f2dafe47f24cddf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.9759036145, "max_line_length": 118, "alphanum_fraction": 0.6517546209, "include": true, "reason": "import numpy", "num_tokens": 1644}
[STATEMENT] lemma list_rel_induct[induct set,consumes 1, case_names Nil Cons]: assumes "(l,l')\<in>\<langle>R\<rangle> list_rel" assumes "P [] []" assumes "\<And>x x' l l'. \<lbrakk> (x,x')\<in>R; (l,l')\<in>\<langle>R\<rangle>list_rel; P l l' \<rbrakk> \<Longrightarrow> P (x#l) (x'#l')" shows "P l l'" [PROOF STATE] proof (prove) goal (1 subgoal): 1. P l l' [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: (l, l') \<in> \<langle>R\<rangle>list_rel P [] [] \<lbrakk>(?x, ?x') \<in> R; (?l, ?l') \<in> \<langle>R\<rangle>list_rel; P ?l ?l'\<rbrakk> \<Longrightarrow> P (?x # ?l) (?x' # ?l') goal (1 subgoal): 1. P l l' [PROOF STEP] unfolding list_rel_def [PROOF STATE] proof (prove) using this: (l, l') \<in> {(l, l'). list_all2 (\<lambda>x x'. (x, x') \<in> R) l l'} P [] [] \<lbrakk>(?x, ?x') \<in> R; (?l, ?l') \<in> {(l, l'). list_all2 (\<lambda>x x'. (x, x') \<in> R) l l'}; P ?l ?l'\<rbrakk> \<Longrightarrow> P (?x # ?l) (?x' # ?l') goal (1 subgoal): 1. P l l' [PROOF STEP] apply simp [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>list_all2 (\<lambda>x x'. (x, x') \<in> R) l l'; P [] []; \<And>x x' l l'. \<lbrakk>(x, x') \<in> R; list_all2 (\<lambda>x x'. (x, x') \<in> R) l l'; P l l'\<rbrakk> \<Longrightarrow> P (x # l) (x' # l')\<rbrakk> \<Longrightarrow> P l l' [PROOF STEP] by (rule list_all2_induct)
{"llama_tokens": 643, "file": "Automatic_Refinement_Parametricity_Relators", "length": 4}
[STATEMENT] lemma observable_io_target_unique_target : assumes "observable M" and "io_targets M q1 io = {q2}" and "path M (io || tr) q1" and "length io = length tr" shows "target (io || tr) q1 = q2" [PROOF STATE] proof (prove) goal (1 subgoal): 1. target (io || tr) q1 = q2 [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: observable M io_targets M q1 io = {q2} path M (io || tr) q1 length io = length tr goal (1 subgoal): 1. target (io || tr) q1 = q2 [PROOF STEP] by auto
{"llama_tokens": 216, "file": "Adaptive_State_Counting_FSM_FSM", "length": 2}
df = DataFrame() df[:A] = 1:numData lamb_grid = [10. .^(-7:1)] c_grid = linspace(1, 5, 6) # This choice of c_grid yields no distinguishable difference. Try: c_grid = 2. .^(1:5) deg_grid = [2:6] #2 is a pretty meaningless choice. drop to 3. N = length(lamb_grid) * length(c_grid) * length(deg_grid) res = Array(Float64, N, 5) ix = 1 for l in lamb_grid for c in c_grid for d in deg_grid train(df) = train(df[:A], l, d, c, demand_data, flow_data, arcs) test(df, fit) = test(fit, df[:A], demand_data, flow_data, arcs, g, vArcs) rtrain, rtest = kfold_crossvalidate(df, train, test, 5) res[ix, 1] = l res[ix, 2] = c res[ix, 3] = float(d) res[ix, 4] = mean(rtest) / 1e6 res[ix, 5] = std(rtest) / 1e6 ix +=1 show(res) end end end writetable("trafficCVal.csv", DataFrame(res))
{"hexsha": "0a67f6b7fe0f49a724d837e00ddab2fbf96bd093", "size": 934, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Julia_files/trafficCvalEnd.jl", "max_stars_repo_name": "jingzbu/InverseVITraffic", "max_stars_repo_head_hexsha": "c0d33d91bdd3c014147d58866c1a2b99fb8a9608", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-07-16T02:55:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-21T16:13:33.000Z", "max_issues_repo_path": "Julia_files/trafficCvalEnd.jl", "max_issues_repo_name": "jingzbu/InverseVITraffic", "max_issues_repo_head_hexsha": "c0d33d91bdd3c014147d58866c1a2b99fb8a9608", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Julia_files/trafficCvalEnd.jl", "max_forks_repo_name": "jingzbu/InverseVITraffic", "max_forks_repo_head_hexsha": "c0d33d91bdd3c014147d58866c1a2b99fb8a9608", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-02-09T15:37:10.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-09T15:37:10.000Z", "avg_line_length": 30.1290322581, "max_line_length": 114, "alphanum_fraction": 0.5471092077, "num_tokens": 314}
import geometry.tarski_2 open classical set namespace Euclidean_plane variables {point : Type} [Euclidean_plane point] local attribute [instance, priority 0] prop_decidable -- Right Angles def R (a b c : point) : Prop := eqd a c a (S b c) theorem R.symm {a b c : point} : R a b c → R c b a := begin intro h, have h1 := seven13 b a (S b c), simp at h1, unfold R at h, exact (eqd.trans h h1).flip end theorem eight3 {a b c a' : point} : R a b c → a ≠ b → col b a a' → R a' b c := begin intros h h1 h2, unfold R at *, exact four17 h1.symm h2 (seven5 b c).2 h end theorem R.flip {a b c : point} : R a b c → R a b (S b c) := begin intro h, unfold R at *, simpa using h.symm end @[simp] theorem eight4 (a b : point) : R a a b := (seven5 a b).2 @[simp] theorem eight5a (a b : point) : R a b b := (eight4 b a).symm theorem eight6 {a b c a' : point} : R a b c → R a' b c → B a c a' → b = c := begin intros h h1 h2, unfold R at *, generalize h3 : S b c = c', rw h3 at *, have : c = c', exact four19 h2 h h1.flip, rw ←this at *, exact (seven10.1 h3) end theorem eight7 {a b c : point} : R a b c → R a c b → b = c := begin intros h h1, have h_1 : eqd a c a (S b c), unfold R at h, exact h, have h_2 : eqd a b a (S c b), unfold R at h1, exact h1, have h2 := seven5 b c, generalize h3 : S b c = c', rw h3 at *, generalize h4 : S c a = a', by_contradiction h5, have h6 : col c b c', left, exact h2.1, have h7 := eight3 h1.symm h5 h6, unfold R at h7, rw h4 at h7, have h8 := seven5 c a, rw h4 at h8, have h9 := h8.2, have h10 : R a' b c, unfold R, rw h3, exact eqd.trans h9.symm.flip (eqd.trans h_1 h7.flip), exact h5 (eight6 h h10 h8.1) end theorem eight8 {a b : point} : R a b a → a = b := begin intro h, exact eight7 (eight5a b a).symm h end theorem eight9 {a b c : point} : R a b c → col a b c → a = b ∨ c = b := begin intros h h1, cases em (a = b), simp *, right, have h2 := eight3 h h_1 (four11 h1).2.1, exact eight8 h2 end theorem eight10 {a b c a' b' c' : point} : R a b c → cong a b c a' b' c' → R a' b' c' := begin intros h h1, cases em (b = c), rw h_1 at *, have h2 : b' = c', exact id_eqd h1.2.1.symm, rw h2, exact eight5a a' c', unfold R at *, generalize h2 : S b c = d, generalize h3 : S b' c' = d', have h4 := seven5 b c, have h5 := seven5 b' c', rw h2 at *, rw h3 at *, have h6 : afs c b d a c' b' d' a', repeat {split}, exact h4.1, exact h5.1, exact h1.2.1.flip, exact eqd.trans h4.2.symm (eqd.trans h1.2.1 h5.2), exact h1.2.2.flip, exact h1.1.flip, have h7 := afive_seg h6 (ne.symm h_1), exact eqd.trans h1.2.2.symm (eqd.trans h h7.flip) end def xperp (x : point) (A A' : set point) : Prop := line A ∧ line A' ∧ x ∈ A ∧ x ∈ A' ∧ ∀ {u v}, u ∈ A → v ∈ A' → R u x v def perp (A A' : set point) : Prop := ∃ x, xperp x A A' notation A ` ⊥ ` B := perp A B theorem xperp.symm {x : point} {A A' : set point} : xperp x A A' → xperp x A' A := begin intro h, unfold xperp at *, split, exact h.2.1, split, exact h.1, split, exact h.2.2.2.1, split, exact h.2.2.1, intros u v hu hv, exact (h.2.2.2.2 hv hu).symm end theorem perp.symm {A A' : set point} : perp A A' → perp A' A := begin intro h, cases h with x hx, constructor, exact hx.symm end theorem eight14a {A A' : set point} : perp A A' → A ≠ A' := begin intros h h1, subst A', rcases h with ⟨x, h1, h2, h3, h4, h5⟩, cases six22 h1 h3 with y hy, rw hy.2 at h5, exact hy.1.symm (eight8 (h5 (six17b x y) (six17b x y))) end theorem eight14b {x : point} {A A' : set point} : xperp x A A' → A ≠ A' := λ h, eight14a ⟨x, h⟩ theorem eight14c {x : point} {A A' : set point} : xperp x A A' ↔ perp A A' ∧ is x A A' := begin split, intro h, split, constructor, exact h, have h1 : perp A A', constructor, exact h, unfold xperp at h, unfold is, split, exact h.1, split, exact h.2.1, split, exact eight14a h1, split, exact h.2.2.1, exact h.2.2.2.1, intro h, cases h with h h1, cases h with y hy, unfold is at h1, suffices : x = y, rwa ←this at hy, by_contradiction, suffices : A = A', exact h1.2.2.1 this, apply six21 a h1.1 h1.2.1 h1.2.2.2.1 h1.2.2.2.2, unfold xperp at hy, exact hy.2.2.1, exact hy.2.2.2.1 end theorem eight14d {x y : point} {A A' : set point} : xperp x A A' → xperp y A A' → x = y := begin intros hx hy, by_contradiction, have h : perp A A', constructor, exact hx, have h1 := eight14a h, suffices : A = A', exact h1 this, unfold xperp at *, exact six21 a hx.1 hx.2.1 hx.2.2.1 hx.2.2.2.1 hy.2.2.1 hy.2.2.2.1 end theorem eight14e {A A' : set point} : perp A A' → line A ∧ line A' := begin intro h, cases h with x hx, split, exact hx.1, exact hx.2.1 end theorem eight14f {a b c : point} {A : set point} : perp (l a b) A → col a b c → a ≠ c → perp (l a c) A := begin intros h h1 h2, suffices : l a b = l a c, rwa ←this, exact six18 (eight14e h).1 h2 (six17a a b) h1 end theorem eight13 {x : point} {A A' : set point} : xperp x A A' ↔ line A ∧ line A' ∧ x ∈ A ∧ x ∈ A' ∧ ∃ u v, u ∈ A ∧ v ∈ A' ∧ u ≠ x ∧ v ≠ x ∧ R u x v := begin split, intro h, split, exact h.1, split, exact h.2.1, split, exact h.2.2.1, split, exact h.2.2.2.1, unfold xperp at h, cases six22 h.1 h.2.2.1 with u hu, cases six22 h.2.1 h.2.2.2.1 with v hv, existsi u, existsi v, have h1 : u ∈ A, rw hu.2, simp, have h2 : v ∈ A', rw hv.2, simp, split, exact h1, split, exact h2, split, exact hu.1.symm, split, exact hv.1.symm, exact h.2.2.2.2 h1 h2, intro h, unfold xperp, split, exact h.1, split, exact h.2.1, split, exact h.2.2.1, split, exact h.2.2.2.1, intros a b ha hb, cases h.2.2.2.2 with u hu, cases hu with v hv, have h1 : R a x v, apply eight3 hv.2.2.2.2 hv.2.2.1, have h_1 : A = l x u, exact six18 h.1 hv.2.2.1.symm h.2.2.1 hv.1, rw h_1 at ha, exact ha, apply R.symm, apply eight3 h1.symm hv.2.2.2.1, have h_2 : A' = l x v, exact six18 h.2.1 hv.2.2.2.1.symm h.2.2.2.1 hv.2.1, rw h_2 at hb, exact hb end theorem perp_of_R {a b c : point} : a ≠ b → c ≠ b → R a b c → perp (l a b) (l c (S b c)) := λ h h1 h2, ⟨b, eight13.2 ⟨six14 h, six14 (seven12b h1).symm, six17b a b, or.inr (or.inl (seven5 b c).1.symm), a, c, six17a a b, six17a c (S b c), h, h1, h2⟩⟩ theorem xperp_of_R {a b c : point} : a ≠ b → c ≠ b → R a b c → xperp b (l a b) (l c b) := λ h h1 h2, eight13.2 ⟨six14 h, six14 h1, six17b a b, six17b c b, a, c, six17a a b, six17a c b, h, h1, h2⟩ theorem eight15 {x : point} {A B : set point} : perp A B → x ∈ A → x ∈ B → xperp x A B := begin intros h h1 h2, cases h with y hy, suffices : x = y, subst x, exact hy, by_contradiction h_1, apply eight14b hy, apply six21 h_1, exact hy.1, exact hy.2.1, exact h1, exact h2, exact hy.2.2.1, exact hy.2.2.2.1 end theorem eight16 {a b c u x : point} : col a b x → col a b u → u ≠ x → (c ≠ x ∧ perp (l a b) (l c x) ↔ ¬col a b c ∧ R c x u) := begin intros h1 h2 h3, split, intro h4, have h5 : xperp x (l a b) (l c x), exact eight15 h4.2 h1 (six17b c x), split, intro h_1, apply eight14b h5, exact six18 h5.1 h4.1 h_1 h1, apply R.symm, exact h5.2.2.2.2 h2 (six17a c x), intro h4, cases h4 with h4 h5, have h_1 : c ≠ x, intro h_1, rw h_1 at h4, exact h4 h1, split, exact h_1, existsi x, apply eight13.2, split, exact six14 (six26 h4).1, split, exact six14 h_1, split, exact h1, split, simp, existsi u, existsi c, split, exact h2, split, exact (six17a c x), split, exact h3, split, exact h_1, exact h5.symm end theorem eight18 {a b c : point} : ¬col a b c → ∃! x, col a b x ∧ perp (l a b) (l c x) := begin intros h, cases seg_cons a a c b with y hy, cases seven25 hy.2.symm with p hp, have h1: R a p y, unfold R, suffices : S p y = c, rw this, exact hy.2, exact (seven6 hp.symm).symm, cases seg_cons y y p a with z hz, cases seg_cons y y a p with q hq, generalize hq' : S z q = q', cases seg_cons y y c q' with c' hc', have h2 : afs a y z q q y p a, focus {repeat {split}}, exact hz.1, exact hq.1.symm, exact hq.2.symm.flip, exact hz.2, exact two5 (eqd.refl a q), exact hq.2, have h3 : a ≠ y, intro h_1, rw h_1 at *, have : y = c, exact id_eqd hy.2.symm, exact (six26 h).2.2 this, have h4 := afive_seg h2 h3, have h5 : cong a p y q z y, split, exact h4.symm.flip, split, exact hz.2.symm.flip, exact hq.2.symm.flip, have h6 := (eight10 h1 h5).symm, have h7 : eqd y q y q', unfold R at h6, rwa hq' at h6, cases seven25 hc'.2 with x hx, existsi x, have h8 : R y x c, unfold R, suffices : S x c = c', rw this, exact hc'.2.symm, exact (seven6 hx.symm).symm, have h9 : c ≠ y, intro h_1, rw ←h_1 at hy, apply h, right, right, exact hy.1.symm, have h10 : y ≠ p, intro h_1, rw ←h_1 at hp, unfold M at hp, apply h9, exact id_eqd hp.2.flip, have h11 : hourglass q q' y c c' z x, have h_1 := seven5 z q, rw hq' at h_1, focus {repeat {split}}, exact (three7a hp.1 hq.1 h10.symm).symm, exact hc'.1, exact h7, exact hc'.2.symm, exact h_1.1, exact h_1.2, exact hx.1.symm, exact hx.2.symm, have h12 := seven22 h11, have h13 : y ≠ z, intro h_1, rw ←h_1 at hz, exact h10 (id_eqd hz.2.symm), have h14 : a ≠ y, intro h_1, rw ←h_1 at hy, apply (six26 h).2.2, exact id_eqd hy.2.symm, have h15 : l y z = l a b, apply six18 (six14 h13) (six26 h).1, right, right, exact hz.1, right, right, exact three7a hy.1 hz.1 h14, have h16 : c ≠ x, intro h_1, rw h_1 at *, apply h, have h_2 : x ∈ l a b, rw ←h15, right, right, exact h12.symm, exact h_2, have h17 : q ≠ z, intro h_1, rw h_1 at *, have h_2 : B z y c, exact three7b hq.1.symm hp.1.symm h10, apply h, suffices : c ∈ l a b, exact this, rw ←h15, right, right, exact h_2.symm, have h18 : xperp x (l y z) (l c x), apply eight13.2, split, exact six14 h13, split, exact (six14 h16), split, right, right, exact h12.symm, split, simp, existsi y, existsi c, simp, split, intro h_1, rw h_1 at *, have h_1 : q ∈ l c x, left, exact three7a hp.1 hq.1 h10.symm, have h_2 : q' ∈ l c x, suffices : c' ≠ x, cases five2 this hx.1 hc'.1.symm, right, right, exact h_2.symm, right, left, exact h_2, intro h_2, rw h_2 at *, apply h9, exact id_eqd hx.2.symm.flip, have h_3 := seven5 z q, rw hq' at h_3, have h_4 : q ≠ q', intro h_4, rw ←h_4 at *, apply h17, exact seven3.1 h_3, have h_5 : l c x = l q q', exact six18 (six14 h9) h_4 h_1 h_2, have h_6 : z ∈ l c x, rw h_5, right, left, exact h_3.1.symm, have h_7 := (four11 h_6).2.2.1, have h_8 : c ∈ l a b, rw ←h15, exact h_7, exact h h_8, split, exact h16, exact h8, rw h15 at h18, have h19 : x ∈ l y z, right, right, exact h12.symm, split, split, rw h15 at h19, exact h19, constructor, exact h18, intros x' hx', have h20 : c ≠ x', intro h_1, apply h, rw ←h_1 at hx', exact hx'.1, have h21 : xperp x' (l a b) (l c x'), exact eight15 hx'.2 hx'.1 (six17b c x'), have h22 : R c x x', apply (h18.symm).2.2.2.2, simp, exact hx'.1, have h23 : R c x' x, apply (h21.symm).2.2.2.2, simp, rw ←h15, exact h19, exact eight7 h23 h22 end theorem eight17 {a : point} {A : set point} : line A → a ∉ A → ∃! x, xperp x A (l a x) := begin intros h h1, rcases h with ⟨p, q, hq, h2⟩, subst h2, have h3 : ¬col p q a, intro h_1, exact h1 h_1, cases eight18 h3 with x hx, refine ⟨x, eight15 hx.1.2 hx.1.1 (six17b a x), _⟩, intros y hy, exact hx.2 y ⟨hy.2.2.1, y, hy⟩ end theorem eight19 {p q r : point} (a : point) : R p q r ↔ R (S a p) (S a q) (S a r) := begin unfold R, split, intro h, suffices : (S (S a q) (S a r)) = (S a (S q r)), rw this, exact (seven16 a).1 h, have h1 := seven5 (S a q) (S a r), suffices : M (S a r) (S a q) (S a (S q r)), exact seven4 h1 this, apply (seven14 a).1, exact seven5 q r, intro h, suffices : S a ((S (S a q) (S a r))) = S q r, rw ←this, apply (seven16 a).2, simp, exact h, suffices : S a (S a (S (S a q) (S a r))) = (S a (S q r)), exact seven9 this, simp, have h1 := seven5 (S a q) (S a r), suffices : M (S a r) (S a q) (S a (S q r)), exact seven4 h1 this, apply (seven14 a).1, exact seven5 q r end theorem eight20 {a b c p : point} : R a b c → M (S a c) p (S b c) → R b a p ∧ (b ≠ c → a ≠ p) := begin intros h h1, have h2 := seven5 b c, have h3 := seven5 a b, have h4 := seven5 a c, have h5 := seven5 a (S b c), have h6 := seven5 a p, have h7 : R (S a b) b c, cases em (a = b), rw h_1 at *, simp, apply eight3 h h_1, left, exact h3.1, have h8 := (eight19 a).1 h7, unfold R at h7, have h9 := (seven16 a).1 h7, simp at *, have h10 : ifs (S a c) p (S b c) b (S a (S b c)) (S a p) c b, focus {repeat {split}}, exact h1.1, have h_1 := (seven15 a).1 h1.1, simp at h_1, exact h_1.symm, apply two5, have h_2 := seven13 a (S a c) (S b c), simp at h_2, exact h_2, apply eqd.trans h1.2.symm, have h_3 := seven13 a p (S a c), simp at h_3, exact h_3, exact h9.flip, exact h2.2.symm.flip, have h11 := four2 h10, split, unfold R, exact h11.flip, intros hbc hap, apply hbc, have h12 := seven7 a c, rw hap at *, have h13 := seven5 p (S p c), simp at h13, have h14 := seven4 h13 h1, rw ←h14 at h2, exact (seven3.1 h2).symm end theorem eight21 {a b : point} (hab : a ≠ b) (c : point) : ∃ p t, perp (l a b) (l p a) ∧ col a b t ∧ B c t p := begin cases em (col a b c) with habc h, cases six25 hab with c' h, cases eight18 h with x hx, have h1 : c' ≠ x, intro h_1, rw h_1 at *, exact h hx.1.1, have h2 : xperp x (l a b) (l c' x), exact eight15 hx.1.2 hx.1.1 (six17b c' x), unfold xperp at h2, have h3 := h2.2.2.2.2 (six17a a b) (six17a c' x), unfold R at h3, have h4 := seven5 a c', cases seven25 (eqd.trans h4.2.symm h3) with p hp, have h5 := eight20 (h2.2.2.2.2 (six17a a b) (six17a c' x)) hp, have h6 := h5.2 h1.symm, existsi p, existsi c, cases em (x = a), rw h_1 at hx, rw h_1 at hp, have h_2 : S a c' = p, exact seven3.1 hp, rw h_2 at h4, have h_3 : col c' a p, left, exact h4.1, have h_4 : l c' a = l p a, apply six18 (six14 (six26 h).2.2.symm) h6.symm h_3 (six17b c' a), have h_5 := hx.1, rw h_4 at h_5, split, exact h_5.2, split, exact habc, exact three3 c p, split, existsi a, apply eight13.2, split, exact six14 (six26 h).1, split, exact six14 h6.symm, split, simp, split, simp, existsi x, existsi p, split, exact hx.1.1, simp, split, exact h_1, split, exact h6.symm, exact h5.1, split, exact habc, exact three3 c p, cases eight18 h with x hx, have h1 : c ≠ x, intro h_1, rw h_1 at *, exact h hx.1.1, have h2 : xperp x (l a b) (l c x), exact eight15 hx.1.2 hx.1.1 (six17b c x), unfold xperp at h2, have h3 := h2.2.2.2.2 (six17a a b) (six17a c x), unfold R at h3, have h4 := seven5 a c, cases seven25 (eqd.trans h4.2.symm h3) with p hp, have h5 := eight20 (h2.2.2.2.2 (six17a a b) (six17a c x)) hp, have h6 := h5.2 h1.symm, cases three17 (seven5 x c).1.symm h4.1.symm hp.1.symm with t ht, cases em (x = a), rw h_1 at ht, existsi p, existsi a, have h_2 : t = a, exact (bet_same ht.2).symm, rw h_2 at *, rw h_1 at hx, have h_3 := hx.1, have h_4 : l c a = l p a, apply six18 (six14 (six26 h).2.2.symm) h6.symm, left, exact ht.1.symm, simp, rw h_4 at h_3, split, exact h_3.2, split, exact h_3.1, exact ht.1.symm, existsi p, existsi t, have h7 : col a b t, have h_2 : col a x t, right, left, exact ht.2, exact five4 (ne.symm h_1) (four11 hx.1.1).1 h_2, split, existsi a, apply eight13.2, split, exact six14 (six26 h).1, split, exact six14 h6.symm, split, simp, split, simp, existsi x, existsi p, split, exact hx.1.1, split, simp, split, exact h_1, split, exact h6.symm, exact h5.1, split, exact h7, exact ht.1.symm end lemma eight23 {a b p q t t' r : point} (hp : ((l a b) ⊥ l p a) ∧ col a b t' ∧ B a t' p) (ht : ((l b a) ⊥ l q b) ∧ col b a t ∧ B p t q) (hr : B b r q ∧ eqd a p b r): ∃ x, M a x b ∧ M p x r := begin have h : a ≠ b, exact six13 (eight14e hp.1).1, cases pasch ht.2.2 hr.1 with x hx, have h1 : col a b x, have h_1 : col b t x, right, left, exact hx.1, cases em (b = t), rw ←h_2 at *, have h_3 : x = b, exact (bet_same hx.1).symm, rw h_3, left, exact three1 a b, exact (four11 (five4 h_2 (four11 ht.2.1).1 h_1)).2.1, have h2 : xperp a (l a b) (l p a), exact eight15 hp.1 (four11 (four12 a b)).1 (six17b p a), have h3 : xperp b (l b a) (l q b), exact eight15 ht.1 (four11 (four12 b a)).1 (six17b q b), have h4 := h2.2.2.2.2 (six17b a b) (six17a p a), have h5 := h3.2.2.2.2 (six17b b a) (six17a q b), have h6 : R a b r, have : col b q r, right, left, exact hr.1.symm, exact (eight3 h5.symm (six13 h3.2.1) this).symm, have h7 : ¬col a p b, intro h_1, cases eight9 h4 (four11 h_1).2.2.2.1, exact h h_2.symm, exact (six13 h2.2.1) h_2, have h8 : ¬col a b r, intro h_1, cases eight9 h6 h_1, exact h h_2, rw h_2 at *, exact (six13 h2.2.1).symm (id_eqd hr.2), suffices : eqd b p a r, have h_1 : p ≠ r, intro h_1, rw h_1 at hx, have h_2 : r = x, exact bet_same hx.2, rw h_2 at h8, exact h8 h1, constructor, apply seven21 h7 h_1 hr.2 this.flip (four11 h1).1, left, exact hx.2.symm, have h9 : x ≠ a, intro h_1, rw h_1 at *, have h_2 : col a p r, right, right, exact hx.2, have h_3 : R r a b, exact eight3 h4.symm (six13 h2.2.1) h_2, apply h, exact eight7 h_3 h6.symm, have h10 := seven5 a p, cases seg_cons x x r (S a p) with r' hr', cases seven25 hr'.2 with m hm, have h11 := seven5 m r, have h12 := seven4 h11 hm.symm, have h13 : R x m r, unfold R, rw ←h12 at hr', exact hr'.2.symm, have h14 : R x a p, exact eight3 h4 (ne.symm h) h1, have h15 : ¬col x p (S a p), intro h_1, have h_2 : col p a (S a p), left, exact (seven5 a p).1, have h_3 : p ≠ (S a p), intro h_3, exact (six13 h2.2.1) (seven10.1 h_3.symm).symm, have h_4 : col p a x, exact five4 h_3 (four11 h_2).1 (four11 h_1).2.2.1, cases eight9 h14.symm h_4, exact (six13 h2.2.1) h_5, exact h9 h_5, have h16 : hourglass p (S a p) x r r' a m, focus {repeat {split}}, exact hx.2.symm, exact hr'.1, unfold R at h14, exact h14, exact hr'.2.symm, exact h10.1, exact h10.2, rw ←h12, exact h11.1, rw ←h12, exact h11.2, have h17 := seven22 h16, have h18 : r ≠ m, intro h_1, rw ←h_1 at h17, have h_2 : col a x r, left, exact h17, apply h8, exact five4 h9.symm (four11 h1).1 h_2, have h19 : x ≠ m, intro h_1, have h_2 : col r x p, left, exact hx.2, have h_3 : x ≠ r', intro h_3, rw [←h_1, ←h_3] at h12, rw ←h_1 at h18, apply h18, exact seven9 (eq.trans h12 (seven11 x).symm), have h_4 : col r x (S a p), have h_4 : col x r' (S a p), right, right, exact hr'.1, have h_5 : col x r' r, rw h_1, right, right, exact hm.1.symm, exact (four11 (five4 h_3 h_5 h_4)).2.1, have h_5 : x ≠ r, intro h_5, rw h_5 at h_1, exact h18 h_1, apply h15, exact five4 h_5 (four11 h_2).2.1 (four11 h_4).2.1, have h20 : col a b m, have h_1 : col a x m, left, exact h17, exact five4 h9.symm (four11 h1).1 h_1, have h21 : xperp b (l a b) (l r b), apply eight13.2, split, exact six14 h, split, exact six14 (six26 h8).2.1.symm, split, simp, split, simp, existsi a, existsi r, simp, split, exact h, split, exact (six26 h8).2.1.symm, exact h6, have h22 : xperp m (l a b) (l r m), apply eight13.2, split, exact six14 h, split, exact six14 h18, split, exact h20, split, simp, existsi x, existsi r, split, exact h1, simp, split, exact h19, split, exact h18, exact h13, have h23 : perp (l a b) (l r b), constructor, exact h21, have h24 : perp (l a b) (l r m), constructor, exact h22, have h25 : m = b, apply unique_of_exists_unique (eight18 h8), split, exact h20, exact h24, split, left, exact three1 a b, exact h23, subst m, have h26 : ifs (S a p) a p r r b r' (S a p), focus {repeat {split}}, exact h10.1.symm, exact hm.1.symm, apply two4, apply two11 h10.1 hm.1.symm, exact hr.2.flip, exact eqd.trans h10.2.symm (eqd.trans hr.2 hm.2.symm), exact eqd.trans hr.2 hm.2.symm, exact two4 (eqd.refl r (S a p)), apply two5, apply two11 hx.2.symm hr'.1, unfold R at h14, exact h14.flip, exact hr'.2.symm, have h27 := four2 h26, unfold R at h4, exact eqd.trans h4 h27.symm end theorem eight22 (a b : point) : ∃! x, M a x b := begin cases em (a = b), rw h, existsi b, split, apply seven3.2, refl, intros y hy, exact (seven3.1 hy).symm, apply exists_unique_of_exists_of_unique, cases eight21 h a with p hp, cases eight21 (ne.symm h) p with q hq, cases hp with t' hp, cases hq with t ht, cases five10 a p b q, cases h_1 with r hr, cases eight23 hp ht hr with x hx, constructor, exact hx.1, suffices : ∃ x, M b x a, cases this with x hx, constructor, exact hx.symm, cases h_1 with r hr, have : ∃ x, M b x a ∧ M q x r, apply eight23, split, exact ht.1, split, exact (four11 (four12 b a)).1, exact three3 b q, split, exact hp.1, split, exact (four11 ht.2.1).2.1, exact ht.2.2.symm, exact hr, cases this with x hx, constructor, exact hx.1, intros x y hx hy, exact seven17 hx hy end theorem eight24 {a b p q r t : point} : perp (l p a) (l a b) → perp (l q b) (l a b) → col a b t → B p t q → B b r q → eqd a p b r → ∃ x, M a x b ∧ M p x r := begin intros g3 g4 g5 g6 g7 g8, have g9 := (four11 (four12 a b)).1, have g10 := three3 a p, have g11 := six17 a b, rw g11 at g4, have hp : ((l a b) ⊥ l p a) ∧ col a b a ∧ B a a p, exact ⟨g3.symm, ⟨g9, g10⟩⟩, have ht : ((l b a) ⊥ l q b) ∧ col b a t ∧ B p t q, exact ⟨g4.symm, ⟨(four11 g5).2.1, g6⟩⟩, have hr : B b r q ∧ eqd a p b r, exact ⟨g7, g8⟩, exact eight23 hp ht hr end theorem eight25 {a b : point} : a ≠ b → ∃ c, R a b c ∧ c ≠ b := begin intro h, rcases eight21 h.symm a with ⟨c, p, h1⟩, refine ⟨c, (eight15 h1.1 (six17a b a) (six17b c b)).2.2.2.2 (six17b b a) (six17a c b), _⟩, exact six13 (eight14e h1.1).2 end end Euclidean_plane
{"author": "ImperialCollegeLondon", "repo": "xena-UROP-2018", "sha": "b111fb87f343cf79eca3b886f99ee15c1dd9884b", "save_path": "github-repos/lean/ImperialCollegeLondon-xena-UROP-2018", "path": "github-repos/lean/ImperialCollegeLondon-xena-UROP-2018/xena-UROP-2018-b111fb87f343cf79eca3b886f99ee15c1dd9884b/src/Geometry/tarski_3.lean"}
using HarwellRutherfordBoeing using Krylov using LinearOperators # using ProfileView # M = HarwellBoeingMatrix("data/illc1033.rra"); M = HarwellBoeingMatrix("data/illc1850.rra"); A = M.matrix; (m, n) = size(A); @printf("System size: %d rows and %d columns\n", m, n); # Define a linear operator with preallocation. Ap = zeros(m); Atq = zeros(n); op = LinearOperator(m, n, false, false, p -> A_mul_B!(1.0, A, p, 0.0, Ap), q -> At_mul_B!(1.0, A, q, 0.0, Atq), q -> At_mul_B!(1.0, A, q, 0.0, Atq)); λ = 1.0e-3; λ > 0.0 && (N = 1./λ * opEye(n)) for nrhs = 1 : size(M.rhs, 2) b = M.rhs[:,nrhs]; (x, stats) = lsqr(op, b, λ=λ, sqd=λ > 0, atol=0.0, btol=0.0, N=N); # @profile (x, stats) = lsqr(op, b, λ=λ, sqd=λ > 0, atol=0.0, btol=0.0, N=N); @time (x, stats) = lsqr(op, b, λ=λ, sqd=λ > 0, atol=0.0, btol=0.0, N=N); show(stats); resid = norm(A' * (A * x - b) + λ * x) / norm(b); @printf("LSQR: Relative residual: %8.1e\n", resid); @printf("LSQR: ‖x‖: %8.1e\n", norm(x)); end # ProfileView.view()
{"hexsha": "e471f69d58cb0a28d00dd25ae1a3e9b498f07bf1", "size": 1068, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/test_lsqr.jl", "max_stars_repo_name": "abelsiqueira/Krylov.jl", "max_stars_repo_head_hexsha": "dc0ca5466f7f1f7e65958fe016e3a06b858e3df0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-06T18:14:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-06T18:14:15.000Z", "max_issues_repo_path": "examples/test_lsqr.jl", "max_issues_repo_name": "abelsiqueira/Krylov.jl", "max_issues_repo_head_hexsha": "dc0ca5466f7f1f7e65958fe016e3a06b858e3df0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/test_lsqr.jl", "max_forks_repo_name": "abelsiqueira/Krylov.jl", "max_forks_repo_head_hexsha": "dc0ca5466f7f1f7e65958fe016e3a06b858e3df0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-05T10:58:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-05T10:58:57.000Z", "avg_line_length": 31.4117647059, "max_line_length": 79, "alphanum_fraction": 0.5505617978, "num_tokens": 450}
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Principal components analysis (PCA) ========================================================= These figures aid in illustrating how a point cloud can be very flat in one direction--which is where PCA comes in to choose a direction that is not flat. """ from __future__ import print_function # Authors: Gael Varoquaux # Jaques Grobler # Kevin Hughes # License: BSD 3 clause #from scipy import stats import vtk import os import argparse import timeit import pickle as pickle import random from imblearn.over_sampling import SMOTE #import matplotlib.pyplot as plt import pprint import inputData #from sklearn.decomposition import PCA import math import inputData import glob import numpy as np import collections from sklearn import svm from sklearn.metrics import accuracy_score #from matplotlib.colors import ListedColormap from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.datasets import make_moons, make_circles from sklearn.neural_network import MLPClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.naive_bayes import GaussianNB from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis from sklearn.svm import LinearSVC from sklearn.metrics import confusion_matrix, roc_curve, auc import itertools from sklearn import preprocessing # ############################################################################# # Generate data parser = argparse.ArgumentParser(description='Shape Variation Analyzer', formatter_class=argparse.ArgumentDefaultsHelpFormatter) #parser.add_argument('--model', type=str, help='pickle file with the pca decomposition', required=True) #parser.add_argument('--shapeDir', type=str, help='Directory with vtk files .vtk', required=True) parser.add_argument('--picklefile',dest='picklefile',help='picklefile with the dataset',required=True) #parser.add_argument('--dataPathtrain', action='store', dest='dirwithSubtrain', help='folder with subclasses', required=True) #parser.add_argument('--dataPathtest', action='store', dest='dirwithSubtest', help='folder with subclasses', required=True) #parser.add_argument('--train_size', help='train ratio', type=float, default=0.8) #parser.add_argument('--validation_size', help='validation ratio from test data', default=0.5, type=float) #parser.add_argument('--feature_names', help='Extract the following features from the polydatas', nargs='+', default=["Normals", "Mean_Curvature", "distanceGroup"], type=str) #parser.add_argument('--out', dest="pickle_file_new", help='Pickle file output', default="new_dataset.pickle", type=str) #parser.add_argument('-outputdataPath', action='store', dest='dirwithSubGenerated', help='folder with subclasses after generation of data', required=True) #parser.add_argument('--outputGenerated', help='output folder for shapes', default='./out') #parser.add_argument('--num_shapes', type=int, help='number shapes to be generated', default=10) #parser.add_argument('--meanShape',help='mean shape', required=True) def readData(shapedir): #Read data from vtk files print("loading data ......") print("+++++++Read the surface shape data+++++++") vtkdirshapes = os.listdir(shapedir) y_design = [] numpoints = -1 nshape = 0 firstshapedata = 0 for vtkfilename in vtkdirshapes: if vtkfilename.endswith((".vtk")): print("Reading", vtkfilename) reader = vtk.vtkPolyDataReader() reader.SetFileName(os.path.join(shapedir, vtkfilename)) reader.Update() shapedata = reader.GetOutput() shapedatapoints = shapedata.GetPoints() if firstshapedata == 0: firstshapedata = shapedata y_design.append([]) if numpoints == -1: numpoints = shapedatapoints.GetNumberOfPoints() if numpoints != shapedatapoints.GetNumberOfPoints(): print("WARNING! The number of points is not the same for the shape:", vtkfilename) for i in range(shapedatapoints.GetNumberOfPoints()): p = shapedatapoints.GetPoint(i) y_design[nshape].append(p) nshape+=1 y_design = np.array(y_design) return y_design.reshape(y_design.shape[0], -1), firstshapedata def writeData(data_for_training,outputdataPath): #write data in a vtk file vtkdirshapes = os.listdir(outputdataPath) for vtkfilename in vtkdirshapes: if vtkfilename.endswith((".vtk")): print("Writing", vtkfilename) writer = vtk.vtkPolyDataWriter() writer.SetInput(data_for_training) writer.SetFileName(os.path.join(outputdataPath),vtkfilename) writer.Write() def get_labels(pickle_file): #get labels of a dataset and returns the labels array and the dataset with features #num_classes=len(pickle_file) #num_shapes = 268 #should be changed!! labels = [] shape =[] dataset_concatenated =[] for label, pickle_file in enumerate(pickle_file): try: with open(pickle_file,'rb') as f: dataset=pickle.load(f) shape_dataset = np.shape(dataset) num_shapes_per_group = shape_dataset[0] print('num shapes per group',label,num_shapes_per_group) l=[label]*num_shapes_per_group labels.extend(l) dataset_concatenated.extend(dataset) except Exception as e: print('Unable to process', pickle_file,':',e) raise features=np.array(dataset_concatenated) shape_features=np.shape(features) return features.reshape(-1,shape_features[1]*shape_features[2]), np.array(labels) def generate_data(pca_model): #generate data thanks to pca decomposition (not used) print("Generating data ...") pca = pca_model["pca"] X_ = pca_model["X_"] X_pca_ = pca_model["X_pca_"] X_pca_var = pca_model["X_pca_var"] print('Variance',X_pca_var) print('Mean',X_pca_) #between -1 and 1 alpha = 2.0*(np.random.random_sample(np.size(X_pca_))) - 1.0 print('alpha', alpha) data_compressed = 1.5*X_pca_var * alpha + X_pca_ print('data compressed',data_compressed) data_generated = pca.inverse_transform(data_compressed) + X_ return data_generated def generate_with_SMOTE(dataset,labels): #generate data thanks to SMOTE algorithm, it balances different groups sm=SMOTE(random_state=42,kind='borderline1') print('shape dataset',dataset.shape) print('shape labels',labels.shape) dataset_res, labels_res = sm.fit_sample(dataset,labels) print('shape dataset resampled',np.shape(dataset_res),'shape lables resampled',np.shape(labels_res)) return dataset_res,labels_res # def PCA_plot(dataset,labels,dataset_res,labels_res): # #plot original dat and data resampled after a PCA decomposition # pca = PCA(n_components=200) # pca.fit(dataset) # dataset_pca=pca.transform(dataset) # print('original shape: ',dataset.shape) # print('transformed shape:',dataset_pca.shape) # #print('Ratio variance',pca.explained_variance_ratio_) # #plt.scatter(dataset[:,0],dataset[:,1],alpha=0.2) # #dataset_new = pca.inverse_transform(dataset_pca) # plt.figure(2) # plt.subplot(121) # plt.scatter(dataset_pca[:,0],dataset_pca[:,1],edgecolor='none',alpha=0.5,c=labels,cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(labels))[0])) # plt.title('Original data with pca (' + str(dataset.shape[0]) + ' samples)') # #pca.fit(dataset_res) # dataset_res_pca=pca.transform(dataset_res) # plt.subplot(122) # plt.scatter(dataset_res_pca[:,0],dataset_res_pca[:,1],edgecolor='none',alpha=0.5,c=labels_res,cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(labels_res))[0])) # plt.title('Resampled data with pca (' + str(dataset_res_pca.shape[0]) + ' samples)') # for i in range(1,3): # plt.subplot(1,2,i) # plt.xlabel('component 1') # plt.ylabel('component 2') # plt.colorbar() # cumsum = np.cumsum(pca.explained_variance_ratio_) # plt.figure(1) # plt.plot(cumsum) # plt.xlabel('nb of components') # plt.ylabel('cumulative explained variance') # plt.axhline(y=0.95, linestyle=':', label='.95 explained', color="#f23e3e") # numcomponents = len(np.where(cumsum < 0.95)[0]) # plt.axvline(x=numcomponents, linestyle=':', label=(str(numcomponents) + ' components'), color="#31f9ad") # plt.legend(loc=0) # histo = np.bincount(labels) # histo_range = np.array(range(histo.shape[0])) # plt.figure(3) # plt.bar(histo_range, histo) # plt.xlabel('Groups') # plt.ylabel('Number of samples') # for xy in zip(histo_range, histo): # plt.annotate(xy[1], xy=xy, ha="center", color="#4286f4") # plt.show() # def plot_confusion_matrix(cm, classes, # normalize=False, # title='Confusion matrix', # cmap=plt.cm.Blues): # """ # This function prints and plots the confusion matrix. # Normalization can be applied by setting `normalize=True`. # """ # if normalize: # cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] # print("Normalized confusion matrix") # else: # print('Confusion matrix, without normalization') # print(cm) # plt.figure() # plt.imshow(cm, interpolation='nearest', cmap=cmap) # plt.title(title) # plt.colorbar() # tick_marks = np.arange(len(classes)) # plt.xticks(tick_marks, classes, rotation=45) # plt.yticks(tick_marks, classes) # fmt = '.2f' if normalize else 'd' # thresh = cm.max() / 2. # for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): # plt.text(j, i, format(cm[i, j], fmt), # horizontalalignment="center", # color="white" if cm[i, j] > thresh else "black") # #plt.tight_layout() # plt.ylabel('True label') # plt.xlabel('Predicted label') def training_acc(X_train,y_train,X_test,y_test,classifiers): tab_score=[] for clf in classifiers: clf.fit(X_train, y_train) score = clf.score(X_test, y_test) print('score',score) tab_score.append(score) print('TAB score',tab_score) return tab_score def SVM_classification(X_dataset,y_labels,dataset_test,labels_test): # model = svm.SVC(decision_function_shape='ovr',kernel='rbf',C=100,gamma=10) # model.fit(X_dataset,y_labels) # model.score(X_dataset,y_labels) print('data shape',X_dataset.shape) # predicted_labels = model.predict(test_dataset) # acc = accuracy_score(test_labels,predicted_labels,normalize=True) # print('accuracy',acc) h = .02 # step size in the mesh names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process", "Decision Tree", "Random Forest", "MLP Classifier", "AdaBoost", "Naive Bayes", "QDA"] classifiers = [ KNeighborsClassifier(3), SVC(kernel="linear", C=0.025), SVC(gamma=2, C=1), GaussianProcessClassifier(1.0 * RBF(1.0)), DecisionTreeClassifier(max_depth=5), RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1), MLPClassifier(alpha=1), AdaBoostClassifier(), GaussianNB(), QuadraticDiscriminantAnalysis()] #linearly_separable = (X_dataset,y_labels) #datasets = [linearly_separable] i=1 #standardizing features #X_stand = StandardScaler().fit_transform(X) X_train,X_test, y_train, y_test = train_test_split(X_dataset,y_labels, test_size =0.4) # just plot the dataset first cm = plt.cm.RdBu #cm_bright = ListedColormap(['#FF0000', '#0000FF','#48FF00']) #fig1,ax1=plt.subplots(3,4) #fig2,ax2=plt.subplots(3,4) #if ds_cnt == 0: # ax.set_title("Input data") # Plot the training points #score=training_acc(X_train,y_train,X_test,y_test,classifiers) # iterate over classifiers #for name, clf in zip(names, classifiers): # ax = plt.subplot(3, 4, i) # clf.fit(X_train, y_train) #score = clf.score(X_test[:,2:], y_test) #print('score 2 features',score) #make meshgrid # x_min, x_max = X_train[:, 0].min()-1, X_train[:, 0].max()+1 # y_min, y_max = X_train[:, 1].min()-1, X_train[:, 1].max()+1 # xx, yy= np.meshgrid(np.arange(x_min, x_max,1) ,np.arange(y_min, y_max, 1)) # print('shape xx',xx.shape,'shape yy',yy.shape) # print('shape ravel',np.c_[xx.ravel(),yy.ravel()].shape) # if hasattr(clf, "decision_function"): # Z = clf.decision_function(np.c_[xx.ravel(),yy.ravel()]) # print('xx shape',xx.shape,'yy shape',yy.shape,'Z shape',Z.shape) # Z=Z[:,1] # else: # Z = clf.predict_proba(np.c_[xx.ravel(),yy.ravel()])[:,1] # print('Z shape',Z.shape) # Z = Z.reshape(xx.shape) # ax.contourf(xx,yy,Z, cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(y_test))[0]), alpha=.4) # Plot also the training points # CS=ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(y_train))[0])) # and testing points # ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(y_test))[0]),edgecolors='k', alpha=0.6) # ax.set_xticks(()) # ax.set_yticks(()) # ax.set_title(name) #plt.suptitle(score,y=1.05,fontsize=18) #a=0.98 #if name=="Current neural network": # ax.text(240, -40, a,size=15, horizontalalignment='right') #else: #ax.text(240, -40, ('%.2f' % score).lstrip('0'),size=15, horizontalalignment='right') #clf.fit(X_train, y_train) #score_training = clf.score(X_train, y_train) #print('score training',score_training) #ax.text(x_max-0.5, y_min+0.5, ('%.2f' % score).lstrip('0'),size=15, horizontalalignment='right') for name,clf in zip(names,classifiers): clf.fit(X_train, y_train) y_prediction = clf.predict(dataset_test) print('y_prediction',y_prediction,'labels_test',labels_test) test_score = accuracy_score(labels_test,y_prediction) confusion = confusion_matrix(labels_test,y_prediction) print('The accuracy of ',name,'is',test_score) name_labels=["group0","group1","group2","group3","group4","group5"] plot_confusion_matrix(confusion,name_labels,title=name) if hasattr(clf, "decision_function"): #binarize labels lb = preprocessing.LabelBinarizer() lb.fit([0,1,2,3,4,5]) print('y_test',y_test) y_test_bin=lb.transform(y_test) fpr=dict() tpr=dict() roc_auc=dict() y_score = clf.fit(X_train,y_train).decision_function(X_test) print('y_score',y_score) print('y_score shape',y_score.shape,'y_test shape',y_test.shape) #compute ROC curve and ROC area for each class for j in range(6): print(j) fpr[j], tpr[j], _ = roc_curve(y_test_bin[:,j],y_score[:,j]) roc_auc[j]=auc(fpr[j],tpr[j]) plt.figure() lw=2 plt.plot(fpr[2],tpr[2],color='darkorange',lw=lw,label='ROC curve (area = %0.2f)'%roc_auc[2]) plt.show #ax.text(x_max-1,y_min+0.1,('%.3f' % score_training),size=10,horizontalalignment='right') #score=trainin_acc(X_train,y_train,X_test,y_test,classifiers) #ax.text(x_max-0.5,y_min+0.1,('%.3f' % score[i-1]),size=8,horizontalalignment='right') print('score testing',test_score) #plt.colorbar(CS) i += 1 #printing our neural network accuracy # ax=plt.subplot(3,4,11) # ax.scatter(X_dataset[:, 0], X_dataset[:, 1], c=y_labels, cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(y_labels))[0])) # ax.scatter(dataset_test[:, 0], dataset_test[:, 1], c=labels_test, cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(labels_test))[0]),edgecolors='k', alpha=0.6) # ax.set_xticks(()) # ax.set_yticks(()) # ax.set_title("5-layers neural network") # score_fake=0.971 # maxi=X_dataset[:,0].max() # mini=X_dataset[:,0].min() # maxi1=X_dataset[:,1].max() # mini1=X_dataset[:,1].min() # print('x_max',maxi,'x_min',mini,'y_max',maxi1,'y_min',mini1) # #plt.axis([0,1,-1,1]) # ax.text(maxi,mini1,('%.3f' % score_fake),size=8,horizontalalignment='right') plt.tight_layout() plt.show() #def generate(args): if __name__ == '__main__': np.set_printoptions(threshold='nan') args = parser.parse_args() pickle_file = args.picklefile #pickle_file_output= args.pickle_file_new # Get the data from the folders with vtk files inputdata = inputData.inputData() fi = open(pickle_file,'rb') dataset=pickle.load(fi) test_labels =dataset["test_labels"] train_labels =dataset["train_labels"] valid_labels =dataset["valid_labels"] test_dataset =dataset["test_dataset"] train_dataset =dataset["train_dataset"] valid_dataset =dataset["valid_dataset"] print('counter',collections.Counter(train_labels)) #data_folders_train = inputdata.get_folder_classes_list(dataPathtrain) #data_folders_test = inputdata.get_folder_classes_list(dataPathtest) #pickled_datasets_train,vtklisttrain = inputdata.maybe_pickle(data_folders_train, 6, feature_points=args.feature_names) #pickled_datasets_test,vtklisttest = inputdata.maybe_pickle(data_folders_test, 0, feature_points=args.feature_names) #Create the labels, i.e., enumerate the groups #dataset_train,labels_train = get_labels(pickled_datasets_train) #print('pickled_datasets_train',pickled_datasets_train,'pickled_datasets_test',pickled_datasets_test) #dataset_test,labels_test = get_labels(pickled_datasets_test) # Compute the total number of shapes and train/test size total_number_shapes_train=train_dataset.shape[0] total_number_shapes_test=test_dataset.shape[0] print('total number of shapes train',np.shape(train_dataset)) print('total number of shapes test', np.shape(test_dataset)) print('labels to train',train_labels,'labels to test',test_labels) #num_train = int(args.train_size*total_number_shapes_train) #num_valid = int((total_number_shapes_train - num_train)*args.validation_size) # Randomize the original dataset #print('shape before randomize',dataset_train.shape) shuffled_dataset, shuffled_labels = inputdata.randomize(train_dataset, train_labels) #print('shape after randomize',shuffled_dataset.shape) #shuffled_dataset_test,shuffled_labels_test = inputdata.randomize(dataset_test,labels_test) shuffled_dataset = np.reshape(shuffled_dataset, (total_number_shapes_train, -1)) #print('shape after reshape',shuffled_dataset.shape) #shuffled_dataset_test = np .reshape(shuffled_dataset_test,(total_number_shapes_test,-1)) # Generate SMOTE with out including the valid/test samples, in some cases, this may raise an error # as the number of samples in one class is less than 5 and SMOTE cannot continue. Just run it again dataset_res,labels_res=generate_with_SMOTE(np.nan_to_num(shuffled_dataset),shuffled_labels) # SANITY CHECKS print('dataset train',np.shape(train_dataset)) print('labels train',np.shape(train_labels)) #print('dataset_res',np.shape(dataset_res)) #print('labels_res',np.shape(labels_res)) #print('num_train', num_train) #print('num_valid', num_valid) print('number of labels',np.shape(np.unique(train_labels))) #print('number of labels resampled',np.shape(np.unique(labels_res))) #print('Labels resampled',np.unique(labels_res).tolist()) print('test labels', test_labels) print('counter after SMOTE',collections.Counter(labels_res)) #SVM_classification(dataset_res,labels_res,dataset_test,labels_test) #clf=LinearSVC(random_state=0) #clf=GaussianProcessClassifier(1.0 * RBF(1.0)) #clf.fit(dataset_res,labels_res) #prediction = clf.predict(dataset_test) #for i in range(0,total_number_shapes_test): # head,tail = os.path.split(vtklisttest[i]) # print(tail,prediction[i]) #PCA_plot(dataset,labels,dataset_res,labels_res) try: f = open(pickle_file, 'wb') save = { 'train_dataset': dataset_res, 'train_labels': labels_res, 'valid_dataset': valid_dataset, 'valid_labels': valid_labels, 'test_dataset': test_dataset, 'test_labels': test_labels } pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) except Exception as e: print('Unable to save data to', pickle_file, ':', e) raise
{"hexsha": "0e57569e0da77d69da74031c9a26bb6efc835074", "size": 20161, "ext": "py", "lang": "Python", "max_stars_repo_path": "ShapeVariationAnalyzer/Resources/Classifier/generation_shapes.py", "max_stars_repo_name": "lbumbolo/ShapeVariationAnalyzer", "max_stars_repo_head_hexsha": "976e22cbacc87fb593d92e24cbdbba6c99a64060", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-09-05T19:49:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T16:48:37.000Z", "max_issues_repo_path": "ShapeVariationAnalyzer/Resources/Classifier/generation_shapes.py", "max_issues_repo_name": "lbumbolo/ShapeVariationAnalyzer", "max_issues_repo_head_hexsha": "976e22cbacc87fb593d92e24cbdbba6c99a64060", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2018-02-15T21:15:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T21:15:53.000Z", "max_forks_repo_path": "ShapeVariationAnalyzer/Resources/Classifier/generation_shapes.py", "max_forks_repo_name": "lbumbolo/ShapeVariationAnalyzer", "max_forks_repo_head_hexsha": "976e22cbacc87fb593d92e24cbdbba6c99a64060", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-02-23T21:17:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T15:23:57.000Z", "avg_line_length": 33.9410774411, "max_line_length": 174, "alphanum_fraction": 0.7052725559, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5357}
/** @file @author Alexander Sherikov @copyright 2017 Alexander Sherikov. Licensed under the Apache License, Version 2.0. (see LICENSE or http://www.apache.org/licenses/LICENSE-2.0) @brief */ #include "utf_common.h" #include <boost/mpl/vector.hpp> #include <qpmad/solver.h> #include <qpmad/testing.h> //=========================================================================== // ResolveFixture //=========================================================================== template <class t_Solver> class ResolveFixture { public: Eigen::VectorXd x; Eigen::MatrixXd H; Eigen::MatrixXd H_copy; Eigen::VectorXd h; Eigen::MatrixXd A; Eigen::VectorXd Alb; Eigen::VectorXd Aub; Eigen::VectorXd lb; Eigen::VectorXd ub; t_Solver solver; typename t_Solver::ReturnStatus status; qpmad::SolverParameters param; public: ResolveFixture() { qpmad::MatrixIndex size = 20; qpmad::MatrixIndex num_general_ctr = 1; qpmad_utils::getRandomPositiveDefiniteMatrix(H, size); H_copy = H; h.setOnes(size); A.resize(num_general_ctr, size); A.setOnes(); Alb.resize(num_general_ctr); Aub.resize(num_general_ctr); Alb << -1.5; Aub << 1.5; lb.resize(size); ub.resize(size); lb << 1, 2, 3, 4, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5; ub << 1, 2, 3, 4, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5; } void solve() { status = solver.solve(x, H, h, lb, ub, A, Alb, Aub, param); BOOST_CHECK_EQUAL(status, qpmad::Solver::OK); } }; using TypeListResolve = boost::mpl::vector<qpmad::Solver, qpmad::SolverTemplate<double, 20, 1, 1> >; BOOST_FIXTURE_TEST_CASE_TEMPLATE(resolve_with_cholesky, t_Solver, TypeListResolve, ResolveFixture<t_Solver>) { this->solve(); // Hessian changed; BOOST_CHECK(not this->H_copy.isApprox(this->H, g_default_tolerance)); // next iteration this->H_copy = this->H; Eigen::VectorXd x_copy = this->x; this->param.hessian_type_ = this->solver.getHessianType(); BOOST_CHECK_EQUAL(this->param.hessian_type_, qpmad::SolverParameters::HESSIAN_CHOLESKY_FACTOR); this->solve(); // Hessian not changed BOOST_CHECK(this->H_copy.isApprox(this->H, g_default_tolerance)); // solution is the same BOOST_CHECK(x_copy.isApprox(this->x, g_default_tolerance)); } BOOST_FIXTURE_TEST_CASE_TEMPLATE(resolve_with_inverted_cholesky, t_Solver, TypeListResolve, ResolveFixture<t_Solver>) { this->param.return_inverted_cholesky_factor_ = true; this->solve(); // Hessian changed; BOOST_CHECK(not this->H_copy.isApprox(this->H, g_default_tolerance)); // next iteration this->H_copy = this->H; Eigen::VectorXd x_copy = this->x; this->param.hessian_type_ = this->solver.getHessianType(); BOOST_CHECK_EQUAL(this->param.hessian_type_, qpmad::SolverParameters::HESSIAN_INVERTED_CHOLESKY_FACTOR); this->solve(); // Hessian not changed BOOST_CHECK(this->H_copy.isApprox(this->H, g_default_tolerance)); // solution is the same BOOST_CHECK(x_copy.isApprox(this->x, g_default_tolerance)); } //=========================================================================== // ResolveUnconstrainedFixture //=========================================================================== template <class t_Solver> class ResolveUnconstrainedFixture { public: Eigen::VectorXd x; Eigen::MatrixXd H; Eigen::MatrixXd H_copy; Eigen::VectorXd h; Eigen::VectorXd lb; Eigen::VectorXd ub; t_Solver solver; typename t_Solver::ReturnStatus status; qpmad::SolverParameters param; public: ResolveUnconstrainedFixture() { qpmad::MatrixIndex size = 20; qpmad_utils::getRandomPositiveDefiniteMatrix(H, size); H_copy = H; h.setOnes(size); lb.setConstant(size, -1e20); ub.setConstant(size, 1e20); lb(0) = 1; ub(0) = 1; } void solve() { status = solver.solve(x, H, h, lb, ub, param); BOOST_CHECK_EQUAL(status, qpmad::Solver::OK); } }; using TypeListResolveUnconstrained = boost::mpl::vector<qpmad::Solver, qpmad::SolverTemplate<double, 20, 1, 0> >; BOOST_FIXTURE_TEST_CASE_TEMPLATE( resolve_unconstrained_with_cholesky, t_Solver, TypeListResolveUnconstrained, ResolveUnconstrainedFixture<t_Solver>) { this->solve(); // Hessian changed; BOOST_CHECK(not this->H_copy.isApprox(this->H, g_default_tolerance)); // next iteration this->H_copy = this->H; Eigen::VectorXd x_copy = this->x; this->param.hessian_type_ = this->solver.getHessianType(); BOOST_CHECK_EQUAL(this->param.hessian_type_, qpmad::SolverParameters::HESSIAN_CHOLESKY_FACTOR); this->solve(); // Hessian not changed BOOST_CHECK(this->H_copy.isApprox(this->H, g_default_tolerance)); // solution is the same BOOST_CHECK(x_copy.isApprox(this->x, g_default_tolerance)); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( resolve_unconstrained_with_inverted_cholesky, t_Solver, TypeListResolveUnconstrained, ResolveUnconstrainedFixture<t_Solver>) { this->param.return_inverted_cholesky_factor_ = true; this->solve(); // Hessian changed; BOOST_CHECK(not this->H_copy.isApprox(this->H, g_default_tolerance)); // next iteration this->H_copy = this->H; Eigen::VectorXd x_copy = this->x; this->param.hessian_type_ = this->solver.getHessianType(); BOOST_CHECK_EQUAL(this->param.hessian_type_, qpmad::SolverParameters::HESSIAN_INVERTED_CHOLESKY_FACTOR); this->solve(); // Hessian not changed BOOST_CHECK(this->H_copy.isApprox(this->H, g_default_tolerance)); // solution is the same BOOST_CHECK(x_copy.isApprox(this->x, g_default_tolerance)); }
{"hexsha": "96d46a7a4af4b0affbadb75ed9c261c8a80116cf", "size": 5950, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/resolve.cpp", "max_stars_repo_name": "Aerobotics/qpmad", "max_stars_repo_head_hexsha": "b687654e2eb97f121c6161fe05abbc7594d7c7d4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 48.0, "max_stars_repo_stars_event_min_datetime": "2019-02-27T12:48:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T17:32:30.000Z", "max_issues_repo_path": "test/resolve.cpp", "max_issues_repo_name": "Aerobotics/qpmad", "max_issues_repo_head_hexsha": "b687654e2eb97f121c6161fe05abbc7594d7c7d4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2020-12-01T11:26:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-31T17:16:40.000Z", "max_forks_repo_path": "test/resolve.cpp", "max_forks_repo_name": "Aerobotics/qpmad", "max_forks_repo_head_hexsha": "b687654e2eb97f121c6161fe05abbc7594d7c7d4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 13.0, "max_forks_repo_forks_event_min_datetime": "2017-06-23T02:45:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T12:22:40.000Z", "avg_line_length": 27.9342723005, "max_line_length": 117, "alphanum_fraction": 0.6334453782, "num_tokens": 1596}
from abc import ABC, abstractmethod from collections import Counter from functools import reduce from typing import List, Tuple import numpy as np from sklearn.utils.linear_assignment_ import linear_assignment class Scorer(ABC): precision: float recall: float def get_scores(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) \ -> Tuple[float, float, float]: self._clear_memo() precision = self._compute_precision(predicted_chains, label_chains) recall = self._compute_recall(predicted_chains, label_chains) f1 = self._compute_f1(predicted_chains, label_chains) return precision, recall, f1 def _clear_memo(self): self._precision = None self._recall = None def _compute_f1(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: precision = self._compute_precision(predicted_chains, label_chains) recall = self._compute_recall(predicted_chains, label_chains) if precision + recall == 0: return 0 return 2 * precision * recall / (precision + recall) def _compute_precision(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: if self._precision is None: self._precision = self.compute_precision(predicted_chains, label_chains) return self._precision def _compute_recall(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: if self._recall is None: self._recall = self.compute_recall(predicted_chains, label_chains) return self._recall @abstractmethod def compute_precision(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: pass @abstractmethod def compute_recall(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: pass class MUCScorer(Scorer): def compute_precision(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: return self._general_compute(predicted_chains, label_chains) def compute_recall(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: return self._general_compute(label_chains, predicted_chains) def _general_compute(self, chain1: List[List[int]], chain2: List[List[int]]) -> float: nominator = 0 denominator = 0 for c1 in chain1: ki = len(c1) part_left = ki partition = 0 for c2 in chain2: found = False for s in c2: if s in c1: found = True part_left -= 1 if found: partition += 1 nominator += (ki - (partition + part_left)) denominator += ki - 1 if denominator == 0: return 0 return nominator / denominator class B3Scorer(Scorer): def compute_precision(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: return self._general_compute(predicted_chains, label_chains) def compute_recall(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: return self._general_compute(label_chains, predicted_chains) def _general_compute(self, chain1: List[List[int]], chain2: List[List[int]]) -> float: mention_to_gold = {} for c in chain2: for m in c: mention_to_gold[m] = c num, dem = 0, 0 for c in chain1: if len(c) == 1: continue gold_counts = Counter() correct = 0 for m in c: if m in mention_to_gold: gold_counts[tuple(mention_to_gold[m])] += 1 for c2, count in gold_counts.items(): if len(c2) != 1: correct += count * count num += correct / float(len(c)) dem += len(c) if dem == 0: return 0 return num / dem class CEAFeScorer(Scorer): similarity: int = None def reset(self) -> None: self.similarity = None def compute_precision(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: return self._compute_similarity(predicted_chains, label_chains) / len(predicted_chains) def compute_recall(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: return self._compute_similarity(predicted_chains, label_chains) / len(label_chains) def _compute_similarity(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> int: if self.similarity is not None: return self.similarity predicted_chains = [c for c in predicted_chains if len(c) != 1] label_chains = [c for c in label_chains if len(c) != 1] scores = np.zeros((len(label_chains), len(predicted_chains))) for i in range(len(label_chains)): for j in range(len(predicted_chains)): scores[i, j] = self._compute_phi4(label_chains[i], predicted_chains[j]) matching = linear_assignment(-scores) similarity = sum(scores[matching[:, 0], matching[:, 1]]) self.similarity = similarity return self.similarity def _compute_phi4(self, c1: List[List[int]], c2: List[List[int]]) -> float: return 2 * len([m for m in c1 if m in c2]) / float(len(c1) + len(c2)) class AverageScorer(Scorer): score: float def __init__(self, scorers: List[Scorer]): self.scorers = scorers def get_scores(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) \ -> Tuple[float, float, float]: self.score = None return super().get_scores(predicted_chains, label_chains) def compute_precision(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: return self._compute_score(predicted_chains, label_chains) def compute_recall(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: return self._compute_score(predicted_chains, label_chains) def _compute_score(self, predicted_chains: List[List[int]], label_chains: List[List[int]]) -> float: if self.score is not None: return self.score if len(self.scorers) == 0: self.score = 0 return self.score sum_f1 = reduce(lambda prv, scorer: prv + scorer.get_scores(predicted_chains, label_chains)[2], self.scorers, 0) self.score = sum_f1 / len(self.scorers) return self.score
{"hexsha": "b85af3d096aa994d215bdfe67dea5b5a58fc89aa", "size": 6745, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/scorers.py", "max_stars_repo_name": "tugas-akhir-nlp/coreference-resolution-cnn-v2", "max_stars_repo_head_hexsha": "b112893b3bd7b893e3830e183aa79acff8af9896", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/scorers.py", "max_issues_repo_name": "tugas-akhir-nlp/coreference-resolution-cnn-v2", "max_issues_repo_head_hexsha": "b112893b3bd7b893e3830e183aa79acff8af9896", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/scorers.py", "max_forks_repo_name": "tugas-akhir-nlp/coreference-resolution-cnn-v2", "max_forks_repo_head_hexsha": "b112893b3bd7b893e3830e183aa79acff8af9896", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4132653061, "max_line_length": 120, "alphanum_fraction": 0.6314306894, "include": true, "reason": "import numpy", "num_tokens": 1601}
! This test checks lowering of OpenMP threadprivate Directive. // RUN: not flang-new -fc1 -emit-fir -fopenmp %s 2>&1 | FileCheck %s program main integer, save :: x, y // CHECK: not yet implemented: OpenMPThreadprivate !$omp threadprivate(x, y) end
{"hexsha": "da1981a299b71c7e48a6d61914385ecb03e9e81c", "size": 255, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "flang/test/Lower/OpenMP/Todo/omp-threadprivate.f90", "max_stars_repo_name": "ornata/llvm-project", "max_stars_repo_head_hexsha": "494913b8b4e4bce0b3525e5569d8e486e82b9a52", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "flang/test/Lower/OpenMP/Todo/omp-threadprivate.f90", "max_issues_repo_name": "ornata/llvm-project", "max_issues_repo_head_hexsha": "494913b8b4e4bce0b3525e5569d8e486e82b9a52", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "flang/test/Lower/OpenMP/Todo/omp-threadprivate.f90", "max_forks_repo_name": "ornata/llvm-project", "max_forks_repo_head_hexsha": "494913b8b4e4bce0b3525e5569d8e486e82b9a52", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.1818181818, "max_line_length": 68, "alphanum_fraction": 0.7137254902, "num_tokens": 77}
import utils import sklearn import tensorflow.compat.v1 as tf import numpy as np def tf_dataset(batch_pc_gen): while True: yield next(batch_pc_gen) def get_dataset(batch_pc_gen, batch_size): with tf.device('/device:CPU:0'): ds = tf.data.Dataset.from_generator(lambda: tf_dataset(batch_pc_gen), tf.float32, (batch_size, None, 6)) if tf.test.is_gpu_available(): ds = ds.apply(tf.data.experimental.prefetch_to_device('/device:GPU:0', 4)) return ds def pc_batcher(x): return np.array(list(y[0][:, :6] for y in x)) class Dataset: def __init__(self, gen, batch_size): # Set up data pipelines self.gen = gen self.batcher = lambda x: pc_batcher(x) self.batch_gen = utils.generators.BatchGenerator(self.gen, batch_size, self.batcher) # TF graph data pipeline self.batch_ds = get_dataset(self.batch_gen, batch_size) self.iterator = tf.data.make_one_shot_iterator(self.batch_ds) self.output_types = tf.data.get_output_types(self.batch_ds) self.output_shapes = tf.data.get_output_shapes(self.batch_ds) self.string_handle = self.iterator.string_handle class InputPipeline: def __init__(self, files, batch_size, train_test_split, infinite_data, test_size=0.1): self.files = files def data_transform(data): if infinite_data: return utils.generators.sampling_generator(data) else: return iter(data) if train_test_split: files_train, files_test = sklearn.model_selection.train_test_split(files, test_size=test_size) self.len_train, self.len_test = len(files_train) // batch_size, len(files_test) // batch_size data_train = utils.pc_io.load_points(files_train) data_test = utils.pc_io.load_points(files_test) self.pc_ds_test = Dataset(data_transform(data_test), batch_size) self.pc_ds_train = Dataset(data_transform(data_train), batch_size) # Train/Test switching self.handle = tf.placeholder(tf.string, shape=[]) iterator = tf.data.Iterator.from_string_handle(self.handle, self.pc_ds_train.output_types, self.pc_ds_train.output_shapes) self.next_element = iterator.get_next() else: self.data = utils.pc_io.load_points(files) self.pc_ds = Dataset(data_transform(self.data), batch_size) self.next_element = self.pc_ds.iterator.get_next()
{"hexsha": "94178e5727f84851a6d7bb83c9989a52416c0940", "size": 2578, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/80_input.py", "max_stars_repo_name": "mauriceqch/pcc_attr_folding", "max_stars_repo_head_hexsha": "2fc37de7fb146a724ebada2e39df51de272fa01a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-06-26T12:47:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T08:33:53.000Z", "max_issues_repo_path": "src/80_input.py", "max_issues_repo_name": "mauriceqch/pcc_attr_folding", "max_issues_repo_head_hexsha": "2fc37de7fb146a724ebada2e39df51de272fa01a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-06-24T04:35:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-12T06:32:16.000Z", "max_forks_repo_path": "src/80_input.py", "max_forks_repo_name": "mauriceqch/pcc_attr_folding", "max_forks_repo_head_hexsha": "2fc37de7fb146a724ebada2e39df51de272fa01a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-24T02:26:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-20T04:51:42.000Z", "avg_line_length": 37.9117647059, "max_line_length": 112, "alphanum_fraction": 0.659038014, "include": true, "reason": "import numpy", "num_tokens": 543}
// Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2017 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <pow.h> #include <arith_uint256.h> #include <boost/multiprecision/cpp_int.hpp> #include <chain.h> #include <primitives/block.h> #include <uint256.h> #include <chainparams.h> #include <crypto/equihash.h> #include <streams.h> #include <util.h> #include <ed25519/ed25519.h> #include "validation.h" unsigned int GetNextWorkRequired(const CBlockIndex* pindexLast, const CBlockHeader *pblock, const Consensus::Params& params) { assert(pindexLast != nullptr); // Only change once per difficulty adjustment interval if ((pindexLast->nHeight+1) % params.DifficultyAdjustmentInterval() != 0) { unsigned int nProofOfWorkLimit = UintToArith256(params.powLimit).GetCompact(); if (pindexLast->nHeight < 60500) { if (params.fPowAllowMinDifficultyBlocks) { // Special difficulty rule for testnet: // If the new block's timestamp is more than 2* 10 minutes // then allow mining of a min-difficulty block. if (pblock->GetBlockTime() > pindexLast->GetBlockTime() + params.nPowTargetSpacing*2) return nProofOfWorkLimit; else { // Return the last non-special-min-difficulty-rules-block const CBlockIndex* pindex = pindexLast; while (pindex->pprev && pindex->nHeight % params.DifficultyAdjustmentInterval() != 0 && pindex->nBits == nProofOfWorkLimit) pindex = pindex->pprev; return pindex->nBits; } } return pindexLast->nBits; } else { // Return the last non-special-min-difficulty-rules-block const CBlockIndex* pindex = pindexLast; while (pindex->pprev && pindex->nHeight % params.DifficultyAdjustmentInterval() != 0 && pindex->nBits == nProofOfWorkLimit) pindex = pindex->pprev; return pindex->nBits; } } // Go back by what we want to be 14 days worth of blocks int nHeightFirst = pindexLast->nHeight - (params.DifficultyAdjustmentInterval()-1); assert(nHeightFirst >= 0); const CBlockIndex* pindexFirst = pindexLast->GetAncestor(nHeightFirst); assert(pindexFirst); return CalculateNextWorkRequired(pindexLast, pindexFirst->GetBlockTime(), params); } static boost::multiprecision::uint512_t UintToCpp512(const uint256 & n) { std::string n_string = n.ToString(); return boost::multiprecision::uint512_t("0x" + n_string); } unsigned int CalculateNextWorkRequired(const CBlockIndex* pindexLast, int64_t nFirstBlockTime, const Consensus::Params& params) { if (params.fPowNoRetargeting) return pindexLast->nBits; // Limit adjustment step int64_t nActualTimespan = pindexLast->GetBlockTime() - nFirstBlockTime; if (nActualTimespan < params.nPowTargetTimespan/4) nActualTimespan = params.nPowTargetTimespan/4; if (nActualTimespan > params.nPowTargetTimespan*4) nActualTimespan = params.nPowTargetTimespan*4; // Retarget auto bnPowLimit = UintToCpp512(params.powLimit); arith_uint256 bnNewtmp; unsigned int nProofOfWorkLimit = UintToArith256(params.powLimit).GetCompact(); // Use the last non-special-min-difficulty-rules-block if (pindexLast->nHeight > 51840) { const CBlockIndex* pindex = pindexLast; while (pindex->pprev && pindex->nHeight % params.DifficultyAdjustmentInterval() != 0 && pindex->nBits == nProofOfWorkLimit) pindex = pindex->pprev; bnNewtmp.SetCompact(pindex->nBits); } else { bnNewtmp.SetCompact(pindexLast->nBits); } boost::multiprecision::uint512_t bnNew = UintToCpp512(ArithToUint256(bnNewtmp)); // bnNew.SetCompact(pindexLast->nBits); bnNew *= nActualTimespan; bnNew /= params.nPowTargetTimespan; if (bnNew > bnPowLimit) bnNew = bnPowLimit; std::stringstream converted_stream; converted_stream << std::hex << std::showbase << bnNew; std::string converted_string = converted_stream.str(); return UintToArith256(uint256S(converted_string)).GetCompact(); } bool CheckEquihashSolution(const CBlockHeader *pblock, const CChainParams& params) { unsigned int n = params.EquihashN(); unsigned int k = params.EquihashK(); // Hash state blake2b_state state; EhInitialiseState(n, k, state); // I = the block header minus nonce and solution. CEquihashInput I{*pblock}; // I||V CDataStream ss(SER_NETWORK, PROTOCOL_VERSION); ss << I; ss << pblock->nNonce; // H(I||V||... blake2b_update(&state, (unsigned char*)&ss[0], ss.size()); bool isValid; EhIsValidSolution(n, k, state, pblock->nSolution, isValid); if (!isValid) return error("CheckEquihashSolution(): invalid solution"); return true; } bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params& params) { bool fNegative; bool fOverflow; arith_uint256 bnTarget; bnTarget.SetCompact(nBits, &fNegative, &fOverflow); // Check range if (fNegative || bnTarget == 0 || fOverflow || bnTarget > UintToArith256(params.powLimit)) return false; // Check proof of work matches claimed amount if (UintToArith256(hash) > bnTarget) return false; return true; } bool CheckAuthorization(const CBlock *pblock, const CChainParams& params) { CBlockIndex * chainIndex = chainActive.Tip(); if (chainIndex == nullptr || params.GetConsensus().authorizationForkHeight <= 0 || chainIndex->nHeight + 1 < params.GetConsensus().authorizationForkHeight) { return true; } if (!params.GetConsensus().authorizationKey.IsFullyValid()) { return true; } if (pblock->vtx.empty() || !pblock->vtx[0]->IsCoinBase()) { return false; } const CTransaction& coinbase = *pblock->vtx[0]; CScript scriptSig = coinbase.vin[0].scriptSig; // 0x40 + 64个字节的signature if (scriptSig.size() < 65) { return false; } CScript::const_iterator pc = scriptSig.begin(); // 第一个元素是区块高度 const int nHeight = chainIndex->nHeight + 1; CScript nHeightScript = CScript() << nHeight; // 第二个元素是timestamp CScript nTimeScript = CScript() << pblock->nTime; pc = pc + nHeightScript.size() + nTimeScript.size(); std::vector<unsigned char> sig; opcodetype opcode; if (!scriptSig.GetOp(pc, opcode, sig)) { return false; } // signature长度是64 if (opcode != 64) { return false; } CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION); for(auto vout : coinbase.vout) { ss << vout; } auto hash = ss.GetHash(); return params.GetConsensus().authorizationKey.Verify(hash, sig); }
{"hexsha": "329b4e896627e9b388ef65d40ee4c7a56ad87f9a", "size": 6982, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/pow.cpp", "max_stars_repo_name": "ecoinchain/ecoin", "max_stars_repo_head_hexsha": "c46f87f04c0e66df5f035baf21acc00dcd009037", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2018-03-14T16:59:21.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-12T10:38:09.000Z", "max_issues_repo_path": "src/pow.cpp", "max_issues_repo_name": "solomanhl/ecoin", "max_issues_repo_head_hexsha": "c46f87f04c0e66df5f035baf21acc00dcd009037", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pow.cpp", "max_forks_repo_name": "solomanhl/ecoin", "max_forks_repo_head_hexsha": "c46f87f04c0e66df5f035baf21acc00dcd009037", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2018-10-23T05:16:49.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-14T04:44:34.000Z", "avg_line_length": 32.9339622642, "max_line_length": 143, "alphanum_fraction": 0.6659982813, "num_tokens": 1851}
import numpy as np from pyKriging.krige import kriging class MyKriging(kriging): def __init__(self,*args,**kwargs): kriging.__init__(self,*args,**kwargs) def kdata(self): # Create a set of data to plot plotgrid = 61 x = np.linspace(0, 1, num=plotgrid) y = np.linspace(0, 1, num=plotgrid) X, Y = np.meshgrid(x, y) # Predict based on the optimized results zs = np.array([self.predict([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))]) Z = zs.reshape(X.shape) #Calculate errors zse = np.array([self.predict_var([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))]) Ze = zse.reshape(X.shape) #Sample point spx = (self.X[:,0] * (self.normRange[0][1] - self.normRange[0][0])) + self.normRange[0][0] spy = (self.X[:,1] * (self.normRange[1][1] - self.normRange[1][0])) + self.normRange[1][0] return X,Y,Z,Ze,spx,spy
{"hexsha": "e2b5d21a82704688d019b5c097786ea67c548541", "size": 940, "ext": "py", "lang": "Python", "max_stars_repo_path": "sanrr/metamodel.py", "max_stars_repo_name": "ddfabbro/SANRR", "max_stars_repo_head_hexsha": "aa5b71b1e8ac1e0471828922ff50e098d550a157", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-18T02:53:12.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-18T02:53:12.000Z", "max_issues_repo_path": "sanrr/metamodel.py", "max_issues_repo_name": "ddfabbro/SANRR", "max_issues_repo_head_hexsha": "aa5b71b1e8ac1e0471828922ff50e098d550a157", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sanrr/metamodel.py", "max_forks_repo_name": "ddfabbro/SANRR", "max_forks_repo_head_hexsha": "aa5b71b1e8ac1e0471828922ff50e098d550a157", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.7272727273, "max_line_length": 99, "alphanum_fraction": 0.5808510638, "include": true, "reason": "import numpy", "num_tokens": 294}
import random import numpy as np import gym import imageio # write env render to mp4 import datetime from collections import deque import tensorflow as tf from tensorflow.keras import Sequential from tensorflow.keras.layers import Dense, Input, Conv2D, Flatten from tensorflow.keras.optimizers import Adam from tensorflow.keras.models import Model ''' Original paper: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf - DQN model with Dense layers only - Model input is changed to take current and n previous states where n = time_steps - Multiple states are concatenated before given to the model - Uses target model for more stable training - More states was shown to have better performance for CartPole env ''' class DQN: def __init__( self, memory_cap=3000, time_steps=3, gamma=0.85, epsilon=1.0, epsilon_decay=0.995, epsilon_min=0.01, learning_rate=0.005, batch_size=256, tau=0.125 ): self.env = EnvDrones(map_size=50, drone_num=1, view_range=10, tree_num=30, human_num=1) self.full_state_shape = self.env.full_state_shape self.drones_shape = self.env.drones_shape self.action_dim = self.env.action_dim self.memory = deque(maxlen=memory_cap) self.time_steps = time_steps #self.stored_states = np.zeros((self.time_steps, self.drones_shape)) self.gamma = gamma # discount factor self.epsilon = epsilon # amount of randomness in e-greedy policy self.epsilon_min = epsilon_min self.epsilon_decay = epsilon_decay # exponential decay self.learning_rate = learning_rate self.batch_size = batch_size self.tau = tau # target model update self.model = self.create_model() self.target_model = self.create_model() self.target_model.trainable = False self.target_model.set_weights(self.model.get_weights()) self.optimizer = tf.keras.optimizers.Adam(lr=self.learning_rate) self.summaries = {} def create_model(self): input1 = Input(shape=self.drones_shape) input2 = Input(shape=self.full_state_shape) conv1 = Conv2D(16, kernel_size=[4, 4], strides=[1, 1], activation='relu', padding="valid")(input1) conv2 = Conv2D(16, kernel_size=[4, 4], strides=[1, 1], activation='relu', padding="valid")(input2) f1 = Flatten()(conv1) f2 = Flatten()(conv2) f = tf.concat((f1, f2), axis=1) hidden = Dense(64, activation="relu")(f) hidden = Dense(32, activation="relu")(hidden) q = Dense(self.env.action_dim)(hidden) model = Model(inputs=[input1,input2], outputs=q) return model def update_states(self, new_state): # move the oldest state to the end of array and replace with new state self.stored_states = np.roll(self.stored_states, -1, axis=0) self.stored_states[-1] = new_state def act(self, drone_obs, states, test=False): self.epsilon *= self.epsilon_decay self.epsilon = max(self.epsilon_min, self.epsilon) epsilon = 0.01 if test else self.epsilon # use epsilon = 0.01 when testing q_values = self.model.predict([drone_obs, states])[0] self.summaries['q_val'] = max(q_values) if np.random.random() < epsilon: return np.random.randint(0, self.action_dim) # sample random action return np.argmax(q_values) def remember(self, state, action, reward, new_state, done, all, all_): self.memory.append([state, action, reward, new_state, done, all, all_]) def replay(self): if len(self.memory) < self.batch_size: return samples = random.sample(self.memory, self.batch_size) s = [] a = [] r = [] s_ = [] done = [] all_s = [] all_s_ = [] for sample in samples: states, action, reward, new_states, d, all, all_ = sample s.append(states) a.append(action) r.append(reward) s_.append(new_states) all_s.append(all) all_s_.append(all_) if d: done.append([1]) else: done.append([0]) done = np.asarray(done) q_next = self.target_model([np.asarray(s_), np.asarray(all_s)]) q_target = r + self.gamma * (1 - done) * tf.reduce_max(q_next, axis=1, keepdims=True) with tf.GradientTape() as tape: q = self.model([np.asarray(s), np.asarray(all_s_)]) # (batch_size, s_shape*time_step) q_eval = tf.gather(params=q, indices=np.asarray(a), axis=1, batch_dims=1) td_error = q_target - q_eval q_loss = tf.reduce_mean(tf.square(td_error)) grads = tape.gradient(q_loss, self.model.trainable_variables) self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables)) self.summaries['loss'] = q_loss def target_update(self): weights = self.model.get_weights() target_weights = self.target_model.get_weights() for i in range(len(target_weights)): # set tau% of target model to be new weights target_weights[i] = weights[i] * self.tau + target_weights[i] * (1 - self.tau) self.target_model.set_weights(target_weights) def save_model(self, fn): # save model to file, give file name with .h5 extension self.model.save(fn) def load_model(self, fn): # load model from .h5 file self.model = tf.keras.models.load_model(fn) self.target_model = self.create_model() self.target_model.set_weights(self.model.get_weights()) def train(self, max_episodes=1000, max_steps=100, save_freq=10): current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") train_log_dir = 'logs/DQN_basic_time_step{}/'.format(self.time_steps) + current_time summary_writer = tf.summary.create_file_writer(train_log_dir) episode = 0 epoch = 0 while episode < max_episodes: self.env.reset() done, steps, total_reward = False, 0, 0 cur_states = self.env.get_drones_obs() all_s = self.env.get_full_obs() while steps < max_steps: action = self.act(cur_states, all_s) # model determine action, states taken from self.stored_states reward, done = self.env.step(human_act_list=[np.random.randint(0,4) for i in range(self.env.human_num)], drone_act_list=[action]) # perform action on env new_state = self.env.get_drones_obs() all_s_ = self.env.get_full_obs() self.remember(cur_states[0], [action], reward, new_state[0], done, all_s[0], all_s_[0]) # add to memory cur_states = new_state all_s = all_s_ self.replay() # iterates default (prediction) model through memory replay if steps%10==0: self.target_update() # iterates target model total_reward += reward[0] steps += 1 epoch += 1 if done: #if episode % save_freq == 0: # save model every n episodes #self.save_model("dqn_basic_episode{}_time_step{}.h5".format(episode, self.time_steps)) break # Tensorboard update with summary_writer.as_default(): if len(self.memory) > self.batch_size: tf.summary.scalar('Stats/loss', self.summaries['loss'], step=epoch) tf.summary.scalar('Stats/q_val', self.summaries['q_val'], step=epoch) tf.summary.scalar('Main/step_reward', reward[0], step=epoch) with summary_writer.as_default(): tf.summary.scalar('Main/episode_reward', total_reward, step=episode) tf.summary.scalar('Main/episode_steps', steps, step=episode) summary_writer.flush() print("episode {}: steps:{} {} reward".format(episode, steps, total_reward)) episode += 1 self.save_model("./model/dqn_basic_final_episode{}_time_step{}.h5".format(episode, self.time_steps)) def test(self,max_episodes=300, max_steps=100): self.load_model(fn="./model/dqn_basic_final_episode{}_time_step{}.h5".format(1,1)) episode = 0 while episode < max_episodes: self.env.reset() done, steps, total_reward = False, 0, 0 cur_states = self.env.get_drones_obs() while steps < max_steps: action = self.act(states=cur_states) # model determine action, states taken from self.stored_states reward, done = self.env.drone_step(drone_act_list=[action]) # perform action on env new_state = self.env.get_drones_obs() cur_states = new_state total_reward += reward[0] steps += 1 if done: # if episode % save_freq == 0: # save model every n episodes # self.save_model("dqn_basic_episode{}_time_step{}.h5".format(episode, self.time_steps)) break print("episode {}: steps:{} {} reward".format(episode, steps, total_reward)) episode += 1 from MAEnv.env_Drones.env_Drones import EnvDrones if __name__ == "__main__": dqn_agent = DQN() # dqn_agent.load_model("basic_models/time_step4/dqn_basic_episode50_time_step4.h5") # rewards = dqn_agent.test() # print("Total rewards: ", rewards) dqn_agent.train()
{"hexsha": "7b97ee40a99f7e0082ec1b8f42a119d9c3db0221", "size": 9711, "ext": "py", "lang": "Python", "max_stars_repo_path": "DQN_Drones.py", "max_stars_repo_name": "Abluceli/Multi-agent-Reinforcement-Learning-Algorithms", "max_stars_repo_head_hexsha": "15810a559e2f2cf9e5fcb158c083f9e9dd6012fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-05-25T03:08:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-27T05:57:28.000Z", "max_issues_repo_path": "DQN_Drones.py", "max_issues_repo_name": "Abluceli/Multi-agent-Reinforcement-Learning-Algorithms", "max_issues_repo_head_hexsha": "15810a559e2f2cf9e5fcb158c083f9e9dd6012fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-22T01:35:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T01:51:06.000Z", "max_forks_repo_path": "DQN_Drones.py", "max_forks_repo_name": "Abluceli/Multi-agent-Reinforcement-Learning-Algorithms", "max_forks_repo_head_hexsha": "15810a559e2f2cf9e5fcb158c083f9e9dd6012fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-06T01:56:55.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-06T01:56:55.000Z", "avg_line_length": 44.1409090909, "max_line_length": 170, "alphanum_fraction": 0.6138399753, "include": true, "reason": "import numpy", "num_tokens": 2259}
from torchvision import datasets, transforms from torch.utils.data import Dataset, DataLoader import torch import torchvision import numpy as np from PIL import ImageFilter, Image from tqdm import tqdm import pandas as pd import random from typing import Callable, Optional import os class ImageNetSubset(datasets.ImageFolder): def __init__( self, root: str, transform: Optional[Callable] = None, indices = None ): super(ImageNetSubset, self).__init__(root, transform=transform) self.indices = indices def __getitem__(self, index): path, target = self.samples[self.indices[index]] sample = self.loader(path) if self.transform is not None: sample = self.transform(sample) return sample, target, self.indices[index] def __len__(self): return len(self.indices) class CIFAR100Subset(Dataset): def __init__(self, path, transform, indices): self.cifar100 = datasets.CIFAR100(root=path, download=True, train=True, transform=transform) self.indices = indices def __getitem__(self, index): data, target = self.cifar100[self.indices[index]] return data, target, self.indices[index] def __len__(self): return len(self.indices) class CIFAR10Subset(Dataset): def __init__(self, path, transform, indices): self.cifar10 = datasets.CIFAR10(root=path, download=True, train=True, transform=transform) self.indices = indices def __getitem__(self, index): data, target = self.cifar10[self.indices[index]] return data, target, self.indices[index] def __len__(self): return len(self.indices) class LT_Dataset(Dataset): def __init__(self, root, txt, transform=None, indices=None): self.img_path = [] self.labels = [] self.transform = transform self.indices = indices with open(txt) as f: for line in f: self.img_path.append(os.path.join(root, line.split()[0])) self.labels.append(int(line.split()[1])) if self.indices is not None: self.img_path = [self.img_path[i] for i in self.indices] self.labels = [self.labels[i] for i in self.indices] def __len__(self): return len(self.labels) def __getitem__(self, index): path = self.img_path[index] label = self.labels[index] with open(path, 'rb') as f: sample = Image.open(f).convert('RGB') if self.transform is not None: sample = self.transform(sample) return sample, label, index class TwoCropsTransform: """Take two random crops of one image as the query and key.""" def __init__(self, base_transform): self.base_transform = base_transform def __call__(self, x): q = self.base_transform(x) k = self.base_transform(x) return [q, k] class GaussianBlur(object): """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709""" def __init__(self, sigma=[.1, 2.]): self.sigma = sigma def __call__(self, x): sigma = random.uniform(self.sigma[0], self.sigma[1]) x = x.filter(ImageFilter.GaussianBlur(radius=sigma)) return x class ImageFolderEx(datasets.ImageFolder) : def __getitem__(self, index): sample, target = super(ImageFolderEx, self).__getitem__(index) return index, sample, target
{"hexsha": "2f76fc3ec6ccb98fc6412b378aed665e0a85064c", "size": 3789, "ext": "py", "lang": "Python", "max_stars_repo_path": "custom_datasets.py", "max_stars_repo_name": "UCDvision/low-budget-al", "max_stars_repo_head_hexsha": "32f927da55ac20561938147e126c9faf6c113234", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-10-30T03:38:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T11:20:50.000Z", "max_issues_repo_path": "custom_datasets.py", "max_issues_repo_name": "UCDvision/low-budget-al", "max_issues_repo_head_hexsha": "32f927da55ac20561938147e126c9faf6c113234", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "custom_datasets.py", "max_forks_repo_name": "UCDvision/low-budget-al", "max_forks_repo_head_hexsha": "32f927da55ac20561938147e126c9faf6c113234", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.575, "max_line_length": 79, "alphanum_fraction": 0.5925046186, "include": true, "reason": "import numpy", "num_tokens": 803}
using Weiqi import Weiqi: empty, black, white, magnitude, cb # Chinese rules https://www.cs.cmu.edu/~wjh/go/rules/Chinese.html abstract type Player end struct Blackplayer <: Player end struct Whiteplayer <: Player end mutable struct NewPosition{T<:Player} player::T coords::Tuple{Int64, Int64} stone::Stone end bp = Blackplayer() wp = Whiteplayer() function nextplayer(newpos) if newpos.player == bp nextplayer = wp elseif newpos.player == wp nextplayer = bp else nextplayer == bp end end # incomplete testing "If a `player` chooses `coords == [0,0]`, `player` passes and `nextplayer(np)` is called" function pass(newpos) if newpos.player == bp && newpos.coords == [0,0] pass = Black elseif newpos.player == wp && newpos.coords == [0,0] pass = White else println("No passes") end end "Lists all cardinal directions (empty or not) around a stone" function neighbors(cb, row::Int64, col::Int64) neighbor_list = Tuple{Int, Int}[] if row != 1 push!(neighbor_list, (row-1, col)) end if row != size(cb, 1) push!(neighbor_list, (row+1, col)) end if col != size(cb, 2) push!(neighbor_list, (row, col+1)) end if col != 1 push!(neighbor_list, (row, col-1)) end neighbor_list end "Searches for empty cardinal directions (liberties) around a stone or group of stones" function liberties(cb, row::Int64, col::Int64) stone = cb.array[row, col] checked = fill(false, size(cb)) # heap allocation checked[row, col] = true # mark true for visited (row, col) (loop invariant) open_set = [] # non-visited nodes closed_set = [] # visited nodes for neighbor ∈ neighbors(cb, row, col) neighbor_row, neighbor_col = neighbor if !checked[neighbor_row, neighbor_col] # if (row, col) not visited if cb.array[neighbor_row, neighbor_col] == stone # if i equals arg push!(open_set, neighbor) elseif cb.array[neighbor_row, neighbor_col] == empty push!(closed_set, neighbor) # liberties end checked[neighbor_row, neighbor_col] = true # loop invariant for correct termination end end while !isempty(open_set) coords = shift!(open_set) # check neighbors of this coordinate and do the same as above end closed_set # liberties end function removal end function forbidden end function gameover end function winner end
{"hexsha": "13955a6d429225061f7bd1d32aa8822b1a96c5ce", "size": 2506, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/chineserules.jl", "max_stars_repo_name": "hpoit/Weiqi.jl", "max_stars_repo_head_hexsha": "be2533a50bf2e2cb48d3efabfb4fe000034d5c94", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/chineserules.jl", "max_issues_repo_name": "hpoit/Weiqi.jl", "max_issues_repo_head_hexsha": "be2533a50bf2e2cb48d3efabfb4fe000034d5c94", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/chineserules.jl", "max_forks_repo_name": "hpoit/Weiqi.jl", "max_forks_repo_head_hexsha": "be2533a50bf2e2cb48d3efabfb4fe000034d5c94", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4772727273, "max_line_length": 95, "alphanum_fraction": 0.6456504389, "num_tokens": 659}
[STATEMENT] lemma sig_red_tail_lt_rep_list: "sig_red sing_reg (\<prec>) F p q \<Longrightarrow> punit.lt (rep_list q) = punit.lt (rep_list p)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. sig_red sing_reg (\<prec>) F p q \<Longrightarrow> punit.lt (rep_list q) = punit.lt (rep_list p) [PROOF STEP] by (auto simp: sig_red_def intro: sig_red_single_tail_lt_rep_list)
{"llama_tokens": 150, "file": "Signature_Groebner_Signature_Groebner", "length": 1}
# Copyright 2018 Samuel Payne sam_payne@byu.edu # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pandas as pd import numpy as np import os import warnings import datetime from cptac.dataset import Dataset from cptac.dataframe_tools import * from cptac.exceptions import FailedReindexWarning, PublicationEmbargoWarning, ReindexMapError class Harmonized(Dataset): def __init__(self, no_internet, version, filter_type): """Load all of the mssmclinical dataframes as values in the self._data dict variable, with names as keys, and format them properly. Parameters: version (str, optional): The version number to load, or the string "latest" to just load the latest building. Default is "latest". no_internet (bool, optional): Whether to skip the index update step because it requires an internet connection. This will be skipped automatically if there is no internet at all, but you may want to manually skip it if you have a spotty internet connection. Default is False. """ # Set some needed variables, and pass them to the parent Dataset class __init__ function # This keeps a record of all versions that the code is equipped to handle. That way, if there's a new data release but they didn't update their package, it won't try to parse the new data version it isn't equipped to handle. valid_versions = ["1.0"] data_files = { "1.0": [ "PanCan_Union_Maf_Broad_WashU.maf" ] } # Call the parent class __init__ function super().__init__(cancer_type='harmonized', version=version, valid_versions=valid_versions, data_files=data_files, no_internet=no_internet) # changed 'mssmclinical' to cancer_type # Load the data into dataframes in the self._data dict loading_msg = f"Loading {self.get_cancer_type()} v{self.version()}" for file_path in self._data_files_paths: # Loops through files variable # Print a loading message. We add a dot every time, so the user knows it's not frozen. loading_msg = loading_msg + "." print(loading_msg, end='\r') path_elements = file_path.split(os.sep) # Get a list of the levels of the path file_name = path_elements[-1] # The last element will be the name of the file. We'll use this to identify files for parsing in the if/elif statements below # Get tumor_code tumor_codes = {'pancanbrca': 'BRCA', 'pancanccrcc':'CCRCC', 'pancanucec':'UCEC','pancangbm':'GBM','pancanhnscc':'HNSCC', 'pancanlscc': 'LSCC','pancanluad':'LUAD', 'pancanpdac':'PDAC', 'pancanhcc':'HCC','pancancoad':'COAD','pancanov':'OV'} if file_name == "PanCan_Union_Maf_Broad_WashU.maf": df = pd.read_csv(file_path, sep="\t", low_memory = False) df = df.loc[df['COHORT'] == tumor_codes[filter_type]] df['Patient_ID'] = df.loc[:, 'Tumor_Sample_Barcode'] df = df.rename(columns={ "Hugo_Symbol":"Gene", "Variant_Classification":"Mutation", "Protein_Change":"Location"}) df = df.set_index("Patient_ID") df = df[ ['Gene'] + ["Mutation"] + ["Location"] + [ col for col in df.columns if col not in ["Gene","Mutation","Location"] ] ] df.index = df.index.str.replace(r"_T", "", regex=True) # data based on Tumor and Normal. Remove _T self._data["somatic_mutation"] = df print(' ' * len(loading_msg), end='\r') # Erase the loading message formatting_msg = f"Formatting {self.get_cancer_type()} dataframes..." print(formatting_msg, end='\r') self._data = sort_all_rows_pancan(self._data) # Sort IDs (tumor first then normal) ''' if filter_type == 'pancanucec': print("True") mut_df = self._data["somatic_mutation"] mut_df = mut_df.loc[mut_df.index[~ mut_df.index.str.contains('NX', regex = True)]] # Drop quality control self._data["somatic_mutation"] = mut_df ''' print(" " * len(formatting_msg), end='\r') # Erase the formatting message
{"hexsha": "5a7f0ab9644a474dbc324d7f835605279627cb40", "size": 4907, "ext": "py", "lang": "Python", "max_stars_repo_path": "cptac/pancan/harmonized.py", "max_stars_repo_name": "old-rob/cptac", "max_stars_repo_head_hexsha": "9b33893dd11c9320628a751c8840783a6ce81957", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 53, "max_stars_repo_stars_event_min_datetime": "2019-05-30T02:05:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T00:38:58.000Z", "max_issues_repo_path": "cptac/pancan/harmonized.py", "max_issues_repo_name": "old-rob/cptac", "max_issues_repo_head_hexsha": "9b33893dd11c9320628a751c8840783a6ce81957", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2020-02-16T23:50:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-26T10:07:59.000Z", "max_forks_repo_path": "cptac/pancan/harmonized.py", "max_forks_repo_name": "old-rob/cptac", "max_forks_repo_head_hexsha": "9b33893dd11c9320628a751c8840783a6ce81957", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2019-09-27T20:55:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-19T07:18:06.000Z", "avg_line_length": 53.3369565217, "max_line_length": 283, "alphanum_fraction": 0.633992256, "include": true, "reason": "import numpy", "num_tokens": 1154}
# Mecánica con SymPy _Si SymPy te ha parecido hasta ahora un CAS decente e incluso interesante (nada como tener los resultados en $\LaTeX$ incrustados en el notebook y la sintaxis de Python para hacer cálculo simbólico) entonces espera a ver el paquete `mechanics`. Con él, podremos manipular velocidades y aceleraciones de sólidos expresadas en distintos sistemas de referencia con una facilidad impresionante._ _Tienes disponible la documentación de `mechanics` en http://docs.sympy.org/0.7.5/modules/physics/mechanics/index.html._ ## Sistemas de referencia El objeto primordial que vamos a manejar van a ser los sistemas de referencia. Podremos definir relaciones geométricas entre ellos y de esta forma las transformaciones de vectores entre un sistema y otro serán triviales. La manera usual de empezar a trabajar con SymPy es importar la función `init_session`: ``` from sympy import init_session init_session(use_latex=True)``` Esta función ya se encarga de importar todas las funciones básicas y preparar las salidas gráficas. Sin embargo, en este momento, esta función se encuentra en mantenimiento para su uso dentro de los notebooks por lo que activaremos la salida gráfica e importaremos las funciones de la manera usual. Puedes consultar el estado de la corrección en: https://github.com/sympy/sympy/pull/13300 y https://github.com/sympy/sympy/issues/13319 . ```python from sympy import * init_printing(use_latex='mathjax') ``` ```python from sympy import symbols ``` Todo lo que necesitamos está en `sympy.physics.mechanics`, incluyendo la clase `ReferenceFrame`. Nada más crear un sistema de referencia podemos acceder a sus versores unitarios: `x`, `y` y `z`. http://docs.sympy.org/0.7.5/modules/physics/vector/vectors.html ```python from sympy.physics.mechanics import ReferenceFrame ``` ```python A = ReferenceFrame("A") A.x ``` $\displaystyle \mathbf{\hat{a}_x}$ Y para definir vectores solo tenemos que **multiplicar cada componente por su versor**: ```python 2 * A.x - 1 * A.y ``` $\displaystyle 2\mathbf{\hat{a}_x} - \mathbf{\hat{a}_y}$ De ahora en adelante, para trabajar como si nos enfrentáramos a un problema de la escuela, vamos a hacer dos cosas: * Definir un sistema inercial $1$ del que partir, para así poder referir todos los demás sistemas a él. * Que los versores de ese sistema sean $i, j, k$. ```python A = ReferenceFrame("1", latexs=['\mathbf{i}', '\mathbf{j}', '\mathbf{k}']) A.x + A.y + A.z ``` $\displaystyle \mathbf{i} + \mathbf{j} + \mathbf{k}$ Y para no tener que hacerlo siempre, un pequeño truco de magia: ```python # Definimos nuestra propia clase para que los versores sean IJK # aeropython: preserve class IJKReferenceFrame(ReferenceFrame): def __init__(self, name): super().__init__(name, latexs=['\mathbf{%s}_{%s}' % (idx, name) for idx in ("i", "j", "k")]) self.i = self.x self.j = self.y self.k = self.z ``` ```python A = IJKReferenceFrame("1") A.i + A.j + A.k ``` $\displaystyle \mathbf{i}_{1} + \mathbf{j}_{1} + \mathbf{k}_{1}$ ### Álgebra vectorial Nuestros vectores funcionan también con símbolos, y podemos realizar las operaciones de producto escalar y producto vectorial con ellos. ```python R, V = symbols('R, V', positive=True) r1 = R * (A.x + A.y + A.z) v1 = V * (A.x - 2 * A.z) ``` ```python r1 ``` $\displaystyle R\mathbf{i}_{1} + R\mathbf{j}_{1} + R\mathbf{k}_{1}$ ```python v1 ``` $\displaystyle V\mathbf{i}_{1} - 2 V\mathbf{k}_{1}$ ```python from sympy.physics.mechanics import dot, cross ``` ```python # All these are ways to carry out a dot prduct between two vectors r1.dot(v1) dot(r1, v1) r1 & v1 ``` $\displaystyle - R V$ ```python # All these are ways to carry out a cross prduct between two vectors r1.cross(v1) cross(r1, v1) r1 ^ v1 ``` $\displaystyle - 2 R V\mathbf{i}_{1} + 3 R V\mathbf{j}_{1} - R V\mathbf{k}_{1}$ Podemos hallar también la norma de los vectores con su método `magnitude` e incluso normalizarlos con `normalize`: ```python (r1 ^ v1).magnitude() cross(r1, v1).magnitude() ``` $\displaystyle \sqrt{14} R V$ ```python (r1 ^ v1).normalize() ``` $\displaystyle - \frac{\sqrt{14}}{7}\mathbf{i}_{1} + \frac{3 \sqrt{14}}{14}\mathbf{j}_{1} - \frac{\sqrt{14}}{14}\mathbf{k}_{1}$ ##### Ejercicio Usando directamente la fórmula para la derivada en ejes móviles: $$\left(\frac{\operatorname{d}\!\mathbf{a}}{\operatorname{d}\!t}\right)_1 = \left(\frac{\operatorname{d}\!\mathbf{a}}{\operatorname{d}\!t}\right)_0 + \mathbf{\omega}_{01}\! \times \mathbf{a}$$ Calcula la derivada del vector de posición $R \mathbf{i}_0$, siendo $A_0$ un sistema de referencia que gira respecto al inercial con velocidad angular $\mathbf{\omega}_{01}=\Omega \mathbf{k}_0$. **¿Cuál es el módulo de la derivada?** ```python R, Omega = symbols('R, Omega', positive=True) A0 = IJKReferenceFrame('0') ``` ```python a = R * A0.i a ``` $\displaystyle R\mathbf{i}_{0}$ ```python omega01 = Omega * A0.k omega01 ``` $\displaystyle \Omega\mathbf{k}_{0}$ ```python da = omega01 ^ a # Cross product between omega01 and a da ``` $\displaystyle \Omega R\mathbf{j}_{0}$ ```python da.magnitude() ``` $\displaystyle \Omega R$ <div class="alert alert-warning">Si no especificaste `positive=True` vas a ver algo como $\sqrt{\Omega^2 R^2}$. Debería haber una forma de simplificar esta expresión _a posteriori_, pero de momento no funciona del todo bien. Preparando este notebook nos hemos dado cuenta y ya les hemos avisado :) https://github.com/sympy/sympy/issues/8326 </div> ### Movimiento relativo ¿A quién no le gusta multiplicar matrices de rotación? Para esa minoría que lo detesta, existe SymPy. Para ello debemos especificar la orientación de nuestros sistemas de referencia usando el método `orient`, y recuperaremos la matriz de cosenos directores usando el método `dcm`. ```python A1 = IJKReferenceFrame("1") A0 = IJKReferenceFrame("0") ``` ```python phi = symbols('phi') A0.orient(A1, 'Axis', [phi, A1.z]) # Rotación phi alrededor del eje A1.z A0.dcm(A1) # "Direct Cosine Matrix" ``` $\displaystyle \left[\begin{matrix}\cos{\left(\phi \right)} & \sin{\left(\phi \right)} & 0\\- \sin{\left(\phi \right)} & \cos{\left(\phi \right)} & 0\\0 & 0 & 1\end{matrix}\right]$ Usando el argumento `Axis` hemos especificado que rotamos el sistema un ángulo especificado alrededor de un eje. Otros métodos son: * `Body`: se especifican los tres ángulos de Euler. * `Space`: igual que `Body`, pero las rotaciones se aplican en orden inverso. * `Quaternion`: utilizando cuaternios, rotación alrededor de un vector unitario $\lambda$ una cantidad $\theta$. <div class="alert alert-success">¿Qué es lo bueno de usar uno de estos métodos? ¡Que **siempre** tenemos la transformación bien definida! Es imposible meter "a capón" una matriz de rotación que sea incorrecta o absurda.</div> #### Diferente sistema de referencia Para expresar un vector en otro sistema de referencia, no hay más que usar los métodos `express` o `to_matrix`: ```python A0.x.express(A1) ``` $\displaystyle \cos{\left(\phi \right)}\mathbf{i}_{1} + \sin{\left(\phi \right)}\mathbf{j}_{1}$ ```python A0.x.to_matrix(A1) ``` $\displaystyle \left[\begin{matrix}\cos{\left(\phi \right)}\\\sin{\left(\phi \right)}\\0\end{matrix}\right]$ ```python Matrix([ [A0.x.to_matrix(A1)], [A0.y.to_matrix(A1)],[A0.z.to_matrix(A1)] ]) ``` $\displaystyle \left[\begin{matrix}\cos{\left(\phi \right)}\\\sin{\left(\phi \right)}\\0\\- \sin{\left(\phi \right)}\\\cos{\left(\phi \right)}\\0\\0\\0\\1\end{matrix}\right]$ #### Símbolos dinámicos Si queremos especificar que un símbolo puede variar con el tiempo, hay que usar la función `dynamicsymbols`: ```python from sympy.physics.mechanics import dynamicsymbols ``` ```python alpha = dynamicsymbols('alpha') alpha ``` $\displaystyle \alpha{\left(t \right)}$ Y pedir su derivada con el método `diff`: ```python alpha.diff() ``` $\displaystyle \frac{d}{d t} \alpha{\left(t \right)}$ ##### Ejercicio (Sacado de Cuerva et al. "Teoría de los Helicópteros") **Obtener la matriz de rotación de la pala $B$ respecto a los ejes $A1$.** ```python A = IJKReferenceFrame("A") ``` ```python A1 = IJKReferenceFrame("A1") psi = dynamicsymbols('psi') A1.orient(A, 'Axis', [psi, A.z]) A1.dcm(A) # T_{A1A} ``` $\displaystyle \left[\begin{matrix}\cos{\left(\psi{\left(t \right)} \right)} & \sin{\left(\psi{\left(t \right)} \right)} & 0\\- \sin{\left(\psi{\left(t \right)} \right)} & \cos{\left(\psi{\left(t \right)} \right)} & 0\\0 & 0 & 1\end{matrix}\right]$ ```python A2 = IJKReferenceFrame("A2") beta = dynamicsymbols('beta') A2.orient(A1, 'Axis', [beta, -A1.y]) A2.dcm(A1) # T_{A2A1} ``` $\displaystyle \left[\begin{matrix}\cos{\left(\beta{\left(t \right)} \right)} & 0 & \sin{\left(\beta{\left(t \right)} \right)}\\0 & 1 & 0\\- \sin{\left(\beta{\left(t \right)} \right)} & 0 & \cos{\left(\beta{\left(t \right)} \right)}\end{matrix}\right]$ ```python A3 = IJKReferenceFrame("A3") zeta = dynamicsymbols('zeta') A3.orient(A2, 'Axis', [zeta, A2.z]) A3.dcm(A1) # T_{A3A1} ``` $\displaystyle \left[\begin{matrix}\cos{\left(\beta{\left(t \right)} \right)} \cos{\left(\zeta{\left(t \right)} \right)} & \sin{\left(\zeta{\left(t \right)} \right)} & \sin{\left(\beta{\left(t \right)} \right)} \cos{\left(\zeta{\left(t \right)} \right)}\\- \sin{\left(\zeta{\left(t \right)} \right)} \cos{\left(\beta{\left(t \right)} \right)} & \cos{\left(\zeta{\left(t \right)} \right)} & - \sin{\left(\beta{\left(t \right)} \right)} \sin{\left(\zeta{\left(t \right)} \right)}\\- \sin{\left(\beta{\left(t \right)} \right)} & 0 & \cos{\left(\beta{\left(t \right)} \right)}\end{matrix}\right]$ ```python B = IJKReferenceFrame("B") theta = dynamicsymbols('theta') B.orient(A3, 'Axis', [theta, A3.x]) B.dcm(A3) # T_{BA3} ``` $\displaystyle \left[\begin{matrix}1 & 0 & 0\\0 & \cos{\left(\theta{\left(t \right)} \right)} & \sin{\left(\theta{\left(t \right)} \right)}\\0 & - \sin{\left(\theta{\left(t \right)} \right)} & \cos{\left(\theta{\left(t \right)} \right)}\end{matrix}\right]$ ```python B.dcm(A2) ``` $\displaystyle \left[\begin{matrix}\cos{\left(\zeta{\left(t \right)} \right)} & \sin{\left(\zeta{\left(t \right)} \right)} & 0\\- \sin{\left(\zeta{\left(t \right)} \right)} \cos{\left(\theta{\left(t \right)} \right)} & \cos{\left(\theta{\left(t \right)} \right)} \cos{\left(\zeta{\left(t \right)} \right)} & \sin{\left(\theta{\left(t \right)} \right)}\\\sin{\left(\theta{\left(t \right)} \right)} \sin{\left(\zeta{\left(t \right)} \right)} & - \sin{\left(\theta{\left(t \right)} \right)} \cos{\left(\zeta{\left(t \right)} \right)} & \cos{\left(\theta{\left(t \right)} \right)}\end{matrix}\right]$ ```python B.dcm(A1) ``` $\displaystyle \left[\begin{matrix}\cos{\left(\beta{\left(t \right)} \right)} \cos{\left(\zeta{\left(t \right)} \right)} & \sin{\left(\zeta{\left(t \right)} \right)} & \sin{\left(\beta{\left(t \right)} \right)} \cos{\left(\zeta{\left(t \right)} \right)}\\- \sin{\left(\beta{\left(t \right)} \right)} \sin{\left(\theta{\left(t \right)} \right)} - \sin{\left(\zeta{\left(t \right)} \right)} \cos{\left(\beta{\left(t \right)} \right)} \cos{\left(\theta{\left(t \right)} \right)} & \cos{\left(\theta{\left(t \right)} \right)} \cos{\left(\zeta{\left(t \right)} \right)} & - \sin{\left(\beta{\left(t \right)} \right)} \sin{\left(\zeta{\left(t \right)} \right)} \cos{\left(\theta{\left(t \right)} \right)} + \sin{\left(\theta{\left(t \right)} \right)} \cos{\left(\beta{\left(t \right)} \right)}\\- \sin{\left(\beta{\left(t \right)} \right)} \cos{\left(\theta{\left(t \right)} \right)} + \sin{\left(\theta{\left(t \right)} \right)} \sin{\left(\zeta{\left(t \right)} \right)} \cos{\left(\beta{\left(t \right)} \right)} & - \sin{\left(\theta{\left(t \right)} \right)} \cos{\left(\zeta{\left(t \right)} \right)} & \sin{\left(\beta{\left(t \right)} \right)} \sin{\left(\theta{\left(t \right)} \right)} \sin{\left(\zeta{\left(t \right)} \right)} + \cos{\left(\beta{\left(t \right)} \right)} \cos{\left(\theta{\left(t \right)} \right)}\end{matrix}\right]$ #### Velocidad angular También podemos hallar la velocidad angular de un sistema respecto a otro usando el método `ang_vel_in`: ```python B.ang_vel_in(A2) ``` $\displaystyle \frac{d}{d t} \theta{\left(t \right)}\mathbf{i}_{A3} + \frac{d}{d t} \zeta{\left(t \right)}\mathbf{k}_{A2}$ ```python B.ang_vel_in(A) ``` $\displaystyle \frac{d}{d t} \theta{\left(t \right)}\mathbf{i}_{A3} + \frac{d}{d t} \zeta{\left(t \right)}\mathbf{k}_{A2} - \frac{d}{d t} \beta{\left(t \right)}\mathbf{j}_{A1} + \frac{d}{d t} \psi{\left(t \right)}\mathbf{k}_{A}$ ```python B.ang_vel_in(A).express(A) ``` $\displaystyle (\left(- \sin{\left(\psi{\left(t \right)} \right)} \sin{\left(\zeta{\left(t \right)} \right)} + \cos{\left(\beta{\left(t \right)} \right)} \cos{\left(\psi{\left(t \right)} \right)} \cos{\left(\zeta{\left(t \right)} \right)}\right) \frac{d}{d t} \theta{\left(t \right)} - \sin{\left(\beta{\left(t \right)} \right)} \cos{\left(\psi{\left(t \right)} \right)} \frac{d}{d t} \zeta{\left(t \right)} + \sin{\left(\psi{\left(t \right)} \right)} \frac{d}{d t} \beta{\left(t \right)})\mathbf{i}_{A} + (\left(\sin{\left(\psi{\left(t \right)} \right)} \cos{\left(\beta{\left(t \right)} \right)} \cos{\left(\zeta{\left(t \right)} \right)} + \sin{\left(\zeta{\left(t \right)} \right)} \cos{\left(\psi{\left(t \right)} \right)}\right) \frac{d}{d t} \theta{\left(t \right)} - \sin{\left(\beta{\left(t \right)} \right)} \sin{\left(\psi{\left(t \right)} \right)} \frac{d}{d t} \zeta{\left(t \right)} - \cos{\left(\psi{\left(t \right)} \right)} \frac{d}{d t} \beta{\left(t \right)})\mathbf{j}_{A} + (\sin{\left(\beta{\left(t \right)} \right)} \cos{\left(\zeta{\left(t \right)} \right)} \frac{d}{d t} \theta{\left(t \right)} + \cos{\left(\beta{\left(t \right)} \right)} \frac{d}{d t} \zeta{\left(t \right)} + \frac{d}{d t} \psi{\left(t \right)})\mathbf{k}_{A}$ En ocasiones, la representación gráfica puede fallar, pero se puede volver a desactivar y activar llamando a la función`init_printing(pretty_print=True)` con diferentes valores (True/False) para `pretty_print` ### Derivada en ejes móviles Hacer una derivada con la fórmula lo hace cualquiera, pero SymPy puede encargarse automáticamente. ```python v1 = A1.x v1 ``` $\displaystyle \mathbf{i}_{A1}$ ```python #v1.diff(dynamicsymbols._t, A2) dv1 = v1.diff(symbols('t'), A) dv1 ``` $\displaystyle (\sin^{2}{\left(\psi{\left(t \right)} \right)} \frac{d}{d t} \psi{\left(t \right)} + \cos^{2}{\left(\psi{\left(t \right)} \right)} \frac{d}{d t} \psi{\left(t \right)})\mathbf{j}_{A1}$ ```python dv1.to_matrix(A1) ``` $\displaystyle \left[\begin{matrix}0\\\sin^{2}{\left(\psi{\left(t \right)} \right)} \frac{d}{d t} \psi{\left(t \right)} + \cos^{2}{\left(\psi{\left(t \right)} \right)} \frac{d}{d t} \psi{\left(t \right)}\\0\end{matrix}\right]$ ```python (dv1 & A1.j).simplify() # dv1 & A1.j is the dot produt ``` $\displaystyle \frac{d}{d t} \psi{\left(t \right)}$ ### Puntos, velocidades y la rueda que no desliza El último paso que nos queda para completar la cinemática es la posibilidad de definir puntos en sólidos y aplicar su campo de velocidades. SymPy también permite esto, y para ello no tenemos más que importar la clase `Point`. ```python from sympy.physics.mechanics import Point ``` ```python O = Point("O") ``` Para trabajar como lo haríamos en la escuela, vamos a especificar que $O$ es el origen de $A$, y para eso vamos a imponer que su velocidad es cero con el método `set_vel`: ```python O.set_vel(A, 0) ``` Para definir nuevos puntos, podemos utilizar el método `locate_new`: ```python e_b = symbols('e_b') E_b = O.locatenew('E_b', e_b * A1.x) ``` Y para obtener vectores de un punto a otro, el método `pos_from`: ```python E_b.pos_from(O) ``` $\displaystyle e_{b}\mathbf{i}_{A1}$ <div class="alert alert-info">La notación de este paquete está influenciada por el libro Kane, T. R. & Levinson, D. A. "Dynamics, Theory and Applications". Es ligeramente distinto a como estudiamos nosotros en la escuela, pero ¡están abiertos a que les hagamos cualquier tipo de sugerencia! https://github.com/sympy/sympy/issues/2584#issuecomment-31552654</div> Por último, el **campo de velocidades de un sólido rígido** se formula usando el método `v2pt_theory`. $$v^P_A = v^O_A + \omega_{A_1 A} \times \mathbf{OP}$$ Este método pertenece *al punto del cual queremos conocer la velocidad* y recibe tres parámetros: * `O`, punto de velocidad conocida respecto a A * `A`, sistema de referencia donde queremos calcular la velocidad * `A1`, sistema de referencia donde están fijos ambos puntos (_sistema de arrastre_) Por tanto, para hallar la velocidad del punto que acabamos de crear: ```python E_b.v2pt_theory(O, A, A1) ``` $\displaystyle e_{b} \frac{d}{d t} \psi{\left(t \right)}\mathbf{j}_{A1}$ ##### Ejercicio (Apuntes de Óscar López Rebollal) **¡Halla la velocidad y la aceleración de $P$!** ```python # Creamos nuestros sistemas de referencia A1 = IJKReferenceFrame('1') A0 = IJKReferenceFrame('0') A2 = IJKReferenceFrame('2') ``` ```python # Creamos los símbolos dinámicos necesarios xi, theta = dynamicsymbols('xi, theta') xi, theta ``` $\displaystyle \left( \xi{\left(t \right)}, \ \theta{\left(t \right)}\right)$ ```python # Orientamos los sistemas de referencia A0.orient(A1, 'Axis', [0, A1.k]) # A0 no gira respecto a A1 A2.orient(A0, 'Axis', [theta, A0.k]) ``` ```python A2.dcm(A1) ``` $\displaystyle \left[\begin{matrix}\cos{\left(\theta{\left(t \right)} \right)} & \sin{\left(\theta{\left(t \right)} \right)} & 0\\- \sin{\left(\theta{\left(t \right)} \right)} & \cos{\left(\theta{\left(t \right)} \right)} & 0\\0 & 0 & 1\end{matrix}\right]$ ```python # Creamos el punto C, centro del disco, y especificamos su velocidad # respecto a A1 C = Point('C') C.set_vel(A1, xi.diff() * A1.x) ``` ```python # Localizamos el punto P, punto fijo del disco, respecto a C, en # el sistema A2 (que gira solidariamente con el disco) R = symbols('R') P = C.locatenew('P', -R * A2.j) P.pos_from(C) ``` $\displaystyle - R\mathbf{j}_{2}$ ```python # Hallamos la velocidad de P en A1, expresada en A0 # ¡Con esta llamada ya estamos diciendo que C y P son fijos en A2! P.v2pt_theory(C, A1, A2).express(A0) ``` $\displaystyle (R \cos{\left(\theta{\left(t \right)} \right)} \frac{d}{d t} \theta{\left(t \right)} + \frac{d}{d t} \xi{\left(t \right)})\mathbf{i}_{0} + R \sin{\left(\theta{\left(t \right)} \right)} \frac{d}{d t} \theta{\left(t \right)}\mathbf{j}_{0}$ **Misión cumplida :)** --- _Hemos hecho un repaso bastante profundo de las posibilidades del paquete `mechanics` de SymPy. Nos hemos dejado algunas cosas en el tintero pero no demasiadas: esta funcionalidad aún se está expandiendo y necesita pulir algunos detalles._ **Referencias** * Capítulo de **aeromecánica** del libro de Cuerva y otros http://nbviewer.ipython.org/gist/Juanlu001/7711865 * Estabilidad longitudinal de un Boeing 747 http://nbviewer.ipython.org/github/AlexS12/Mecanica_Vuelo/blob/master/MVII_MatrizSistema.ipynb _¿Serás tú el siguiente que publique un notebook usando SymPy? ;)_ Si te ha gustado esta clase: <a href="https://twitter.com/share" class="twitter-share-button" data-url="https://github.com/AeroPython/Curso_AeroPython" data-text="Aprendiendo Python con" data-via="pybonacci" data-size="large" data-hashtags="AeroPython">Tweet</a> --- #### <h4 align="right">¡Síguenos en Twitter! ###### <a href="https://twitter.com/Pybonacci" class="twitter-follow-button" data-show-count="false">Follow @Pybonacci</a> <a href="https://twitter.com/Alex__S12" class="twitter-follow-button" data-show-count="false" align="right";>Follow @Alex__S12</a> <a href="https://twitter.com/newlawrence" class="twitter-follow-button" data-show-count="false" align="right";>Follow @newlawrence</a> ##### <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es"></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">Curso AeroPython</span> por <span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName">Juan Luis Cano Rodriguez y Alejandro Sáez Mollejo</span> se distribuye bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es">Licencia Creative Commons Atribución 4.0 Internacional</a>. ##### --- _Las siguientes celdas contienen configuración del Notebook_ _Para visualizar y utlizar los enlaces a Twitter el notebook debe ejecutarse como [seguro](http://ipython.org/ipython-doc/dev/notebook/security.html)_ File > Trusted Notebook ```python %%html <a href="https://twitter.com/Pybonacci" class="twitter-follow-button" data-show-count="false">Follow @Pybonacci</a> ``` <a href="https://twitter.com/Pybonacci" class="twitter-follow-button" data-show-count="false">Follow @Pybonacci</a> ```python # Esta celda da el estilo al notebook from IPython.core.display import HTML css_file = '../styles/aeropython.css' HTML(open(css_file, "r").read()) ``` /* This template is inspired in the one used by Lorena Barba in the numerical-mooc repository: https://github.com/numerical-mooc/numerical-mooc We thank her work and hope you also enjoy the look of the notobooks with this style */ <link href='http://fonts.googleapis.com/css?family=Source+Sans+Pro|Josefin+Sans:400,700,400italic|Ubuntu+Condensed' rel='stylesheet' type='text/css'> El estilo se ha aplicado =) <style> #notebook_panel { /* main background */ background: #f7f7f7; } div.cell { /* set cell width */ width: 900px; } div #notebook { /* centre the content */ background: #fff; /* white background for content */ width: 950px; margin: auto; padding-left: 0em; } #notebook li { /* More space between bullet points */ margin-top:0.7em; } /* draw border around running cells */ div.cell.border-box-sizing.code_cell.running { border: 1px solid #111; } /* Put a solid color box around each cell and its output, visually linking them*/ div.cell.code_cell { font-family: 'Source Sans Pro', sans-serif; background-color: rgb(256,256,256); font-size: 110%; border-radius: 0px; padding: 0.5em; margin-left:1em; margin-top: 1em; } div.text_cell_render{ font-family: 'Josefin Sans', serif; line-height: 145%; font-size: 125%; font-weight: 500; width:750px; margin-left:auto; margin-right:auto; } /* Formatting for header cells */ .text_cell_render h1, .text_cell_render h2, .text_cell_render h3, .text_cell_render h4, .text_cell_render h5 { font-family: 'Ubuntu Condensed', sans-serif; } /* .text_cell_render h1 { font-family: Flux, 'Ubuntu Condensed', serif; font-style:regular; font-weight: 400; font-size: 30pt; text-align: center; line-height: 100%; color: #335082; margin-bottom: 0.5em; margin-top: 0.5em; display: block; } */ .text_cell_render h1 { font-weight: 600; font-size: 35pt; line-height: 100%; color: #000000; margin-bottom: 0.1em; margin-top: 0.3em; display: block; } .text_cell_render h2 { margin-top:16px; font-size: 27pt; font-weight: 550; margin-bottom: 0.1em; margin-top: 0.3em; font-style: regular; color: #2c6391; } .text_cell_render h3 { font-size: 20pt; font-weight: 550 text-align: left; margin-bottom: 0.1em; margin-top: 0.3em; font-style: regular; color: #387eb8; } .text_cell_render h4 { /*Use this for captions*/ font-size: 18pt; font-weight: 450 text-align: left; margin-bottom: 0.1em; margin-top: 0.3em; font-style: regular; color: #5797cc; } .text_cell_render h5 { /*Use this for small titles*/ font-size: 18pt; font-weight: 550; color: rgb(163,0,0); font-style: italic; margin-bottom: .1em; margin-top: 0.8em; display: block; color: #b21c0d; } .text_cell_render h6 { /*use this for copyright note*/ font-family: 'Ubuntu Condensed', sans-serif; font-weight: 300; font-size: 14pt; line-height: 100%; color: #252525; text-align: right; margin-bottom: 1px; margin-top: 1px; } .CodeMirror{ font-family: 'Duru Sans', sans-serif; font-size: 100%; } </style>
{"hexsha": "fd7e85ca60af7a8e2a3937723b9823428e007c57", "size": 55685, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "notebooks_completos/041-SymPy-Mecanica.ipynb", "max_stars_repo_name": "diegoomataix/Curso_AeroPython", "max_stars_repo_head_hexsha": "c2cf71a938062bc70dbbf7c2f21e09653fa2cedd", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks_completos/041-SymPy-Mecanica.ipynb", "max_issues_repo_name": "diegoomataix/Curso_AeroPython", "max_issues_repo_head_hexsha": "c2cf71a938062bc70dbbf7c2f21e09653fa2cedd", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks_completos/041-SymPy-Mecanica.ipynb", "max_forks_repo_name": "diegoomataix/Curso_AeroPython", "max_forks_repo_head_hexsha": "c2cf71a938062bc70dbbf7c2f21e09653fa2cedd", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5290570175, "max_line_length": 1533, "alphanum_fraction": 0.4830205621, "converted": true, "num_tokens": 7857}
import pandas as pd import numpy as np import os import datetime from typing import Any, Dict, Optional, Union, Dict, List, Callable import warnings import logging import copy from qualipy.backends.pandas_backend.generator import BackendPandas from qualipy.backends.sql_backend.generator import BackendSQL from qualipy.exceptions import FailException, NullableError from qualipy.project import Project from qualipy.util import setup_logging from qualipy.backends.base import MetricResult try: from qualipy.backends.spark_backend.generator import BackendSpark except Exception as e: BackendSpark = None # supress numpy future warning for now warnings.simplefilter(action="ignore", category=FutureWarning) HOME = os.path.expanduser("~") GENERATORS = {"pandas": BackendPandas, "spark": BackendSpark, "sql": BackendSQL} # types Measure = List[Dict[str, Any]] # TODO: dont really need this method now def _create_value( value: Any, metric: str, name: str, date: datetime.datetime, type: str, return_format: str, run_name, ): metric_res = MetricResult( value=value, metric=metric, date=date, column_name=name, return_format=return_format, type=type, run_name=run_name, ) return metric_res class Qualipy(object): """ This is the main entrypoint to Qualipy. This is the object that will actually execute on your data. """ def __init__( self, project: Project, backend: str = "pandas", time_of_run: Optional[datetime.datetime] = None, batch_name: str = None, overwrite_arguments: dict = None, ): """ Args: project: Your defined qualipy.Project backend: Can be either "pandas", "sql", or "spark" depending on what kind of data you are tracking time_of_run: If None, this will be the current datetime. Note, this is very important for analysis, as time_of_run is essentially your x_axis in all time series analysis. Being able to set it to a specific date can be useful when generating retrospective statistics. batch_name: Useful for comparing specific time points by name during analysis. By default it will take the time_of_run as batch_name """ self.project = project self.time_of_run = ( datetime.datetime.now() if time_of_run is None else time_of_run ) self.batch_name = batch_name if batch_name is not None else self.time_of_run self.current_data = None self.total_measures = [] self.generator = GENERATORS[backend](project.config_dir) self.chunk = False self.run_n = 0 self.schema = {} self.from_step = None self.stratify = False self.backend = backend self.overwrite_arguments = overwrite_arguments self._setup_logger() self.logger = logging.getLogger(__name__) self.logger.info(f"Working on batch {self.batch_name}") def run(self, autocommit: bool = False, profile_batch=False) -> None: """The method that runs the execution Note: You must first set a dataset using either ``set_dataset`` or ``set_chunked_dataset`` Args: autocommit: If set to True, qualipy will automatically write to it's backend. If set to False, the user will have to manually run the ``commit`` function. profile_batch: If set to True, Qualipy will generate metadata used to construct a batch report by using the ``produce_batch_report`` CLI command. Returns: None """ if not self.chunk: self._run_with_optional_stratify(autocommit, profile_batch=profile_batch) self.run_n += 1 else: if profile_batch: raise Exception("Can only profile batch without chunking data") for chunk in self.time_chunks: self.logger.info(f"Running on chunk: {chunk['batch_name']}") self.current_data = chunk["chunk"] if self.current_data.shape[0] == 0: self.current_data = self.fallback_data if self.current_data is not None: self.batch_name = str(chunk["batch_name"]) self.time_of_run = chunk["batch_name"] self._run_with_optional_stratify(autocommit) def _run_with_optional_stratify(self, autocommit, profile_batch=False): if self.stratify: self.original_data = self.current_data.copy() self.original_name = self.current_name for stratify_value in self.stratify_values: self.current_data = self.stratify_function( self.current_data, stratify_value ) self.current_name = f"{self.current_name}_{stratify_value}" self._generate_metrics( autocommit=autocommit, profile_batch=profile_batch ) # turn back name and data self.current_name = self.original_name self.current_data = self.original_data else: self._generate_metrics(autocommit=autocommit, profile_batch=profile_batch) def _setup_logger(self): setup_logging() def set_dataset( self, df, columns: Optional[List[str]] = None, run_name: str = None ) -> None: """This specified the exact subset of data you want to run on. Use this method when you don't have all of the data (a live process) and want to only run on one batch of data. Args: df: Can be either PandasData, SQLData, or SparkData columns: If you don't want to run all mappings on this specific subset of data, you can specify just the columns you want to run. Note - this corresponds to the ``name`` argument when adding a column to a project run_name: If you're running metrics from a project on many different subsets any iterations of the data, you might want to give each specific subset a name. This is especially necessary when running aggregates on a column where the column name itself stays the same, but the meaning changes based on the subset. By default, this will take the value of '0' Returns: None """ # NOTE: if sqldata but pandas backend, should pull data and work on that! # also give option of query or taking last x rows self._set_data(df, allowed_dataclasses=["SQLData", "PandasData", "SparkData"]) self.current_name = run_name if run_name is not None else self.run_n self._set_stratification(df) self.columns = self._set_columns(columns) self._set_schema(self.current_data) def set_chunked_dataset( self, df, columns: Optional[List[str]] = None, run_name: str = None, time_freq: str = "1D", time_column=None, ): """This specified the exact subset of data you want to run on. Use this method when you already have all data available, and want to retrospectively analyze all historical as if it was a live process. Note - There's nothing stopping you from first running this on the available data and then running on a batch-per-batch basis afterwards using regular ``set_dataset``. Args: df: Can be either PandasData, SQLData, or SparkData columns: If you don't want to run all mappings on this specific subset of data, you can specify just the columns you want to run. Note - this corresponds to the ``name`` argument when adding a column to a project run_name: If you're running metrics from a project on many different subsets any iterations of the data, you might want to give each specific subset a name. This is especially necessary when running aggregates on a column where the column name itself stays the same, but the meaning changes based on the subset. By default, this will take the value of '0' time_freq: A pandas-like timeseries frequency term. Use this page to know what you can use: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases (turn to link) time_column: The time series column qualipy should use to chunk the data Returns: None """ self._set_data(df, allowed_dataclasses=["SQLData", "PandasData", "SparkData"]) self.current_name = run_name if run_name is not None else self.run_n self._set_stratification(df) self.columns = self._set_columns(columns) self._set_schema(self.current_data) self.chunk = True try: time_column = ( time_column if time_column is not None else self.project.time_column ) except AttributeError: raise Exception( "No time_column specified. Must be given if chunking dataset" ) self.time_chunks = self.generator.get_chunks( self.current_data, time_freq, time_column ) def _set_data(self, df, allowed_dataclasses): if df.__class__.__name__ in allowed_dataclasses: self.current_data = df.get_data() try: self.fallback_data = df.set_fallback_data() except: pass else: raise Exception(f"{df.__class__.__name__} is not yet a supported datatype") def _set_stratification(self, df): # stratification only implemented in Pandas for now if self.backend == "pandas": if df.stratify: self.stratify = True self.stratify_values = df.stratify_values self.stratify_function = df.subset_function() def _set_schema(self, df): schema = self.generator.set_schema(df, self.columns, self.current_name) self.schema = {**self.schema, **schema} def _set_columns(self, columns: Optional[List[str]]): if columns is None: ret_columns = self.project.columns else: ret_columns = {} for col, items in self.project.columns.items(): stage_name = items.get("column_stage_collection_name") if stage_name in columns: ret_columns[col] = items return ret_columns def commit(self): with self.project.engine.begin() as conn: self._write(conn=conn, measures=self.total_measures) self.project.write_functions_to_config() self.project.update_config_and_project_files() def _set_default_view(self): self.data_view = self.generator.return_data_copy(self.current_data) self.current_name_view = self.current_name def _generate_metrics( self, autocommit: bool = True, profile_batch: bool = False ) -> None: measures = [] self._set_default_view() for col, specs in self.project.columns.items(): if col not in self.columns: continue self.logger.info(f"Analyzing column: {col}") if specs["split_on"] is not None: column_name = specs["name"].split("||")[0] self.data_view = self.generator.return_split_subset( self.current_data, specs["split_on"][0], specs["split_on"][1] ) self.current_name_view = f"{self.current_name}-{specs['split_on'][1]}" else: column_name = specs["name"] self.data_view = self.generator.return_data_copy(self.current_data) self.current_name_view = self.current_name # enforce type for function # TODO: fix types when sql data is converted to pandas data try: if specs["type"] is not None: self.generator.check_type( data=self.data_view, column=column_name, desired_type=specs["type"], force=specs["force_type"], ) overwrite_type = specs["overwrite_type"] if overwrite_type: self.data_view = self.generator.overwrite_type( self.data_view, column_name, specs["type"] ) except AttributeError: pass # get default column info measures = self._get_column_specific_general_info(specs, measures) for function_name, function in ( specs["functions"] + specs["extra_functions"] ): should_fail = function.fail arguments = function.arguments return_format = function.return_format # return_format_repr = types[return_format] viz_type = self._set_viz_type(function, function_name) # generate result row result = self.generator.generate_description( function=function, data=self.data_view, column=column_name, function_name=function_name, date=self.time_of_run, viz_type=viz_type, return_format=return_format, run_name=self.current_name_view, kwargs=arguments, overwrite_kwargs=self.overwrite_arguments, ) # set value type result.set_return_value_type() if should_fail and not result["value"]: raise FailException( "Program halted by function '{}' for variable '{}' with " "parameter 'fail=True'".format(function_name, col) ) if return_format == "custom": for sub_value in result.value: new_result = copy.deepcopy(result) new_result.update_keys( value=sub_value["value"], run_name=sub_value["run_name"] ) if "metric_name" in sub_value: new_result.update_keys(metric=sub_value["metric_name"]) measures.append(new_result) else: measures.append(result) measures = self._get_general_info(measures) # measures = [{**m, **{"run_name": self.current_name_view}} for m in measures] self._add_to_total_measures(measures) if profile_batch: self.generator.profile_batch( self.data_view, self.batch_name, self.current_name_view, self.columns, self.project.config_dir, self.project.project_name, ) if autocommit: self.commit() def _add_to_total_measures(self, measures: List[Dict]): self.total_measures.extend(measures) def _get_column_specific_general_info(self, specs, measures: Measure): col_name = specs["name"] unique, perc_missing, value_props = self.generator.generate_column_general_info( specs, self.data_view, self.time_of_run, self.current_name_view ) if unique is not None: measures.append(unique) if value_props is not None: measures.append(value_props) measures.append(perc_missing) if perc_missing.value > 0 and specs["force_null"] and not specs["null"]: raise NullableError( "Column {} has {} percent missing even" " though it is not nullable".format(col_name, perc_missing["value"]) ) measures.append( _create_value( str(self.generator.get_dtype(self.data_view, col_name)), "dtype", col_name, self.time_of_run, "data-characteristic", str, self.current_name_view, ) ) return measures def _get_general_info(self, measures: Measure) -> Measure: rows, cols = self.generator.get_shape(self.data_view) measures.append( _create_value( rows, "count", "rows", self.time_of_run, "data-characteristic", int, self.current_name, ) ) measures.append( _create_value( cols, "count", "columns", self.time_of_run, "data-characteristic", int, self.current_name, ) ) return measures def _set_viz_type(self, function: Callable, function_name: str) -> str: return_format = function.return_format if return_format == "custom": return_format = function.custom_value_return_format types = { float: "numerical", int: "numerical", bool: "boolean", dict: "categorical", str: "not_sure", } viz_type = types[return_format] return viz_type def _write(self, conn, measures: Measure) -> None: if self.chunk: batch_name = "from_chunked" else: batch_name = self.batch_name self.generator.write( conn, measures, self.project, batch_name, schema=self.project.db_schema )
{"hexsha": "2db5736a31b682de36ea05a3f9d4e6980c2f0a64", "size": 18521, "ext": "py", "lang": "Python", "max_stars_repo_path": "qualipy/run.py", "max_stars_repo_name": "baasman/qualipy", "max_stars_repo_head_hexsha": "e246a44ea3a5dcc92291983c52a89189338f808f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-15T15:16:44.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-15T15:16:44.000Z", "max_issues_repo_path": "qualipy/run.py", "max_issues_repo_name": "baasman/qualipy", "max_issues_repo_head_hexsha": "e246a44ea3a5dcc92291983c52a89189338f808f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qualipy/run.py", "max_forks_repo_name": "baasman/qualipy", "max_forks_repo_head_hexsha": "e246a44ea3a5dcc92291983c52a89189338f808f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8301075269, "max_line_length": 138, "alphanum_fraction": 0.5747529831, "include": true, "reason": "import numpy", "num_tokens": 3579}
// This file is part of snark, a generic and flexible library for robotics research // Copyright (c) 2011 The University of Sydney // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // 3. Neither the name of the University of Sydney nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE // GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT // HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN // IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "battery.h" #include <boost/static_assert.hpp> namespace snark { namespace ocean { std::string battery_t::state_to_string( int st ) { switch( st ) { case battery_state::initialised: return "IN"; break; case battery_state::uninitialised: return "UN"; break; case battery_state::fully_discharged: return "FD"; break; case battery_state::fully_charged: return "FC"; break; case battery_state::discharging: return "DC"; break; default: return "CH"; break; } } // Removes checksum wrapper, TODO: throws exception on incorrect checksum std::string& battery_t::strip( std::string& line ) { /// '$B15,....,FF00%B2' becomes $B15,....,FF00 std::size_t pos = line.find_first_of( '%', line.size() - 4 ); if( pos != std::string::npos ) { line = line.substr( 0, pos); } return line; } void battery_t::operator&(const data_t& data) { // std::cerr << " address " << data.address() << std::endl; switch( data.address() ) { case address::temperature: { static const double unit = 0.1; // Kelvin temperature = static_cast< celcius_t >( data.value() * unit * kelvin ); // 0.1k unit // std::cerr << "got temperature: " << temperature.value() << std::endl; break; } case address::voltage: { voltage = data.value() / 1000.0 * volt; // millivolts to volts // std::cerr << "got voltage: " << voltage.value() << std::endl; break; } case address::current: { current = data.value.cast() / 1000.0 * ampere; //mAmp to Amps // std::cerr << "got current: " << current.value() << std::endl; break; } case address::average_current: { average_current = data.value.cast() / 1000.0 * ampere; //mAmp to Amps // std::cerr << "got average_current: " << average_current.value() << std::endl; break; } case address::rel_state_of_charge: { charge_pc = data.value(); // percentage, unit is % break; } case address::remaining_capacity: { remaining_capacity = data.value.cast() / 100.0 * watt; // eacho unit is 10mWh - to Watts break; } case address::run_time_to_empty: { time_to_empty = boost::posix_time::minutes( data.value() ); break; } case address::status: { if( !(data.value() & battery_state::initialised) ) { state = battery_state::uninitialised; return; } comma::uint16 val = data.value() & 0x0070; // masks out everything including 'initialised' flag switch( val ) { case battery_state::discharging: state = battery_state::discharging; break; case battery_state::fully_charged: state = battery_state::fully_charged; break; case battery_state::fully_discharged: state = battery_state::fully_discharged; break; default: state = battery_state::charging; break; } // std::cerr << "battery: " << int(id) << " state: " << state << " value: " << data.value() << " val: " << val <<std::endl; break; } default: { return; } } } } } // namespace snark { namespace ocean {
{"hexsha": "703677029b3a4a60a4d4761b496d9c6ba9db4737", "size": 5578, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "batteries/ocean/battery.cpp", "max_stars_repo_name": "mission-systems-pty-ltd/snark", "max_stars_repo_head_hexsha": "2bc8a20292ee3684d3a9897ba6fee43fed8d89ae", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 63.0, "max_stars_repo_stars_event_min_datetime": "2015-01-14T14:38:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T09:56:03.000Z", "max_issues_repo_path": "batteries/ocean/battery.cpp", "max_issues_repo_name": "NEU-LC/snark", "max_issues_repo_head_hexsha": "db890f73f4c4bbe679405f3a607fd9ea373deb2c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 39.0, "max_issues_repo_issues_event_min_datetime": "2015-01-21T00:57:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-22T04:22:35.000Z", "max_forks_repo_path": "batteries/ocean/battery.cpp", "max_forks_repo_name": "NEU-LC/snark", "max_forks_repo_head_hexsha": "db890f73f4c4bbe679405f3a607fd9ea373deb2c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 36.0, "max_forks_repo_forks_event_min_datetime": "2015-01-15T04:17:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T17:13:35.000Z", "avg_line_length": 37.6891891892, "max_line_length": 136, "alphanum_fraction": 0.5812119039, "num_tokens": 1227}
from numpy.distutils.core import Extension, setup ext = Extension(name='finite_diff', sources=['finite_diff.f90']) setup( name="kdv", description="Python version of the KdV solver", install_requires=['scipy', 'matplotlib'], ext_modules=[ext], script_name='setup.py', script_args=['build_ext', '--inplace'] )
{"hexsha": "f67717338dea946e0add61ad8a6d6300e1d2e087", "size": 334, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/setup.py", "max_stars_repo_name": "ashwinvis/kdv-compact", "max_stars_repo_head_hexsha": "065f6a543692f3a3c7848bf7b7bd02cb1451b253", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-09-04T07:43:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T22:46:22.000Z", "max_issues_repo_path": "Python/setup.py", "max_issues_repo_name": "ashwinvis/kdv-compact", "max_issues_repo_head_hexsha": "065f6a543692f3a3c7848bf7b7bd02cb1451b253", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/setup.py", "max_forks_repo_name": "ashwinvis/kdv-compact", "max_forks_repo_head_hexsha": "065f6a543692f3a3c7848bf7b7bd02cb1451b253", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-05-02T03:02:41.000Z", "max_forks_repo_forks_event_max_datetime": "2018-05-02T03:02:41.000Z", "avg_line_length": 25.6923076923, "max_line_length": 64, "alphanum_fraction": 0.6886227545, "include": true, "reason": "from numpy", "num_tokens": 80}
[STATEMENT] lemma ascii_of_idem: "ascii_of c = c" if "\<not> digit7 c" [PROOF STATE] proof (prove) goal (1 subgoal): 1. ascii_of c = c [PROOF STEP] using that [PROOF STATE] proof (prove) using this: \<not> digit7 c goal (1 subgoal): 1. ascii_of c = c [PROOF STEP] by (cases c) simp
{"llama_tokens": 133, "file": null, "length": 2}
# Copyright (c) 2013, Aakvatech and contributors # For license information, please see license.txt import frappe from frappe import msgprint, _ import pandas as pd import numpy as np def execute(filters=None): columns = get_columns(filters) data = [] lab_details = get_lab_results(filters) if not lab_details: msgprint(frappe.bold( "No Record Found for Filters You Specified, Please Choose Different Filters and Try Again..!! ")) else: lab_colnames = [key for key in lab_details[0].keys()] df = pd.DataFrame.from_records(lab_details, columns = lab_colnames) pvt = pd.pivot_table( df, values="result_value", index="lab_test_name", columns="result_date", fill_value=" ", aggfunc="first" ) columns += pvt.columns.values.tolist() data += pvt.reset_index().values.tolist() return columns, data def get_columns(filters): columns = [ { "fieldname": "lab_test_name", "fieldtype": "Data", "label": _("Lab Test Name") } ] return columns def get_lab_results(filters): conditions = "" if filters.get("patient"): conditions += "and lb.patient = %(patient)s" if filters.get("from_date"): conditions += "and lb.result_date >= %(from_date)s" if filters.get("to_date"): conditions += "and lb.result_date <= %(to_date)s" if filters.get("department"): conditions += "and lb.department = %(department)s" return frappe.db.sql(""" select lb.lab_test_name as lab_test_name, date_format(lb.result_date, '%%Y-%%m-%%d') as result_date, n.result_value as result_value from `tabLab Test` lb inner join `tabNormal Test Result` n on lb.name = n.parent where lb.docstatus = 1 and lb.lab_test_name not in (select lbt.lab_test_name from `tabLab Test Template` lbt where lbt.lab_test_template_type="Grouped") and lb.status = "Completed" {conditions} union select lb.lab_test_name as lab_test_name, date_format(lb.result_date, '%%Y-%%m-%%d') as result_date, d.result_value as result_value from `tabLab Test` lb inner join `tabDescriptive Test Result` d on lb.name = d.parent where lb.docstatus = 1 and lb.lab_test_name not in (select lbt.lab_test_name from `tabLab Test Template` lbt where lbt.lab_test_template_type="Grouped") and lb.status = "Completed" {conditions} union select lb.lab_test_name as lab_test_name, date_format(lb.result_date, '%%Y-%%m-%%d') as result_date, org.colony_population as result_value from `tabLab Test` lb inner join `tabOrganism Test Result` org on lb.name = org.parent where lb.docstatus = 1 and lb.lab_test_name not in (select lbt.lab_test_name from `tabLab Test Template` lbt where lbt.lab_test_template_type="Grouped") and lb.status = "Completed" {conditions} union select lb.lab_test_name as lab_test_name, date_format(lb.result_date, '%%Y-%%m-%%d') as result_date, ss.antibiotic_sensitivity as result_value from `tabLab Test` lb inner join `tabSensitivity Test Result` ss on lb.name = ss.parent where lb.docstatus = 1 and lb.lab_test_name not in (select lbt.lab_test_name from `tabLab Test Template` lbt where lbt.lab_test_template_type="Grouped") and lb.status = "Completed" {conditions} """.format(conditions=conditions), filters, as_dict=1 )
{"hexsha": "2db4a62cb6087900bb12bb57bc2c873aca68e231", "size": 3425, "ext": "py", "lang": "Python", "max_stars_repo_path": "hms_tz/hms_tz/report/lab_report_chart/lab_report_chart.py", "max_stars_repo_name": "av-dev2/hms_tz", "max_stars_repo_head_hexsha": "a36dbe8bfacf6a770913b1bfa000d43edd2cd87a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-04-20T06:11:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-18T15:37:25.000Z", "max_issues_repo_path": "hms_tz/hms_tz/report/lab_report_chart/lab_report_chart.py", "max_issues_repo_name": "av-dev2/hms_tz", "max_issues_repo_head_hexsha": "a36dbe8bfacf6a770913b1bfa000d43edd2cd87a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 90, "max_issues_repo_issues_event_min_datetime": "2021-04-05T13:36:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T07:26:25.000Z", "max_forks_repo_path": "hms_tz/hms_tz/report/lab_report_chart/lab_report_chart.py", "max_forks_repo_name": "av-dev2/hms_tz", "max_forks_repo_head_hexsha": "a36dbe8bfacf6a770913b1bfa000d43edd2cd87a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2021-03-26T06:43:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T06:36:58.000Z", "avg_line_length": 39.367816092, "max_line_length": 145, "alphanum_fraction": 0.6794160584, "include": true, "reason": "import numpy", "num_tokens": 869}
\chapter{Conclusion} This paper uses the Gibbs sampler with a Metropolis-Hastings step to generate new samples from a NHPP. Test statistics are used to check if the new samples are from the NHPP. The NHPP used has a rate function which is a combination of a log-linear function and a power-law function. By testing the samples for different parameters in the rate function, non of the null hypothesis were rejected. Hence this sampler can be used to generate new samples from a NHPP given a data set.
{"hexsha": "84929ea6bd073ca98547d614a10b74a59431fe98", "size": 500, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Thesis/Thesis/chapters/concludingremarks.tex", "max_stars_repo_name": "mariufa/ProsjektOppgave", "max_stars_repo_head_hexsha": "3ef2fda314c55322de20f19ca861e4268a5e2d08", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Thesis/Thesis/chapters/concludingremarks.tex", "max_issues_repo_name": "mariufa/ProsjektOppgave", "max_issues_repo_head_hexsha": "3ef2fda314c55322de20f19ca861e4268a5e2d08", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Thesis/Thesis/chapters/concludingremarks.tex", "max_forks_repo_name": "mariufa/ProsjektOppgave", "max_forks_repo_head_hexsha": "3ef2fda314c55322de20f19ca861e4268a5e2d08", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 250.0, "max_line_length": 479, "alphanum_fraction": 0.806, "num_tokens": 106}
import numpy as np import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec import astropy.units as u from astropy.constants import h, c, k_B from astropy.visualization import quantity_support from .chemistry import chemistry from .opacity import kappa __all__ = [ 'dashboard' ] def dashboard( lam, F_2_up, binned_phoenix_spectrum, dtaus, pressures, temps, temperature_history, opacities ): """ Generate a dashboard plot. Parameters ---------- lam : ~astropy.units.Quantity Wavelength grid F_2_up : ~astropy.units.Quantity Emission spectrum binned_phoenix_spectrum : ~astropy.units.Quantity Binned PHOENIX spectrum dtaus : list of lists, or ~numpy.ndarray Change in optical depth pressures : ~astropy.units.Quantity Pressure grid temps : ~astropy.units.Quantity Final temperatures after iteration for radiative equilibrium temperature_history : ~astropy.units.Quantity Grid of temperatures for each timestep and pressure opacities : dict Opacity dictionary of xarray.DataArray's Returns ------- fig, ax : ~matplotlib.axes.Figure, ~matplotlib.axes.Axes """ from .chemistry import iso_to_species flux_unit = u.erg/u.cm**3/u.s fig = plt.figure(figsize=(12, 7)) gs = GridSpec(2, 4, figure=fig) ax = [fig.add_subplot(ax) for ax in [gs[0, :], gs[1, 0], gs[1, 1], gs[1, 2], gs[1, 3]]] with quantity_support(): if np.any(binned_phoenix_spectrum.value != 0): ax[0].loglog( lam, binned_phoenix_spectrum.to(flux_unit), color='C1', label='PHOENIX' ) ax[0].loglog(lam, F_2_up.to(flux_unit), color='C0', label='frei') ax[0].legend() tau = np.cumsum(dtaus[::-1], axis=0) nus = lam.to(u.cm**-1, u.spectral()) hcperk = h * c / k_B dlogP = (np.log10(pressures.max().to(u.bar).value) - np.log10(pressures.min().to(u.bar).value) ) / (len(pressures) - 1) k = 10 ** -dlogP dParr = (1 - k) * pressures cf = ( np.exp(-tau) * np.array(dtaus)[::-1] * (pressures[::-1, None] / dParr[::-1, None]) * nus**3 / np.expm1(hcperk * nus / temps[::-1, None])) cf /= np.sum(cf, axis=0) lg, pg = np.meshgrid(lam.value, pressures.to(u.bar).value) cax = ax[1].pcolormesh(lg, pg, cf[::-1], cmap=plt.cm.Greys, shading='auto') plt.colorbar(cax, ax=ax[1]) ax[1].set_yscale('log') ax[1].invert_yaxis() ax[1].set( xlabel=r'Wavelength [$\mu$m]', ylabel='Pressure [bar]', title='Contrib Func', xlim=[lam.value.min(), lam.value.max()], ylim=[pressures.to(u.bar).value.max(), pressures.to(u.bar).value.min()] ) ax[0].set( xlabel=r'Wavelength [$\mu$m]', title='Emission spectrum', ) ax[1].set_xscale('log') cmap = plt.cm.winter_r for i in range(temperature_history.shape[1]): color = cmap(i / temperature_history.shape[1]) if np.all(temperature_history[:, i] != 0): ax[2].semilogy(temperature_history[:, i], pressures[:].to(u.bar), c=color, alpha=0.3) ax[2].semilogy(temps[:], pressures[:].to(u.bar), '-', color='k', lw=3) ax[2].invert_yaxis() ax[2].annotate("Initial", (0.1, 0.18), color=cmap(0), xycoords='axes fraction') ax[2].annotate("Final", (0.1, 0.1), xycoords='axes fraction') ax[2].set( xlabel='Temperature [K]', ylabel='Pressure [bar]', ) fastchem_mmr, fastchem_vmr = chemistry( temps[:], pressures[:], opacities.keys(), return_vmr=True ) for isotopologue in fastchem_vmr: species_name = iso_to_species(isotopologue) ax[3].semilogy( np.log10(fastchem_vmr[isotopologue]), pressures.to(u.bar), label=species_name.replace('2', '$_2$'), lw=2 ) ax[3].legend() ax[3].invert_yaxis() ax[3].set( xlabel='log(VMR)', ylabel='Pressure [bar]', title='Chemistry (FastChem)', ylim=ax[1].get_ylim() ) k, sigma_scattering = kappa( opacities, np.interp(1 * u.bar, pressures[::-1].to(u.bar), temps[::-1]), 1 * u.bar, lam ) with quantity_support(): ax[4].loglog(lam, k.to(u.cm ** 2 / u.g).flatten(), label='Total') ax[4].loglog(lam, sigma_scattering.to(u.cm ** 2 / u.g).flatten(), label='Scattering') ax[4].set( xlabel=r'Wavelength [$\mu$m]', ylabel='Opacity [cm$^2$ g$^{-1}$]' ) ax[4].legend() for axis in ax: for sp in ['right', 'top']: axis.spines[sp].set_visible(False) fig.tight_layout() return fig, ax
{"hexsha": "ebf8c6e2f2a9c1c5a2fc2587e345a5418a5412cf", "size": 4716, "ext": "py", "lang": "Python", "max_stars_repo_path": "frei/plot.py", "max_stars_repo_name": "bmorris3/frei", "max_stars_repo_head_hexsha": "ab81b494da131dd24cb796d0bae6b45ab08f5b3c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "frei/plot.py", "max_issues_repo_name": "bmorris3/frei", "max_issues_repo_head_hexsha": "ab81b494da131dd24cb796d0bae6b45ab08f5b3c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "frei/plot.py", "max_forks_repo_name": "bmorris3/frei", "max_forks_repo_head_hexsha": "ab81b494da131dd24cb796d0bae6b45ab08f5b3c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0816326531, "max_line_length": 95, "alphanum_fraction": 0.5871501272, "include": true, "reason": "import numpy,import astropy,from astropy", "num_tokens": 1399}
import cv2 import numpy as np frontal_face = cv2.CascadeClassifier('classifier/haarcascade_frontalface_default.xml') #eye_cascade = cv2.CascadeClassifier('classifier/eye_pair_big.xml') #eye_cascade = cv2.CascadeClassifier('classifier/eye_pair_small.xml') eye_cascade = cv2.CascadeClassifier('classifier/haarcascade_eye.xml') kernel = np.ones((4,4),np.uint8) kernel2 = np.ones((8,8),np.uint8) capture = cv2.VideoCapture(0) while True: ret, frame = capture.read() frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = frontal_face.detectMultiScale(frame_gray) for face in faces: x,y,w,h = face frame = cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), 3) faceROI = frame_gray[y:y+h,x:x+w] eyes = eye_cascade.detectMultiScale(faceROI, minNeighbors = 20) gaze_left = [False,False] gaze_right = [False,False] for i,(x2,y2,w2,h2) in enumerate(eyes): if i == 2: break frame = cv2.rectangle(frame, (x+x2,y+y2), (x+x2+h2,y+y2+w2), (255, 0, 0 ), 4) eyeROI = faceROI[y2:y2+w2,x2:x2+h2] scale_percent = 500 # percent of original size width = int(eyeROI.shape[1] * scale_percent / 100) height = int(eyeROI.shape[0] * scale_percent / 100) dim = (width, height) # resize image resized = cv2.resize(eyeROI, dim, interpolation = cv2.INTER_AREA) resized = cv2.equalizeHist(resized) resized = cv2.GaussianBlur(resized,(5,5), cv2.BORDER_DEFAULT) rows, cols = resized.shape _, threshold = cv2.threshold(resized, 10, 255, cv2.THRESH_BINARY_INV) dilation = cv2.dilate(threshold,kernel,iterations = 4) erosion = cv2.erode(dilation,kernel,iterations = 2) #_, threshold2 = cv2.threshold(resized,40 , 255, cv2.THRESH_BINARY_INV) #erosion2 = cv2.erode(threshold2,kernel2,iterations = 3) #dilation2 = cv2.dilate(erosion2,kernel2,iterations = 1) contours,_ = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True) contours = contours[:1] #contours = sorted(contours, key=lambda ctr: cv2.boundingRect(ctr)[0]) #contours2,_ = cv2.findContours(dilation2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) #contours2 = sorted(contours2, key=lambda x: cv2.contourArea(x), reverse=True) #contours2 = contours2[:2] #contours2 = sorted(contours2, key=lambda ctr: cv2.boundingRect(ctr)[0]) #i = 0 #while i<2: # try: # eye = contours2[i] # pupil = contours[i] # (xe, ye, we, he) = cv2.boundingRect(eye) # (xp, yp, wp, hp) = cv2.boundingRect(pupil) # cv2.rectangle(resized, (xe, ye), (xe + we, ye + he), (255, 0, 0), 2) # cv2.line(resized, (xe + int(we/2), 0), (xe + int(we/2), rows), (0, 255, 0), 2) # cv2.line(resized, (0, ye + int(he/2)), (cols, ye + int(he/2)), (0, 255, 0), 2) # cv2.rectangle(resized, (xp, yp), (xp + wp, yp + hp), (255, 0, 0), 2) # cv2.line(resized, (xp + int(wp/2), 0), (xp + int(wp/2), rows), (0, 255, 0), 2) # cv2.line(resized, (0, yp + int(hp/2)), (cols, yp + int(hp/2)), (0, 255, 0), 2) # if (xp+(xp+wp)/2)-(xe+(xe+we)/2)>17: # gaze_left[i] = True # elif (xp+(xp+wp)/2)-(xe+(xe+we)/2)<-17: # gaze_right[i] = True # except: # pass # i+=1 (xp, yp, wp, hp) = cv2.boundingRect(contours[0]) cv2.rectangle(resized, (xp, yp), (xp + wp, yp + hp), (255, 0, 0), 2) cv2.line(resized, (xp + int(wp/2), 0), (xp + int(wp/2), rows), (0, 255, 0), 2) cv2.line(resized, (0, yp + int(hp/2)), (cols, yp + int(hp/2)), (0, 255, 0), 2) if (xp+int(wp/2))-(cols/2)>18: gaze_left[i] =True elif (xp+int(wp/2))-(cols/2)<-18: gaze_right[i] =True cv2.imshow("pupil"+str(i), erosion) cv2.imshow("eye"+str(i), resized) if gaze_left[0] and gaze_left[1]: print("looking left") elif gaze_right[0] and gaze_right[1]: print("looking right") else: print("looking center") cv2.imshow("webcam", frame) if cv2.waitKey(1) & 0xFF == ord("q"): break
{"hexsha": "296b873cabb51a302315e5d102f9031c507796b1", "size": 4983, "ext": "py", "lang": "Python", "max_stars_repo_path": "eye_tracking.py", "max_stars_repo_name": "batbat99/eye_tracker", "max_stars_repo_head_hexsha": "571cca789623a45d200ece8113d7e42e876711ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "eye_tracking.py", "max_issues_repo_name": "batbat99/eye_tracker", "max_issues_repo_head_hexsha": "571cca789623a45d200ece8113d7e42e876711ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "eye_tracking.py", "max_forks_repo_name": "batbat99/eye_tracker", "max_forks_repo_head_hexsha": "571cca789623a45d200ece8113d7e42e876711ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.185483871, "max_line_length": 99, "alphanum_fraction": 0.5049167168, "include": true, "reason": "import numpy", "num_tokens": 1455}
{- Byzantine Fault Tolerant Consensus Verification in Agda, version 0.9. Copyright (c) 2021, Oracle and/or its affiliates. Licensed under the Universal Permissive License v 1.0 as shown at https://opensource.oracle.com/licenses/upl -} open import LibraBFT.Impl.OBM.Logging.Logging open import LibraBFT.ImplShared.Consensus.Types import LibraBFT.ImplShared.Util.Crypto as Crypto open import Optics.All open import Util.Prelude module LibraBFT.Impl.Types.Ledger2WaypointConverter where new : LedgerInfo → Ledger2WaypointConverter new ledgerInfo = mkLedger2WaypointConverter (ledgerInfo ^∙ liEpoch) (ledgerInfo ^∙ liTransactionAccumulatorHash) (ledgerInfo ^∙ liVersion) --(ledgerInfo ^∙ liTimestamp) (ledgerInfo ^∙ liNextEpochState)
{"hexsha": "f2780f47725ac4082fedbc51f2ad1e7bfca2f2e4", "size": 757, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/LibraBFT/Impl/Types/Ledger2WaypointConverter.agda", "max_stars_repo_name": "LaudateCorpus1/bft-consensus-agda", "max_stars_repo_head_hexsha": "a4674fc473f2457fd3fe5123af48253cfb2404ef", "max_stars_repo_licenses": ["UPL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/LibraBFT/Impl/Types/Ledger2WaypointConverter.agda", "max_issues_repo_name": "LaudateCorpus1/bft-consensus-agda", "max_issues_repo_head_hexsha": "a4674fc473f2457fd3fe5123af48253cfb2404ef", "max_issues_repo_licenses": ["UPL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/LibraBFT/Impl/Types/Ledger2WaypointConverter.agda", "max_forks_repo_name": "LaudateCorpus1/bft-consensus-agda", "max_forks_repo_head_hexsha": "a4674fc473f2457fd3fe5123af48253cfb2404ef", "max_forks_repo_licenses": ["UPL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4090909091, "max_line_length": 111, "alphanum_fraction": 0.7886393659, "num_tokens": 212}
# have all phylogenies in one file # have a file with the chromosome and window for each tree in the correct order # have a popmap with the individual names and groupings wished to test # have the outgroup labeled once in the popmap as "outgroup" library(ape) library(phytools) options(scipen=999) # read in trees, info, and popmap for 50kbp trees x_trees <- read.tree("certhia_50kbp.trees") x_info <- read.table("certhia_50kbp_tree_info.txt", sep="\t", stringsAsFactors=F) x_popmap <- read.table("gsi_popmap.txt", sep="\t", stringsAsFactors=F) x_output <- "b_gsi_50kbp_output.txt" # define outgroup outgroup <- x_popmap[x_popmap[,2] == "outgroup", 1] # remove outgroup from popmap x_popmap <- x_popmap[x_popmap[,2] != "outgroup", ] # write initial line of output write(c("chr", "start", "end", "pop", "gsi"), file=x_output, sep="\t", ncolumns=5) # loop for each tree for(a in 1:length(x_trees)) { # select the tree x <- x_trees[a][[1]] # reroot x <- midpoint.root(x) x <- root(x, outgroup, resolve.root=T) # define the groups groups <- unique(x_popmap[,2]) # loop for each group of interest for(g in 1:length(groups)) { # define the group of interest group_of_interest <- x_popmap[x_popmap[,2] == groups[g],1] # calculate MRCA of group of interest group_mrca <- getMRCA(x, tip = group_of_interest) # calculate number of ndoes to reach common ancestor for each individual in group # and then remove redundant nodes nodes_needed <- c() for(b in 1:length(group_of_interest)) { # what is the number of this individual? b_number <- match(group_of_interest[b], x$tip.label) # loop throup edge table until reaching MRCA while_loop <- 0 while(while_loop != group_mrca) { # add node nodes_needed <- c(nodes_needed, x$edge[x$edge[,2]==b_number,1]) # change new number to that node and update the while_loop object to the node b_number <- x$edge[x$edge[,2]==b_number,1] while_loop <- b_number } # add the MRCA to the nodes_needed object nodes_needed <- c(nodes_needed, group_mrca) # only keep unique nodes nodes_needed <- unique(nodes_needed) } # calculate gsi # gs gs <- (length(group_of_interest) - 1) / length(nodes_needed) # max gs = 1 max_gs <- 1 # min gs = minimum number of nodes to connect all individuals (n - 1) / total number of nodes min_gs <- (length(group_of_interest) - 1) / length(unique(x$edge[,1])) # equation 4 of Cummings et al. 2008 (GSI paper) gsi <- (gs - min_gs) / (max_gs - min_gs) # write output write(c(x_info[a,1], x_info[a,2], x_info[a,3], groups[g], gsi), file=x_output, sep="\t", ncolumns=5, append=T) } } # read in trees, info, and popmap for 100kbp trees x_trees <- read.tree("certhia_100kbp.trees") x_info <- read.table("certhia_100kbp_tree_info.txt", sep="\t", stringsAsFactors=F) x_popmap <- read.table("gsi_popmap.txt", sep="\t", stringsAsFactors=F) x_output <- "b_gsi_100kbp_output.txt" # define outgroup outgroup <- x_popmap[x_popmap[,2] == "outgroup", 1] # remove outgroup from popmap x_popmap <- x_popmap[x_popmap[,2] != "outgroup", ] # write initial line of output write(c("chr", "start", "end", "pop", "gsi"), file=x_output, sep="\t", ncolumns=5) # loop for each tree for(a in 1:length(x_trees)) { # select the tree x <- x_trees[a][[1]] # reroot x <- midpoint.root(x) x <- root(x, outgroup, resolve.root=T) # define the groups groups <- unique(x_popmap[,2]) # loop for each group of interest for(g in 1:length(groups)) { # define the group of interest group_of_interest <- x_popmap[x_popmap[,2] == groups[g],1] # calculate MRCA of group of interest group_mrca <- getMRCA(x, tip = group_of_interest) # calculate number of ndoes to reach common ancestor for each individual in group # and then remove redundant nodes nodes_needed <- c() for(b in 1:length(group_of_interest)) { # what is the number of this individual? b_number <- match(group_of_interest[b], x$tip.label) # loop throup edge table until reaching MRCA while_loop <- 0 while(while_loop != group_mrca) { # add node nodes_needed <- c(nodes_needed, x$edge[x$edge[,2]==b_number,1]) # change new number to that node and update the while_loop object to the node b_number <- x$edge[x$edge[,2]==b_number,1] while_loop <- b_number } # add the MRCA to the nodes_needed object nodes_needed <- c(nodes_needed, group_mrca) # only keep unique nodes nodes_needed <- unique(nodes_needed) } # calculate gsi # gs gs <- (length(group_of_interest) - 1) / length(nodes_needed) # max gs = 1 max_gs <- 1 # min gs = minimum number of nodes to connect all individuals (n - 1) / total number of nodes min_gs <- (length(group_of_interest) - 1) / length(unique(x$edge[,1])) # equation 4 of Cummings et al. 2008 (GSI paper) gsi <- (gs - min_gs) / (max_gs - min_gs) # write output write(c(x_info[a,1], x_info[a,2], x_info[a,3], groups[g], gsi), file=x_output, sep="\t", ncolumns=5, append=T) } }
{"hexsha": "cc6704ea0a28cf05bddcc1176ce25f00e4487c5c", "size": 5037, "ext": "r", "lang": "R", "max_stars_repo_path": "08e_gsi.r", "max_stars_repo_name": "jdmanthey/certhia_phylogeography", "max_stars_repo_head_hexsha": "7830f496419252149f5e4f1a657ce19c80ec05da", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "08e_gsi.r", "max_issues_repo_name": "jdmanthey/certhia_phylogeography", "max_issues_repo_head_hexsha": "7830f496419252149f5e4f1a657ce19c80ec05da", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "08e_gsi.r", "max_forks_repo_name": "jdmanthey/certhia_phylogeography", "max_forks_repo_head_hexsha": "7830f496419252149f5e4f1a657ce19c80ec05da", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-20T17:45:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T17:45:18.000Z", "avg_line_length": 29.1156069364, "max_line_length": 112, "alphanum_fraction": 0.6799682351, "num_tokens": 1558}
#!/usr/bin/env python from setuptools import setup from setuptools.command.build_ext import build_ext as _build_ext class build_ext(_build_ext): def finalize_options(self): _build_ext.finalize_options(self) # Prevent numpy from thinking it is still in its setup process: __builtins__.__NUMPY_SETUP__ = False import numpy self.include_dirs.append(numpy.get_include()) REQUIRES = ['numpy', 'asteval', 'astropy', 'astroquery', 'batman-package', 'bibtexparser', 'bokeh', 'cython', 'flask', 'h5py', 'lmfit', 'matplotlib', 'numba', 'pandas', 'pysynphot', 'scipy', 'sphinx', 'svo_filters'] SETUP_REQUIRES = ['numpy'] setup(name='exoctk', version='0.2.2', description='Observation reduction and planning tools for exoplanet science', cmdclass={'build_ext': build_ext}, setup_requires=SETUP_REQUIRES, install_requires=REQUIRES, author='The ExoCTK Group', author_email='exoctk@gmail.com', license='MIT', url='https://github.com/ExoCTK/exoctk', long_description='', zip_safe=True, use_2to3=False )
{"hexsha": "875008626982638d5fdfc701f011c21c62054103", "size": 1312, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "jmatuskey/exoctk", "max_stars_repo_head_hexsha": "bfd7e5100014048f73baf23c964598381f691ffd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "setup.py", "max_issues_repo_name": "jmatuskey/exoctk", "max_issues_repo_head_hexsha": "bfd7e5100014048f73baf23c964598381f691ffd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "setup.py", "max_forks_repo_name": "jmatuskey/exoctk", "max_forks_repo_head_hexsha": "bfd7e5100014048f73baf23c964598381f691ffd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7755102041, "max_line_length": 83, "alphanum_fraction": 0.5777439024, "include": true, "reason": "import numpy", "num_tokens": 306}
\chapter{Related Works} \label{ch:review} In this chapter, I select the most outstanding studies based on a self-defined criteria (either published in a set of pre-selected venues or performed the highest impact by receiving at least fifty citations). To better introduce these papers in a well-organized manner, I categorize them into five following tracks borrowing ideas from aforementioned survey studies \cite{fortunato2010community, fortunato2016community, coscia2011classification}. In detail, papers in the Graph Type track focus on detecting communities in different types of graphs, such as heterogeneous or sparse graphs. In the Task track, the selected studies aim to solve particular tasks in community detection, such as deciding the correct number of communities. In the Methodological track, the introduced studies solve the general community detection problem via different types of model frameworks such as Modularity or spectral methods. In the Application track, selected studies discuss community detection applications and how to apply them to other disciplines. In the last Evaluation track, it lists papers to summarize the evaluation metrics widely used for model justification and comparison. \input{chapter2/chapter2.1.tex} \input{chapter2/chapter2.2.tex} \input{chapter2/chapter2.3.tex} \input{chapter2/chapter2.4.tex} \input{chapter2/chapter2.5.tex}
{"hexsha": "db0d95b793c28e9b96dc41bf48d057646cb3b368", "size": 1384, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapter2/chapter2.tex", "max_stars_repo_name": "RoyZhengGao/thesis", "max_stars_repo_head_hexsha": "b73b473d5b8a5d948080420edeb899c60d88c9e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapter2/chapter2.tex", "max_issues_repo_name": "RoyZhengGao/thesis", "max_issues_repo_head_hexsha": "b73b473d5b8a5d948080420edeb899c60d88c9e9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapter2/chapter2.tex", "max_forks_repo_name": "RoyZhengGao/thesis", "max_forks_repo_head_hexsha": "b73b473d5b8a5d948080420edeb899c60d88c9e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 125.8181818182, "max_line_length": 1177, "alphanum_fraction": 0.8244219653, "num_tokens": 285}
import os import pickle import sys import warnings from collections import OrderedDict import biosppy.signals.tools as st import numpy as np import wfdb from biosppy.signals.ecg import correct_rpeaks, hamilton_segmenter from hrv.classical import frequency_domain, time_domain from scipy.signal import medfilt from tqdm import tqdm warnings.filterwarnings(action="ignore") base_dir = "dataset" fs = 100 # ECG sample frequency hr_min = 20 hr_max = 300 def feature_extraction(recording, signal, labels): data = [] for i in tqdm(range(len(labels)), desc=recording, file=sys.stdout): segment = signal[i * fs * 60:(i + 1) * fs * 60] segment, _, _ = st.filter_signal(segment, ftype='FIR', band='bandpass', order=int(0.3 * fs), frequency=[3, 45], sampling_rate=fs) # Finding R peaks rpeaks, = hamilton_segmenter(segment, sampling_rate=fs) rpeaks, = correct_rpeaks(segment, rpeaks, sampling_rate=fs, tol=0.1) # Extracting feature label = 0 if labels[i] == "N" else 1 if 40 <= len(rpeaks) <= 200: # Remove abnormal R peaks rri_tm, rri = rpeaks[1:] / float(fs), np.diff(rpeaks, axis=-1) / float(fs) rri = medfilt(rri, kernel_size=3) edr_tm, edr = rpeaks / float(fs), segment[rpeaks] # Remove physiologically impossible HR signal if np.all(np.logical_and(60 / rri >= hr_min, 60 / rri <= hr_max)): rri_time_features, rri_frequency_features = time_domain(rri * 1000), frequency_domain(rri, rri_tm) edr_frequency_features = frequency_domain(edr, edr_tm) # 6 + 6 + 6 + 1 = 19 data.append([ rri_time_features["rmssd"], rri_time_features["sdnn"], rri_time_features["nn50"], rri_time_features["pnn50"], rri_time_features["mrri"], rri_time_features["mhr"], rri_frequency_features["vlf"] / rri_frequency_features["total_power"], rri_frequency_features["lf"] / rri_frequency_features["total_power"], rri_frequency_features["hf"] / rri_frequency_features["total_power"], rri_frequency_features["lf_hf"], rri_frequency_features["lfnu"], rri_frequency_features["hfnu"], edr_frequency_features["vlf"] / edr_frequency_features["total_power"], edr_frequency_features["lf"] / edr_frequency_features["total_power"], edr_frequency_features["hf"] / edr_frequency_features["total_power"], edr_frequency_features["lf_hf"], edr_frequency_features["lfnu"], edr_frequency_features["hfnu"], label ]) else: data.append([np.nan] * 18 + [label]) else: data.append([np.nan] * 18 + [label]) data = np.array(data, dtype="float") return data if __name__ == "__main__": apnea_ecg = OrderedDict() # train data recordings = [ "a01", "a02", "a03", "a04", "a05", "a06", "a07", "a08", "a09", "a10", "a11", "a12", "a13", "a14", "a15", "a16", "a17", "a18", "a19", "a20", "b01", "b02", "b03", "b04", "b05", "c01", "c02", "c03", "c04", "c05", "c06", "c07", "c08", "c09", "c10" ] for recording in recordings: signal = wfdb.rdrecord(os.path.join(base_dir, recording), channels=[0]).p_signal[:, 0] labels = wfdb.rdann(os.path.join(base_dir, recording), extension="apn").symbol apnea_ecg[recording] = feature_extraction(recording, signal, labels) print() # test data recordings = [ "x01", "x02", "x03", "x04", "x05", "x06", "x07", "x08", "x09", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29", "x30", "x31", "x32", "x33", "x34", "x35" ] answers = {} filename = os.path.join(base_dir, "event-2-answers") with open(filename, "r") as f: for answer in f.read().split("\n\n"): answers[answer[:3]] = list("".join(answer.split()[2::2])) for recording in recordings: signal = wfdb.rdrecord(os.path.join(base_dir, recording), channels=[0]).p_signal[:, 0] labels = answers[recording] apnea_ecg[recording] = feature_extraction(recording, signal, labels) with open(os.path.join(base_dir, "apnea-ecg.pkl"), "wb") as f: pickle.dump(apnea_ecg, f, protocol=2) print("ok")
{"hexsha": "959aea6673bc315fd2a49870629b49b87e1b393a", "size": 4634, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocessing.py", "max_stars_repo_name": "JackAndCole/Detection-of-sleep-apnea-from-single-lead-ECG-signal-using-a-time-window-artificial-neural-network", "max_stars_repo_head_hexsha": "692bb7d969b7eb4a0ad9b221660901a863bc76e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-01-22T03:23:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-26T05:02:10.000Z", "max_issues_repo_path": "preprocessing.py", "max_issues_repo_name": "JackAndCole/Detection-of-sleep-apnea-from-single-lead-ECG-signal-using-a-time-window-artificial-neural-network", "max_issues_repo_head_hexsha": "692bb7d969b7eb4a0ad9b221660901a863bc76e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocessing.py", "max_forks_repo_name": "JackAndCole/Detection-of-sleep-apnea-from-single-lead-ECG-signal-using-a-time-window-artificial-neural-network", "max_forks_repo_head_hexsha": "692bb7d969b7eb4a0ad9b221660901a863bc76e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-29T06:32:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-29T06:32:24.000Z", "avg_line_length": 44.9902912621, "max_line_length": 120, "alphanum_fraction": 0.5791972378, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1300}
import os import torch import torch.nn.functional as F import random import numpy as np import pandas as pd from config import Config from dataset import THUMOSInferenceDataset, inference_collate_fn from model import SSAD from utils import post_process, temporal_nms os.environ["CUDA_VISIBLE_DEVICES"] = "1" device = torch.device('cuda') torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = True torch.set_default_tensor_type('torch.FloatTensor') def inference(config): # setup data_loader instances inference_loader = torch.utils.data.DataLoader(THUMOSInferenceDataset(config), batch_size=config.batch_size, shuffle=False, num_workers=8, pin_memory=True, drop_last=False, collate_fn=inference_collate_fn) # build model architecture and load checkpoint model = SSAD(config).to(device) checkpoint = torch.load(config.checkpoint_path + "/model_best.pth.tar") model.load_state_dict(checkpoint['state_dict']) model = model.to(device) model.eval() ''' ['xmin', 'xmax', 'conf', 'score_0', 'score_1', 'score_2', 'score_3', 'score_4', 'score_5', 'score_6', 'score_7', 'score_8', 'score_9', 'score_10', 'score_11', 'score_12', 'score_13', 'score_14', 'score_15', 'score_16', 'score_17', 'score_18', 'score_19', 'score_20'] ''' results = [] results_name = [] with torch.no_grad(): for n_iter, (batch_data, batch_video_names, batch_window_start) in enumerate(inference_loader): batch_data = batch_data.to(device) output_x, output_w, output_scores, output_labels = model(batch_data, device) output_labels = F.softmax(output_labels, dim=1) output_x = output_x.cpu().detach().numpy() output_w = output_w.cpu().detach().numpy() output_scores = output_scores.cpu().detach().numpy() output_labels = output_labels.cpu().detach().numpy() output_min = output_x - output_w / 2 output_max = output_x + output_w / 2 for ii in range(len(batch_video_names)): video_name = batch_video_names[ii] window_start = batch_window_start[ii] a_min = output_min[ii, :] a_max = output_max[ii, :] a_scores = output_scores[ii, :] a_labels = output_labels[ii, :, :] for jj in range(output_min.shape[-1]): corrected_min = max(a_min[jj] * config.window_size * config.unit_size, 0.) + window_start corrected_max = min(a_max[jj] * config.window_size * config.unit_size, config.window_size * config.unit_size) + window_start results_name.append([video_name]) results.append([corrected_min, corrected_max, a_scores[jj]] + a_labels[:, jj].tolist()) results_name = np.stack(results_name) results = np.stack(results) df = pd.DataFrame(results, columns=config.outdf_columns) df['video_name'] = results_name result_file = './results.txt' if os.path.isfile(result_file): os.remove(result_file) df = df[df.score_0 < config.filter_neg_threshold] df = df[df.conf > config.filter_conf_threshold] video_name_list = list(set(df.video_name.values[:])) for video_name in video_name_list: tmpdf = df[df.video_name == video_name] tmpdf = post_process(tmpdf, config) temporal_nms(config, tmpdf, result_file, video_name) if __name__ == '__main__': config = Config() random.seed(config.seed) np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) inference(config)
{"hexsha": "258481eae800585be3fbe02a71f65c1b76f3ba26", "size": 4062, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference.py", "max_stars_repo_name": "Rheelt/SSAD_pytorch", "max_stars_repo_head_hexsha": "785ec81432b706f393fef276ff55485b71fa3eb2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-07-09T07:59:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T16:37:54.000Z", "max_issues_repo_path": "inference.py", "max_issues_repo_name": "Rheelt/SSAD_pytorch", "max_issues_repo_head_hexsha": "785ec81432b706f393fef276ff55485b71fa3eb2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-12-28T04:57:57.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-10T13:46:02.000Z", "max_forks_repo_path": "inference.py", "max_forks_repo_name": "Rheelt/SSAD_pytorch", "max_forks_repo_head_hexsha": "785ec81432b706f393fef276ff55485b71fa3eb2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-07-11T09:40:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-06T14:44:58.000Z", "avg_line_length": 44.152173913, "max_line_length": 110, "alphanum_fraction": 0.6088133924, "include": true, "reason": "import numpy", "num_tokens": 875}
(* -------------------------------------------------------------------- *) From mathcomp Require Import all_ssreflect all_algebra bigenough. (* ------- *) Require Import finmap boolp reals. (* ------- *) Require (*--*) Setoid. (* -------------------------------------------------------------------- *) Set Implicit Arguments. Unset Strict Implicit. Unset Printing Implicit Defensive. Import GRing.Theory Num.Theory BigEnough. Local Open Scope ring_scope. Local Open Scope real_scope. (* -------------------------------------------------------------------- *) Section ToBeEventuallyMovedToBoolP. Context {T : Type} {P Q : T -> Prop}. Lemma asboolb (b : bool) : `[< b >] = b. Proof. by apply/asboolP/idP. Qed. (* TODO : add its friends... *) Lemma neg_or (A B : Prop) : ~ (A \/ B) <-> ~ A /\ ~ B. Proof. split; last by case=> [nA nB]; case. by move=> nAoB; split => ?; apply: nAoB; [left| right]. Qed. Lemma existsNP : ~ (exists x, P x) -> forall x, ~ P x. Proof. by move/asboolPn/forallp_asboolPn. Qed. Lemma exists2NP : ~ (exists2 x, P x & Q x) -> forall x, ~ P x \/ ~ Q x. Proof. apply: contrapR; case/asboolPn/existsp_asboolPn=> [x]. by case/neg_or => /contrapT Px /contrapT Qx; exists x. Qed. End ToBeEventuallyMovedToBoolP. (* -------------------------------------------------------------------- *) Section ProofIrrelevantChoice. Context {T : choiceType}. Lemma existsP (P : T -> Prop) : (exists x, P x) -> {x : T | P x}. Proof. move/asboolP/exists_asboolP=> h; have/asboolP hxh := (xchooseP h). by exists (xchoose h). Qed. Lemma existsTP (P : T -> Prop) : { x : T | P x } + (forall x, ~ P x). Proof. case: (boolP `[<exists x : T, P x>]) => [/exists_asboolP | /asboolPn] h. by case/existsP: h => w Pw; left; exists w; apply/asboolP. by right=> x Px; apply/h; exists x. Qed. End ProofIrrelevantChoice. (* -------------------------------------------------------------------- *) Section PredSubtype. Section Def. Variable T : Type. Variable E : pred T. Record pred_sub : Type := PSubSub { rsval : T; rsvalP : rsval \in E }. Coercion rsval : pred_sub >-> T. Canonical pred_sub_subType := Eval hnf in [subType for rsval]. End Def. Definition pred_sub_eqMixin (T : eqType) (E : pred T) := Eval hnf in [eqMixin of pred_sub E by <:]. Canonical pred_sub_eqType (T : eqType) (E : pred T) := Eval hnf in EqType (@pred_sub T E) (pred_sub_eqMixin E). Definition pred_sub_choiceMixin (T : choiceType) (E : pred T) := Eval hnf in [choiceMixin of pred_sub E by <:]. Canonical pred_sub_choiceType (T : choiceType) (E : pred T) := Eval hnf in ChoiceType (@pred_sub T E) (pred_sub_choiceMixin E). Definition pred_sub_countMixin (T : countType) (E : pred T) := Eval hnf in [countMixin of pred_sub E by <:]. Canonical pred_sub_countType (T : countType) (E : pred T) := Eval hnf in CountType (@pred_sub T E) (pred_sub_countMixin E). End PredSubtype. Notation "[ 'psub' E ]" := (@pred_sub _ E) (format "[ 'psub' E ]"). (* -------------------------------------------------------------------- *) Section PIncl. Variables (T : Type) (E F : pred T) (le : {subset E <= F}). Definition pincl (x : [psub E]) : [psub F] := PSubSub (le (valP x)). End PIncl. (* -------------------------------------------------------------------- *) Section Countable. Variable (T : Type) (E : pred T). CoInductive countable : Type := Countable (rpickle : [psub E] -> nat) (runpickle : nat -> option [psub E]) of pcancel rpickle runpickle. Definition rpickle (c : countable) := let: Countable p _ _ := c in p. Definition runpickle (c : countable) := let: Countable _ p _ := c in p. Lemma rpickleK c: pcancel (rpickle c) (runpickle c). Proof. by case: c. Qed. End Countable. (* -------------------------------------------------------------------- *) Section CountableTheory. Lemma countable_countable (T : countType) (E : pred T) : countable E. Proof. by exists pickle unpickle; apply/pickleK. Qed. Section CanCountable. Variables (T : Type) (U : countType) (E : pred T). Variables (f : [psub E] -> U) (g : U -> [psub E]). Lemma can_countable : cancel f g -> countable E. Proof. pose p := pickle \o f; pose u n := omap g (unpickle n). move=> can_fg; apply (@Countable _ E p u) => x. by rewrite {}/u {}/p /= pickleK /= can_fg. Qed. End CanCountable. Section CountType. Variables (T : eqType) (E : pred T) (c : countable E). Definition countable_countMixin := CountMixin (rpickleK c). Definition countable_choiceMixin := CountChoiceMixin countable_countMixin. Definition countable_choiceType := ChoiceType [psub E] countable_choiceMixin. Definition countable_countType := CountType countable_choiceType countable_countMixin. End CountType. End CountableTheory. Notation "[ 'countable' 'of' c ]" := (countable_countType c) (format "[ 'countable' 'of' c ]"). (* -------------------------------------------------------------------- *) Section Finite. Variables (T : eqType). CoInductive finite (E : pred T) : Type := | Finite s of uniq s & {subset E <= s}. End Finite. (* -------------------------------------------------------------------- *) Section FiniteTheory. Context {T : choiceType}. Lemma finiteP (E : pred T) : (exists s : seq T, {subset E <= s}) -> finite E. Proof. case/existsP=> s sEs; exists (undup s); first by rewrite undup_uniq. by move=> x; rewrite mem_undup; exact: sEs. Qed. Lemma finiteNP (E : pred T): (forall s : seq T, ~ {subset E <= s}) -> forall n, exists s : seq T, [/\ size s = n, uniq s & {subset s <= E}]. Proof. move=> finN; elim=> [|n [s] [<- uq_s sE]]; first by exists [::]. have [x sxN xE]: exists2 x, x \notin s & x \in E. apply: contrapR (finN (filter (mem E) s)) => /exists2NP finE x Ex. move/or_asboolP: (finE x). by rewrite !asbool_neg !asboolb negbK Ex mem_filter orbF [(mem E) x]Ex. exists (x :: s) => /=; rewrite sxN; split=> // y. by rewrite in_cons => /orP[/eqP->//|/sE]. Qed. End FiniteTheory. (* -------------------------------------------------------------------- *) Section FiniteCountable. Variables (T : eqType) (E : pred T). Lemma finite_countable : finite E -> countable E. Proof. case=> s uqs Es; pose t := pmap (fun x => (insub x : option [psub E])) s. pose f x := index x t; pose g i := nth None [seq Some x | x <- t] i. apply (@Countable _ E f g) => x; rewrite {}/f {}/g /=. have x_in_t: x \in t; first case: x => x h. by rewrite {}/t mem_pmap_sub /= Es. by rewrite (nth_map x) ?index_mem ?nth_index. Qed. End FiniteCountable. (* -------------------------------------------------------------------- *) Section CountSub. Variables (T : eqType) (E F : pred T). Lemma countable_sub: {subset E <= F} -> countable F -> countable E. Proof. move=> le_EF [f g fgK]; pose f' (x : [psub E]) := f (pincl le_EF x). pose g' x := obind (insub (sT := [subType of [psub E]])) (omap val (g x)). by exists f' g' => x; rewrite /f' /g' fgK /= valK. Qed. End CountSub. (* -------------------------------------------------------------------- *) Section CountableUnion. Variables (T : eqType) (E : nat -> pred T). Hypothesis cE : forall i, countable (E i). Lemma cunion_countable : countable [pred x | `[exists i, x \in E i]]. Proof. pose S := { i : nat & [countable of cE i] }; set F := [pred x | _]. have H: forall (x : [psub F]), exists i : nat, val x \in E i. by case=> x /= /existsbP[i] Eix; exists i. have G: forall (x : S), val (tagged x) \in F. by case=> i [x /= Eix]; apply/existsbP; exists i. pose f (x : [psub F]) : S := Tagged (fun i => [psub E i]) (PSubSub (xchooseP (H x))). pose g (x : S) := PSubSub (G x). by have /can_countable: cancel f g by case=> x hx; apply/val_inj. Qed. End CountableUnion.
{"author": "ejgallego", "repo": "coq-alternate-reals", "sha": "8e1ad799ae9ae80d3c1d97d0a5f5b6d772eb6e01", "save_path": "github-repos/coq/ejgallego-coq-alternate-reals", "path": "github-repos/coq/ejgallego-coq-alternate-reals/coq-alternate-reals-8e1ad799ae9ae80d3c1d97d0a5f5b6d772eb6e01/src/discrete.v"}