[STATEMENT] lemma dynmethd_access_prop: assumes statM: "methd G statC sig = Some statM" and stat_acc: "G\<turnstile>Methd sig statM of statC accessible_from accC" and dynM: "dynmethd G statC dynC sig = Some dynM" and wf: "wf_prog G" shows "G\<turnstile>Methd sig dynM in dynC dyn_accessible_from accC" [PROOF STATE] proof (prove) goal (1 subgoal): 1. G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] from wf [PROOF STATE] proof (chain) picking this: wf_prog G [PROOF STEP] have ws: "ws_prog G" [PROOF STATE] proof (prove) using this: wf_prog G goal (1 subgoal): 1. ws_prog G [PROOF STEP] .. [PROOF STATE] proof (state) this: ws_prog G goal (1 subgoal): 1. G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] from dynM [PROOF STATE] proof (chain) picking this: dynmethd G statC dynC sig = Some dynM [PROOF STEP] have subclseq: "G\<turnstile>dynC \<preceq>\<^sub>C statC" [PROOF STATE] proof (prove) using this: dynmethd G statC dynC sig = Some dynM goal (1 subgoal): 1. G\<turnstile>dynC\<preceq>\<^sub>C statC [PROOF STEP] by (auto simp add: dynmethd_def) [PROOF STATE] proof (state) this: G\<turnstile>dynC\<preceq>\<^sub>C statC goal (1 subgoal): 1. G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] from stat_acc [PROOF STATE] proof (chain) picking this: G \<turnstile>Methd sig statM of statC accessible_from accC [PROOF STEP] have is_cls_statC: "is_class G statC" [PROOF STATE] proof (prove) using this: G \<turnstile>Methd sig statM of statC accessible_from accC goal (1 subgoal): 1. is_class G statC [PROOF STEP] by (auto dest: accessible_from_commonD member_of_is_classD) [PROOF STATE] proof (state) this: is_class G statC goal (1 subgoal): 1. G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] with subclseq [PROOF STATE] proof (chain) picking this: G\<turnstile>dynC\<preceq>\<^sub>C statC is_class G statC [PROOF STEP] have is_cls_dynC: "is_class G dynC" [PROOF STATE] proof (prove) using this: G\<turnstile>dynC\<preceq>\<^sub>C statC is_class G statC goal (1 subgoal): 1. is_class G dynC [PROOF STEP] by (rule subcls_is_class2) [PROOF STATE] proof (state) this: is_class G dynC goal (1 subgoal): 1. G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] from is_cls_statC statM wf [PROOF STATE] proof (chain) picking this: is_class G statC methd G statC sig = Some statM wf_prog G [PROOF STEP] have member_statC: "G\<turnstile>Methd sig statM member_of statC" [PROOF STATE] proof (prove) using this: is_class G statC methd G statC sig = Some statM wf_prog G goal (1 subgoal): 1. G \<turnstile>Methd sig statM member_of statC [PROOF STEP] by (auto intro: methd_member_of) [PROOF STATE] proof (state) this: G \<turnstile>Methd sig statM member_of statC goal (1 subgoal): 1. G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] from stat_acc [PROOF STATE] proof (chain) picking this: G \<turnstile>Methd sig statM of statC accessible_from accC [PROOF STEP] have statC_acc: "G\<turnstile>Class statC accessible_in (pid accC)" [PROOF STATE] proof (prove) using this: G \<turnstile>Methd sig statM of statC accessible_from accC goal (1 subgoal): 1. G \<turnstile> Class statC accessible_in pid accC [PROOF STEP] by (auto dest: accessible_from_commonD) [PROOF STATE] proof (state) this: G \<turnstile> Class statC accessible_in pid accC goal (1 subgoal): 1. 
G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] from statM subclseq is_cls_statC ws [PROOF STATE] proof (chain) picking this: methd G statC sig = Some statM G\<turnstile>dynC\<preceq>\<^sub>C statC is_class G statC ws_prog G [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: methd G statC sig = Some statM G\<turnstile>dynC\<preceq>\<^sub>C statC is_class G statC ws_prog G goal (1 subgoal): 1. G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] proof (cases rule: dynmethd_cases) [PROOF STATE] proof (state) goal (2 subgoals): 1. dynmethd G statC dynC sig = Some statM \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC 2. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] case Static [PROOF STATE] proof (state) this: dynmethd G statC dynC sig = Some statM goal (2 subgoals): 1. dynmethd G statC dynC sig = Some statM \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC 2. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] assume dynmethd: "dynmethd G statC dynC sig = Some statM" [PROOF STATE] proof (state) this: dynmethd G statC dynC sig = Some statM goal (2 subgoals): 1. dynmethd G statC dynC sig = Some statM \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC 2. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] with dynM [PROOF STATE] proof (chain) picking this: dynmethd G statC dynC sig = Some dynM dynmethd G statC dynC sig = Some statM [PROOF STEP] have eq_dynM_statM: "dynM=statM" [PROOF STATE] proof (prove) using this: dynmethd G statC dynC sig = Some dynM dynmethd G statC dynC sig = Some statM goal (1 subgoal): 1. dynM = statM [PROOF STEP] by simp [PROOF STATE] proof (state) this: dynM = statM goal (2 subgoals): 1. dynmethd G statC dynC sig = Some statM \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC 2. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] with stat_acc subclseq wf [PROOF STATE] proof (chain) picking this: G \<turnstile>Methd sig statM of statC accessible_from accC G\<turnstile>dynC\<preceq>\<^sub>C statC wf_prog G dynM = statM [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: G \<turnstile>Methd sig statM of statC accessible_from accC G\<turnstile>dynC\<preceq>\<^sub>C statC wf_prog G dynM = statM goal (1 subgoal): 1. G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] by (auto intro: static_to_dynamic_accessible_from) [PROOF STATE] proof (state) this: G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC goal (1 subgoal): 1. \<And>dynMa. 
\<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] case (Overrides newM) [PROOF STATE] proof (state) this: dynmethd G statC dynC sig = Some newM newM \<noteq> statM G \<turnstile> qmdecl sig newM overrides qmdecl sig statM goal (1 subgoal): 1. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] assume dynmethd: "dynmethd G statC dynC sig = Some newM" [PROOF STATE] proof (state) this: dynmethd G statC dynC sig = Some newM goal (1 subgoal): 1. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] assume override: "G,sig\<turnstile>newM overrides statM" [PROOF STATE] proof (state) this: G \<turnstile> qmdecl sig newM overrides qmdecl sig statM goal (1 subgoal): 1. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] assume neq: "newM\<noteq>statM" [PROOF STATE] proof (state) this: newM \<noteq> statM goal (1 subgoal): 1. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] from dynmethd dynM [PROOF STATE] proof (chain) picking this: dynmethd G statC dynC sig = Some newM dynmethd G statC dynC sig = Some dynM [PROOF STEP] have eq_dynM_newM: "dynM=newM" [PROOF STATE] proof (prove) using this: dynmethd G statC dynC sig = Some newM dynmethd G statC dynC sig = Some dynM goal (1 subgoal): 1. dynM = newM [PROOF STEP] by simp [PROOF STATE] proof (state) this: dynM = newM goal (1 subgoal): 1. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] from dynmethd eq_dynM_newM wf is_cls_statC [PROOF STATE] proof (chain) picking this: dynmethd G statC dynC sig = Some newM dynM = newM wf_prog G is_class G statC [PROOF STEP] have "G\<turnstile>Methd sig dynM member_in dynC" [PROOF STATE] proof (prove) using this: dynmethd G statC dynC sig = Some newM dynM = newM wf_prog G is_class G statC goal (1 subgoal): 1. G \<turnstile>Methd sig dynM member_in dynC [PROOF STEP] by (auto intro: dynmethd_member_in) [PROOF STATE] proof (state) this: G \<turnstile>Methd sig dynM member_in dynC goal (1 subgoal): 1. \<And>dynMa. 
\<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] moreover [PROOF STATE] proof (state) this: G \<turnstile>Methd sig dynM member_in dynC goal (1 subgoal): 1. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] from subclseq [PROOF STATE] proof (chain) picking this: G\<turnstile>dynC\<preceq>\<^sub>C statC [PROOF STEP] have "G\<turnstile>dynC\<prec>\<^sub>C statC" [PROOF STATE] proof (prove) using this: G\<turnstile>dynC\<preceq>\<^sub>C statC goal (1 subgoal): 1. G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] proof (cases rule: subclseq_cases) [PROOF STATE] proof (state) goal (2 subgoals): 1. dynC = statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC 2. G\<turnstile>dynC\<prec>\<^sub>C statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] case Eq [PROOF STATE] proof (state) this: dynC = statC goal (2 subgoals): 1. dynC = statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC 2. G\<turnstile>dynC\<prec>\<^sub>C statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] assume "dynC=statC" [PROOF STATE] proof (state) this: dynC = statC goal (2 subgoals): 1. dynC = statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC 2. G\<turnstile>dynC\<prec>\<^sub>C statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] moreover [PROOF STATE] proof (state) this: dynC = statC goal (2 subgoals): 1. dynC = statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC 2. G\<turnstile>dynC\<prec>\<^sub>C statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] from is_cls_statC [PROOF STATE] proof (chain) picking this: is_class G statC [PROOF STEP] obtain c where "class G statC = Some c" [PROOF STATE] proof (prove) using this: is_class G statC goal (1 subgoal): 1. (\<And>c. class G statC = Some c \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by auto [PROOF STATE] proof (state) this: class G statC = Some c goal (2 subgoals): 1. dynC = statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC 2. G\<turnstile>dynC\<prec>\<^sub>C statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] moreover [PROOF STATE] proof (state) this: class G statC = Some c goal (2 subgoals): 1. dynC = statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC 2. G\<turnstile>dynC\<prec>\<^sub>C statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] note statM ws dynmethd [PROOF STATE] proof (state) this: methd G statC sig = Some statM ws_prog G dynmethd G statC dynC sig = Some newM goal (2 subgoals): 1. dynC = statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC 2. G\<turnstile>dynC\<prec>\<^sub>C statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: dynC = statC class G statC = Some c methd G statC sig = Some statM ws_prog G dynmethd G statC dynC sig = Some newM [PROOF STEP] have "newM=statM" [PROOF STATE] proof (prove) using this: dynC = statC class G statC = Some c methd G statC sig = Some statM ws_prog G dynmethd G statC dynC sig = Some newM goal (1 subgoal): 1. 
newM = statM [PROOF STEP] by (auto simp add: dynmethd_C_C) [PROOF STATE] proof (state) this: newM = statM goal (2 subgoals): 1. dynC = statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC 2. G\<turnstile>dynC\<prec>\<^sub>C statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] with neq [PROOF STATE] proof (chain) picking this: newM \<noteq> statM newM = statM [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: newM \<noteq> statM newM = statM goal (1 subgoal): 1. G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] by (contradiction) [PROOF STATE] proof (state) this: G\<turnstile>dynC\<prec>\<^sub>C statC goal (1 subgoal): 1. G\<turnstile>dynC\<prec>\<^sub>C statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. G\<turnstile>dynC\<prec>\<^sub>C statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] case Subcls [PROOF STATE] proof (state) this: G\<turnstile>dynC\<prec>\<^sub>C statC goal (1 subgoal): 1. G\<turnstile>dynC\<prec>\<^sub>C statC \<Longrightarrow> G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] then [PROOF STATE] proof (chain) picking this: G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: G\<turnstile>dynC\<prec>\<^sub>C statC goal (1 subgoal): 1. G\<turnstile>dynC\<prec>\<^sub>C statC [PROOF STEP] . [PROOF STATE] proof (state) this: G\<turnstile>dynC\<prec>\<^sub>C statC goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: G\<turnstile>dynC\<prec>\<^sub>C statC goal (1 subgoal): 1. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] moreover [PROOF STATE] proof (state) this: G\<turnstile>dynC\<prec>\<^sub>C statC goal (1 subgoal): 1. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] from stat_acc wf [PROOF STATE] proof (chain) picking this: G \<turnstile>Methd sig statM of statC accessible_from accC wf_prog G [PROOF STEP] have "G\<turnstile>Methd sig statM in statC dyn_accessible_from accC" [PROOF STATE] proof (prove) using this: G \<turnstile>Methd sig statM of statC accessible_from accC wf_prog G goal (1 subgoal): 1. G \<turnstile>Methd sig statM in statC dyn_accessible_from accC [PROOF STEP] by (blast intro: static_to_dynamic_accessible_from) [PROOF STATE] proof (state) this: G \<turnstile>Methd sig statM in statC dyn_accessible_from accC goal (1 subgoal): 1. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] moreover [PROOF STATE] proof (state) this: G \<turnstile>Methd sig statM in statC dyn_accessible_from accC goal (1 subgoal): 1. \<And>dynMa. 
\<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] note override eq_dynM_newM [PROOF STATE] proof (state) this: G \<turnstile> qmdecl sig newM overrides qmdecl sig statM dynM = newM goal (1 subgoal): 1. \<And>dynMa. \<lbrakk>dynmethd G statC dynC sig = Some dynMa; dynMa \<noteq> statM; G \<turnstile> qmdecl sig dynMa overrides qmdecl sig statM\<rbrakk> \<Longrightarrow> G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: G \<turnstile>Methd sig dynM member_in dynC G\<turnstile>dynC\<prec>\<^sub>C statC G \<turnstile>Methd sig statM in statC dyn_accessible_from accC G \<turnstile> qmdecl sig newM overrides qmdecl sig statM dynM = newM [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: G \<turnstile>Methd sig dynM member_in dynC G\<turnstile>dynC\<prec>\<^sub>C statC G \<turnstile>Methd sig statM in statC dyn_accessible_from accC G \<turnstile> qmdecl sig newM overrides qmdecl sig statM dynM = newM goal (1 subgoal): 1. G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC [PROOF STEP] by (cases dynM,cases statM) (auto intro: dyn_accessible_fromR.Overriding) [PROOF STATE] proof (state) this: G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: G \<turnstile>Methd sig dynM in dynC dyn_accessible_from accC goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 8157, "file": null, "length": 76}
[STATEMENT] lemma set_MkIde_elem_of_img: assumes "A \<subseteq> S.Univ" and "S.ide (S.MkIde (elem_of ` A))" shows "S.set (S.MkIde (elem_of ` A)) = A" [PROOF STATE] proof (prove) goal (1 subgoal): 1. S.set (S.MkIde (elem_of ` A)) = A [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. S.set (S.MkIde (elem_of ` A)) = A [PROOF STEP] have "S.Dom (S.MkIde (elem_of ` A)) = elem_of ` A" [PROOF STATE] proof (prove) goal (1 subgoal): 1. S.Dom (S.MkIde (elem_of ` A)) = elem_of ` A [PROOF STEP] by simp [PROOF STATE] proof (state) this: S.Dom (S.MkIde (elem_of ` A)) = elem_of ` A goal (1 subgoal): 1. S.set (S.MkIde (elem_of ` A)) = A [PROOF STEP] moreover [PROOF STATE] proof (state) this: S.Dom (S.MkIde (elem_of ` A)) = elem_of ` A goal (1 subgoal): 1. S.set (S.MkIde (elem_of ` A)) = A [PROOF STEP] have "arr_of ` elem_of ` A = A" [PROOF STATE] proof (prove) goal (1 subgoal): 1. arr_of ` elem_of ` A = A [PROOF STEP] using assms arr_of_elem_of [PROOF STATE] proof (prove) using this: A \<subseteq> S.Univ S.ide (S.MkIde (elem_of ` A)) ?t \<in> S.Univ \<Longrightarrow> arr_of (elem_of ?t) = ?t goal (1 subgoal): 1. arr_of ` elem_of ` A = A [PROOF STEP] by force [PROOF STATE] proof (state) this: arr_of ` elem_of ` A = A goal (1 subgoal): 1. S.set (S.MkIde (elem_of ` A)) = A [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: S.Dom (S.MkIde (elem_of ` A)) = elem_of ` A arr_of ` elem_of ` A = A [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: S.Dom (S.MkIde (elem_of ` A)) = elem_of ` A arr_of ` elem_of ` A = A goal (1 subgoal): 1. S.set (S.MkIde (elem_of ` A)) = A [PROOF STEP] using assms Setp_elem_of_img set_char S.ide_MkIde [PROOF STATE] proof (prove) using this: S.Dom (S.MkIde (elem_of ` A)) = elem_of ` A arr_of ` elem_of ` A = A A \<subseteq> S.Univ S.ide (S.MkIde (elem_of ` A)) ?A \<in> S.set ` Collect S.ide \<Longrightarrow> Setp (elem_of ` ?A) S.ide ?a \<Longrightarrow> S.set ?a = arr_of ` S.Dom ?a ?A \<in> Collect Setp \<Longrightarrow> S.ide (S.MkIde ?A) goal (1 subgoal): 1. S.set (S.MkIde (elem_of ` A)) = A [PROOF STEP] by auto [PROOF STATE] proof (state) this: S.set (S.MkIde (elem_of ` A)) = A goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 1100, "file": "Category3_SetCat", "length": 12}
#!/usr/bin/env python
from __future__ import print_function
import sys
import itertools
from copy import deepcopy

version_help = "Python 2.7 or 3.4+ required."
if sys.version_info[0] == 2:
    assert sys.version_info[1] == 7, version_help
elif sys.version_info[0] == 3:
    assert sys.version_info[1] >= 4, version_help
else:
    assert False, version_help

from math import *

golden = (1 + 5**0.5) / 2

# {{{ utility functions

def clog(x, base):
    assert type(x) is int
    assert x > 0
    assert type(base) is int
    assert base > 1
    return int(ceil(log(x, base)))

def flog(x, base):
    assert type(x) is int
    assert x > 0
    assert type(base) is int
    assert base > 1
    return int(floor(log(x, base)))

def int2base(x, base):
    assert type(x) is int
    assert x >= 0
    assert type(base) is int
    assert base > 1
    numerals = '0123456789abcdefghijklmnopqrstuvwxyz'
    if x == 0:
        return numerals[0]
    r = []
    while x:
        r.append(numerals[x % base])
        x //= base  # Integer division; plain /= yields a float under Python 3.
    r.reverse()
    return ''.join(r)

# }}} utility functions

# {{{ mappings
# Mappings are from leafnode index to function index.

def map_fwd(i, width, b):
    return i

def map_pingpong(i, width, b):
    n = b**width
    return i*2 if (i < n/2.0) else 2*(n-i)-1

def map_pongping(i, width, b):
    return int(b**width-((i+1)/2)) if (i % 2) else int(i/2)

def map_strideb(i, width, b):
    n = b**width
    return ((i*b) % n) + int((i*b) / n)

def map_strodeb(i, width, b):
    n = b**width
    return (int((i*n) / b) % n) + int(i/b)

def map_basebrev(i, width, b):
    d = int2base(i, b)                    # Represent in base b str from int.
    z = ('0'*(width - len(d)) + d)[::-1]  # Zero fill and reverse.
    return int(z, b)                      # Re-interpret int from base b str.

def map_grayfwd(i, width, b):
    r = 0
    for k in range(width):
        j = int(i/(b**k))
        d = (j-int(j/b)) % b
        r += d * b**k
    return r

def map_grayrev(i, width, b):
    r = 0
    for k in range(width):
        j = int(i/(b**k))
        d = (j-int(j/b)) % b
        r += d * b**(width-k-1)
    return r

available_mappings = [
    (map_fwd,      'FWD'),
    (map_basebrev, 'BASEBREV'),
    (map_pingpong, 'PINGPONG'),
    (map_pongping, 'PONGPING'),
    (map_strideb,  'STRIDEB'),
    (map_strodeb,  'STRODEB'),
    (map_grayfwd,  'GRAYFWD'),
    (map_grayrev,  'GRAYREV'),
]

# These base2 functions are elegant so kept for reference.
def map_binrev(i, width, b=0): # b is fixed at 2
    r = 0
    j = 0
    while (j < width):
        r |= ((i >> j) & 1) << (width - j - 1)
        j += 1
    return r

def map_bingrayfwd(i, width, b=0): # b is fixed at 2
    return (i ^ (i >> 1))

def map_bingrayrev(i, width, b=0): # b is fixed at 2
    r = 0
    j = 0
    while (j < width):
        r |= (((i ^ (i >> 1)) >> j) & 1) << (width - j - 1)
        j += 1
    return r

def map_alt_grayfwd(i, width, b):
    n = b**width
    gb = b**2
    numerals = '0123456789abcdefghijklmnopqrstuvwxyz'
    gnumerals = ''.join([numerals[(j-int(j/b)) % b] for j in range(gb)])
    if i == 0:
        return 0
    r = []
    while i > 0:
        r.append(gnumerals[i % gb])
        i //= b  # Integer division; plain /= yields a float under Python 3.
    r += numerals[0]*(width-len(r))
    r.reverse()
    r = ''.join(r)
    return int(r, b)

def map_alt_grayrev(i, width, b):
    n = b**width
    gb = b**2
    numerals = '0123456789abcdefghijklmnopqrstuvwxyz'
    gnumerals = ''.join([numerals[(j-int(j/b)) % b] for j in range(gb)])
    if i == 0:
        return 0
    r = []
    while i > 0:
        r.append(gnumerals[i % gb])
        i //= b  # Integer division; plain /= yields a float under Python 3.
    r += numerals[0]*(width-len(r))
    #r.reverse()
    r = ''.join(r)
    return int(r, b)

def print_maps(N_INs, bases, mappings, fd):
    for w in N_INs:
        for b in bases:
            n = clog(w, b)
            print("Maps for b=%d, w=%d." % (b, w), file=fd)
            fmt = " | ".join(['{:^9}' for m in mappings])
            head_line = [nm for fn, nm in mappings]
            print(fmt.format(*head_line), file=fd)
            print(fmt.format(*['-'*9 for m in mappings]), file=fd)
            for i in range(0, w):
                data_line = [str(fn(i, n, b)) for fn, nm in mappings]
                print(fmt.format(*data_line), file=fd)
            print('', file=fd)

# }}} mappings

def calculate_ops(b, w, fn): # {{{
    '''Return a list of operations with the index of the output wire (or
    None), and the indices (or None) of the input wires.
    If all input wires are None then the operation can be removed.
    '''
    n = b**clog(w, b)
    n_levels = clog(w, b)
    n_ops = int('1'*(n_levels), b)      # basic tree
    n_wires = int('1'*(n_levels+1), b)  # basic tree

    # Ops are numbered o with root node (bottom of tree) having the number 0.
    # outwires are numbered the same as their respective ops.
    # inwires are numbered as o*b+1..o*b+1+b
    # This definition can be used to generate basic tree for verilog etc.
    opnum_root = 0
    opnum_leafmax = n_ops - 1
    opnum_leafmin = opnum_leafmax / b

    # Calculate reduced tree connections.
    # Each op is (int/None outwire, [ints/Nones inwires])
    ops = [None for o in range(n_ops)]
    for o in range(n_ops)[::-1]:
        basic_inwires = range(o*b+1, o*b+1+b)
        if o >= opnum_leafmin:
            inwires = [i if fn(i-n_ops, n_levels, b) < w else None \
                       for i in basic_inwires]
        else:
            inwires = [ops[i][0] for i in basic_inwires]
        connected = [0 if (i is None) else 1 for i in inwires]
        n_connected = sum(connected)
        if n_connected == 0:
            outwire = None
        elif n_connected == 1:
            idx = connected.index(1)
            outwire = inwires[idx]  # Connect lone wire through.
            inwires[idx] = None     # and disconnect op above.
        else:
            outwire = o
        # Note that since inwires may have changed a recalculation of
        # n_connected might yield a different answer.
        ops[o] = (outwire, tuple(inwires))
    return tuple(ops)
# }}} calculate_ops

def calculate_opsizes(ops, b, a): # {{{
    '''Take a list of operations with the index of the output wire (or None),
    Return the size/cost/delay/capacitance associated with each operation as
    a list, for a particular value of alpha.
    '''
    # Get the number of connected inwires for each operation as a list.
    opins = [sum([0 if inwire is None else 1 for inwire in o[1]]) \
             for o in ops]
    #print(opins)
    ret = []
    for o in opins:
        unoptimisable = int(o > 1)
        fixalpha = a * unoptimisable
        c = unoptimisable * (o/float(b))**fixalpha
        ret.append(c)
    return ret
# }}} calculate_opsizes

def calculate_oppaths(b, w, fn): # {{{
    '''Take the base and width of inputs.
    Return the indices of the operations associated with each input.
    Paths are stored from leaf to root.
    '''
    n = b**clog(w, b)
    n_levels = clog(w, b)
    mapins = tuple(fn(i, n_levels, b) for i in range(n))
    #print(mapins)
    usedins = tuple(i for i,j in enumerate(mapins) if j < w)
    #print(usedins)
    oppaths = tuple([ int(i/b**(l+1)) + int('0'+'1'*(n_levels-l-1), b) \
                      for l in range(n_levels) ] \
                    for i in usedins)
    #print(oppaths)
    #assert len(oppaths) == w
    #for p in oppaths:
    #    assert len(tuple(p)) == n_levels
    return oppaths
# }}} calculate_oppaths

def calculate_t(oppaths, opsizes): # {{{
    t_per_leafnode = [sum([opsizes[op] for op in p]) for p in oppaths]
    #print(t_per_leafnode)
    t = sum(t_per_leafnode)
    return t
# }}} calculate_t

def calculate_results(bases, N_INs, mappings, alphas): # {{{
    from joblib import Parallel, delayed

    print("Initializing results structure...")
    # Key order is always b,w,nm,a,
    # Always store as dicts rather than lists to ease merging in large runs.
    results = {}
    for b in bases:
        results[b] = {}
        for w in N_INs:
            n = b**clog(w, b)   # Round up w to next power of b.
            p = b**flog(w, b)   # Round down w to previous power of b.
            depth = clog(w, b)  # Depth of op tree, #levels.

            # Total number of operations in tree.
            # These will be optimised away for non Power-Of-b values of w.
            # http://mathworld.wolfram.com/Rule50.html
            if depth > 1:
                o_max = (b**depth - 1) / (b - 1)
            else:
                o_max = 1

            o_min = (w-2)/(b-1) + 1  # Number of unoptimisable operations.
            assert o_min <= o_max, "o_min=%d, o_max=%d" % (o_min, o_max)

            # t for maximally unbalanced.
            t_max = int((w * (w+1) * 0.5)-1)

            # t for maximally balanced (approx).
            t_bal = w*log(w, b)

            #n_floor = 2*p-w # base2 only.
            n_floor = p-ceil(float(w-p)/float(b-1))
            n_ceil = w - n_floor

            # t for balanced (exact integer).
            t_min = int(n_floor*flog(w, b) + n_ceil*clog(w, b))

            results[b][w] = {
                "n": n,
                "p": p,
                "depth": depth,
                "o_max": o_max,
                "o_min": o_min,
                "t_max": t_max,
                "t_bal": t_bal,
                "t_min": t_min,
            }
            for fn, nm in mappings:
                results[b][w][nm] = {
                    't': {},
                    'u': {},
                }

    # Temporary result structures.
    tmp_ops = {b: {w: {} \
        for w in N_INs} \
        for b in bases}
    tmp_oppaths = {b: {w: {} \
        for w in N_INs} \
        for b in bases}
    tmp_opsizes = {b: {w: {nm: {} \
        for fn,nm in mappings} \
        for w in N_INs} \
        for b in bases}

    # Fill in ops and oppaths
    print("Calculating ops... over cross of (b,w,mapping)")
    cpi_bwm = tuple(itertools.product(bases, N_INs, [m for m in mappings]))
    cpo_ops = Parallel(n_jobs=-2) \
        (delayed(calculate_ops)(b, w, fn) \
        for (b, w, (fn, nm)) in cpi_bwm)

    print("Storing ops to tmp structures...")
    for i, (b, w, (fn, nm)) in enumerate(cpi_bwm):
        tmp_ops[b][w][nm] = cpo_ops[i]

    print("Calculating oppaths... over cross of (b,w,mapping)")
    cpo_oppaths = Parallel(n_jobs=-2) \
        (delayed(calculate_oppaths)(b, w, fn) \
        for (b, w, (fn, nm)) in cpi_bwm)

    print("Storing oppaths to tmp structures...")
    for i, (b, w, (fn, nm)) in enumerate(cpi_bwm):
        tmp_oppaths[b][w][nm] = cpo_oppaths[i]

    # Fill in opsizes, depends on tmp_ops
    if 2 in bases:
        print("Calculating opsizes (b==2)... over cross of (w,mapping)")
        cpi_wm = tuple(itertools.product(N_INs, [m for m in mappings]))
        cpo_opsizes2 = Parallel(n_jobs=-2) \
            (delayed(calculate_opsizes)(tmp_ops[2][w][nm], 2, 0.0) \
            for (w, (fn, nm)) in cpi_wm)

        print("Storing opsizes to tmp structure (b==2)...")
        for i, (w, (fn, nm)) in enumerate(cpi_wm):
            tmp_opsizes[2][w][nm] = cpo_opsizes2[i]

    non2_bases = [b for b in bases if b != 2]
    cpi_bwma = tuple(itertools.product(non2_bases, N_INs,
                                       [m for m in mappings], alphas))
    if len(non2_bases) != 0:
        print("Calculating opsizes (b!=2)... over cross of (b,w,mapping,a)")
        cpo_opsizes = Parallel(n_jobs=-2) \
            (delayed(calculate_opsizes)(tmp_ops[b][w][nm], b, a) \
            for (b, w, (fn, nm), a) in cpi_bwma)

        print("Storing opsizes to tmp structure (b!=2)...")
        for i, (b, w, (fn, nm), a) in enumerate(cpi_bwma):
            tmp_opsizes[b][w][nm][a] = cpo_opsizes[i]

    # Fill in t, depends on tmp_oppaths, tmp_opsizes
    if 2 in bases:
        print("Calculating t (b==2)... over cross of (w,mapping)")
        cpo_t2 = Parallel(n_jobs=-2) \
            (delayed(calculate_t)(tmp_oppaths[2][w][nm], tmp_opsizes[2][w][nm]) \
            for (w, (fn, nm)) in cpi_wm)

        print("Storing t to results structure (b==2)...")
        for i, (w, (fn, nm)) in enumerate(cpi_wm):
            results[2][w][nm]["t"] = cpo_t2[i]

    if len(non2_bases) != 0:
        print("Calculating t (b!=2)... over cross of (b,w,mapping,a)")
        cpo_t = Parallel(n_jobs=-2) \
            (delayed(calculate_t)(tmp_oppaths[b][w][nm], tmp_opsizes[b][w][nm][a]) \
            for (b, w, (fn, nm), a) in cpi_bwma)

        print("Storing t to results structure...")
        for i, (b, w, (fn, nm), a) in enumerate(cpi_bwma):
            results[b][w][nm]["t"][a] = cpo_t[i]

    # Derive further results.
    # This is serial but could be parallel, overhead permitting.
    print("Calculating u...")
    for b in bases:
        for w in N_INs:
            for fn, nm in mappings:
                if b == 2:
                    t     = results[2][w][nm ]["t"]
                    t_fwd = results[2][w]["FWD"]["t"]
                    t_min = results[2][w]["t_min"]
                    u = w - w * float(t) / float(t_fwd)
                    v = w - w * float(t) / float(t_min)
                    results[2][w][nm]["u"] = u
                    results[2][w][nm]["v"] = v
                else:
                    for a in alphas:
                        t     = results[b][w][nm ]["t"][a]
                        t_fwd = results[b][w]["FWD"]["t"][a]
                        u = w - w * float(t) / float(t_fwd)
                        results[b][w][nm]["u"][a] = u

    print("Calculating u_diff...")
    for b in bases:
        for w in N_INs:
            if b == 2:
                u_values = [results[2][w][nm]["u"] for fn, nm in mappings]
                u_hi = max(u_values)
                u_lo = min(u_values)
                u_diff = u_hi - u_lo
                results[2][w]["u_hi"] = u_hi
                results[2][w]["u_lo"] = u_lo
                results[2][w]["u_diff"] = u_diff
            else:
                results[b][w]["u_hi"] = {}
                results[b][w]["u_lo"] = {}
                results[b][w]["u_diff"] = {}
                for a in alphas:
                    u_values = [results[b][w][nm]["u"][a] for fn, nm in mappings]
                    u_hi = max(u_values)
                    u_lo = min(u_values)
                    u_diff = u_hi - u_lo
                    results[b][w]["u_hi"][a] = u_hi
                    results[b][w]["u_lo"][a] = u_lo
                    results[b][w]["u_diff"][a] = u_diff

    print("Returning results...")
    return results
# }}} calculate_results

def dump_results(results, fd): # {{{
    import yaml
    yaml.safe_dump(results, fd)
    return
# }}} dump_results

def load_results(fd): # {{{
    import yaml
    results = yaml.safe_load(fd)
    return results
# }}} load_results

def print_tables(alphas, N_INs, bases, mappings, fd, results): # {{{
    for a in alphas:
        for b in bases:
            table_headers = [
                "w",
                "depth",
                "o_max",
                "o_min",
                "t_bal",
                "t_min",
                "t_max",
            ]
            map_header_fmt = "{:43}"
            table_data_fmt = ' '.join([
                '{:^5}',    # w
                '{:^5}',    # depth
                '{:^5}',    # o_max
                '{:^5}',    # o_min
                '{:^7.3f}', # t_bal
                '{:^5}',    # t_min
                '{:^5}',    # t_max
            ])
            table_header_fmt = ' '.join([
                '{:5}', # w
                '{:5}', # depth
                '{:5}', # o_max
                '{:5}', # o_min
                '{:7}', # t_bal
                '{:5}', # t_min
                '{:5}', # t_max
            ])
            mapping_headers = ["b=%d,alpha=%0.3f" % (b, a)]
            data_w = 9
            for fn, nm in mappings:
                table_headers.append("t")
                table_headers.append("u")
                map_header_fmt += " | {:%d}" % (2*data_w + 1)
                table_header_fmt += " | {:%d} {:%d}" % (data_w, data_w)
                table_data_fmt += " | {:^%d.3f} {:^%d.3f}" % (data_w, data_w)
                mapping_headers.append(nm)
            table_data = []
            for w in N_INs:
                table_data_line = [
                    w,
                    results[b][w]["depth"],
                    results[b][w]["o_max"],
                    results[b][w]["o_min"],
                    results[b][w]["t_bal"],
                    results[b][w]["t_min"],
                    results[b][w]["t_max"],
                ]
                for fn, nm in mappings:
                    if b==2:
                        table_data_line.append(results[b][w][nm]["t"])
                        table_data_line.append(results[b][w][nm]["u"])
                    else:
                        table_data_line.append(results[b][w][nm]["t"][a])
                        table_data_line.append(results[b][w][nm]["u"][a])
                table_data.append(table_data_line)
            print('', file=fd)
            print(map_header_fmt.format(*mapping_headers), file=fd)
            print(table_header_fmt.format(*table_headers), file=fd)
            print(table_header_fmt.format(*[
                '-'*5, # w
                '-'*5, # depth
                '-'*5, # o_max
                '-'*5, # o_min
                '-'*7, # t_bal
                '-'*5, # t_min
                '-'*5, # t_max
                '-'*9, '-'*9, # t/u0 FWD
                '-'*9, '-'*9, # t/u1 BASEBREV
                '-'*9, '-'*9, # t/u2 PINGPONG
                '-'*9, '-'*9, # t/u3 PONGPING
                '-'*9, '-'*9, # t/u4 STRIDEB
                '-'*9, '-'*9, # t/u5 STRODEB
                '-'*9, '-'*9, # t/u6 GRAYFWD
                '-'*9, '-'*9, # t/u7 GRAYREV
            ]), file=fd)
            for line in table_data:
                print(table_data_fmt.format(*line), file=fd)
# }}} print_tables

def plot_graphs(alphas, N_INs, bases, mappings, results,
                interactive=False, png=False, svg=False): # {{{
    markers = [
        '-ob', # solid line, circle marker, blue
        '-sg', # solid line, square marker, green
        '-xr', # solid line, x marker, red
        '-dc', # solid line, thin_diamond marker, cyan
        '-*m', # solid line, star, magenta
        '-hy', # solid line, hexagon1 marker, yellow
        '-+k', # solid line, plus marker, black
        '-+b', # solid line, plus marker, blue
        '-1g', # solid line, plus marker, green
    ]
    import matplotlib.pyplot as plt

    for a in alphas:
        for b in bases:
            title = "b=%d,alpha=%0.3f" % (b, a)
            filename = "img/%s" % title.replace('.', '_')
            f = plt.figure(dpi=96, figsize=(16,9))
            f.canvas.set_window_title(title)
            plt.title(title)
            plt.grid(True)
            plt.xticks([w for w in N_INs if w % 8 == 0])
            plt.xlim(min(N_INs)-1, max(N_INs)+1)
            plt.xlabel("w")
            plt.ylabel("u")
            for map_i, (fn, nm) in enumerate(mappings):
                if nm == "FWD":
                    continue
                if b == 2:
                    u = [results[b][w][nm]['u'] for w in N_INs]
                else:
                    u = [results[b][w][nm]['u'][a] for w in N_INs]
                plt.plot(N_INs, u, markers[map_i], label=nm)
            #plt.ylim(ymin=0.0)
            #plt.yscale("log")
            #plt.yscale("symlog", linthreshy=1.0)
            #plt.yscale("logit")
            plt.legend(loc=2)
            plt.tight_layout()
            if png:
                plt.savefig("%s.png" % filename)
            if svg:
                plt.savefig("%s.svg" % filename)

    for a in alphas:
        title = "u_diff,alpha=%0.3f" % a
        filename = "img/%s" % title.replace('.', '_')
        f = plt.figure(dpi=96, figsize=(16,9))
        f.canvas.set_window_title(title)
        plt.title(title)
        plt.grid(True)
        plt.xticks([w for w in N_INs if w % 8 == 0])
        plt.xlim(min(N_INs)-1, max(N_INs)+1)
        plt.xlabel("w")
        plt.ylabel("u")
        for base_i, b in enumerate(bases):
            if b == 2:
                u_diff = [results[b][w]['u_diff'] for w in N_INs]
            else:
                u_diff = [results[b][w]['u_diff'][a] for w in N_INs]
            plt.plot(N_INs, u_diff, markers[base_i], label="b=%d" % b)
        plt.legend(loc=2)
        plt.tight_layout()
        if png:
            plt.savefig("%s.png" % filename)
        if svg:
            plt.savefig("%s.svg" % filename)

    if 2 in bases: # {{{
        title = "b=2"
        filename = "img/%s" % title
        f = plt.figure(dpi=96, figsize=(16,9))
        f.canvas.set_window_title(title)
        plt.title(title)
        plt.grid(True)
        plt.xticks([w for w in N_INs if w % 8 == 0])
        plt.xlim(min(N_INs)-1, max(N_INs)+1)
        plt.xlabel("w")
        plt.ylabel("v")
        for map_i, (fn, nm) in enumerate(mappings):
            v = [results[2][w][nm]['v'] for w in N_INs]
            plt.plot(N_INs, v, markers[map_i], label=nm)
        #plt.ylim(ymin=0.0)
        #plt.yscale("log")
        #plt.yscale("symlog", linthreshy=1.0)
        #plt.yscale("logit")
        plt.legend(loc=2)
        plt.tight_layout()
        if png:
            plt.savefig("%s.png" % filename)
        if svg:
            plt.savefig("%s.svg" % filename)
    # }}} 2 in bases

    if interactive:
        plt.show()
# }}} plot_graphs

def plot_alpha3d(alphas, N_INs, bases, mappings, results,
                 interactive=False, png=False): # {{{
    # TODO: This is unusable. Use mayavi instead.
    markers = [
        '-ob', # solid line, circle marker, blue
        '-sg', # solid line, square marker, green
        '-xr', # solid line, x marker, red
        '-dc', # solid line, thin_diamond marker, cyan
        '-*m', # solid line, star, magenta
        '-hy', # solid line, hexagon1 marker, yellow
        '-+k', # solid line, plus marker, black
    ]
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    import numpy as np

    def z_pt(b, nm, a, w):
        w_index = N_INs.index(w)
        if nm in algos:
            return results[b][nm][a]["u"][w_index]
        else:
            return results[b][nm][w_index]

    for b in bases:
        title = "b=%d" % b
        filename = "img/%s" % title
        f = plt.figure(dpi=96, figsize=(16,9))
        f.canvas.set_window_title(title)
        ax = f.gca(projection='3d')
        X, Y = np.meshgrid(N_INs, alphas)
        Z = {}
        plt.title(title)
        plt.grid(True)
        plt.xticks([w for w in N_INs if w % 8 == 0])
        plt.xlim(min(N_INs)-1, max(N_INs)+1)
        plt.xlabel("w")
        plt.ylabel("a")
        ax.set_zlabel("u")
        # TODO: matplotlib doesn't overlap surfaces.
        for map_i, (fn, nm) in enumerate(mappings):
            Z[nm] = np.array([z_pt(b, nm, a, w) \
                for w,a in zip(np.ravel(X), np.ravel(Y))]).reshape(X.shape)
            ax.plot_surface(X, Y, Z[nm], label=nm)
        plt.ylim(ymin=0.0)
        #plt.yscale("log")
        #plt.yscale("symlog", linthreshy=1.0)
        #plt.yscale("logit")
        #plt.legend()
        plt.tight_layout()
        #if png:
        #    plt.savefig("%s.png" % filename)

    if interactive:
        plt.show()
# }}} plot_alpha3d

if __name__ == "__main__": # {{{
    import argparse
    parser = argparse.ArgumentParser(
        formatter_class = argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("-o", "--outfile",
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help="Output file for STDOUT.")
    parser.add_argument("-l", "--load",
                        type=argparse.FileType('r'),
                        default=None,
                        help="File containing pre-computed results.")
    parser.add_argument("-d", "--dump",
                        default=False,
                        action='store_true',
                        help="Dump results in YAML format to outfile.")
    parser.add_argument("-z", "--algorithms",
                        type=str,
                        default=','.join([nm for fn, nm in available_mappings]),
                        help="Port-mapping algorithms to use, comma separated.")
    parser.add_argument("-m", "--maps",
                        default=False,
                        action='store_true',
                        help="Print mapping tables to outfile.")
    parser.add_argument("-t", "--tables",
                        default=False,
                        action='store_true',
                        help="Print result tables to outfile.")
    parser.add_argument("-i", "--interactive",
                        default=False,
                        action='store_true',
                        help="Show interactive plot figures.")
    parser.add_argument("-p", "--png",
                        default=False,
                        action='store_true',
                        help="Save PNG figures.")
    parser.add_argument("-s", "--svg",
                        default=False,
                        action='store_true',
                        help="Save SVG figures.")
    parser.add_argument("-b", "--base",
                        type=int,
                        default=0,
                        help="Base for calculations. 0 means [bmin..bmax].")
    parser.add_argument("--bmin",
                        type=int,
                        default=2,
                        help="Minimum base for calculations.")
    parser.add_argument("--bmax",
                        type=int,
                        default=5,
                        help="Maximum base for calculations.")
    parser.add_argument("--wmin",
                        type=int,
                        default=2,
                        help="Minimum width for calculations.")
    parser.add_argument("--wmax",
                        type=int,
                        default=20,
                        help="Maximum width for calculations.")
    parser.add_argument("-w", "--width",
                        type=int,
                        default=0,
                        help="Use only one width. 0 means [wmin..wmax]")
    parser.add_argument("-a", "--alpha",
                        type=float,
                        default=golden,
                        help="Non-linear operation size constant.")
    parser.add_argument("--alpha3d",
                        default=False,
                        action='store_true',
                        help="3D plot over range of alphas.")
    parser.add_argument("--amin",
                        type=float,
                        default=0.0,
                        help="Minimum alpha for surface plot. Use --alpha3d")
    parser.add_argument("--amax",
                        type=float,
                        default=5,
                        help="Maximum alpha for surface plot. Use --alpha3d")
    parser.add_argument("--alen",
                        type=int,
                        default=20,
                        help="Number of alpha values. Use --alpha3d")
    args = parser.parse_args()

    N_INs = [args.width] if args.width != 0 else range(args.wmin, args.wmax+1)
    bases = [args.base] if args.base != 0 else range(args.bmin, args.bmax+1)
    algos = [a.upper() for a in args.algorithms.split(',')]
    if "FWD" not in algos:
        algos.append("FWD")
    mappings = [m for m in available_mappings if m[1] in algos]
    alphas = [args.alpha]
    if args.alpha3d:
        alpha_step = (args.amax - args.amin) / args.alen
        alphas += [round(args.amin + i*alpha_step, 4) for i in range(args.alen)]

    if args.maps:
        print_maps(N_INs, bases, mappings, args.outfile)
    elif args.load is not None:
        results = load_results(args.load)
    else:
        results = calculate_results(bases, N_INs, mappings, alphas)

    if args.dump:
        dump_results(results, args.outfile)

    if args.tables:
        print_tables(alphas, N_INs, bases, mappings, args.outfile, results)
    elif (args.interactive or args.png or args.svg) and not args.alpha3d:
        plot_graphs(alphas, N_INs, bases, mappings, results,
                    interactive=args.interactive, png=args.png, svg=args.svg)
    elif args.alpha3d and (args.interactive or args.png):
        plot_alpha3d(alphas, N_INs, bases, mappings, results,
                     interactive=args.interactive, png=args.png)
# }}} main
{"hexsha": "f67d9c39b1d02e6a13c1e16ba2b0e547cd6e27ec", "size": 28826, "ext": "py", "lang": "Python", "max_stars_repo_path": "dmppl/experiments/treebalance/treebalance.py", "max_stars_repo_name": "DaveMcEwan/dmppl", "max_stars_repo_head_hexsha": "68e8a121d4591360080cd40121add1796ae48a1b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-05T19:46:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-05T19:46:43.000Z", "max_issues_repo_path": "dmppl/experiments/treebalance/treebalance.py", "max_issues_repo_name": "DaveMcEwan/dmppl", "max_issues_repo_head_hexsha": "68e8a121d4591360080cd40121add1796ae48a1b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dmppl/experiments/treebalance/treebalance.py", "max_forks_repo_name": "DaveMcEwan/dmppl", "max_forks_repo_head_hexsha": "68e8a121d4591360080cd40121add1796ae48a1b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9063926941, "max_line_length": 81, "alphanum_fraction": 0.4903906196, "include": true, "reason": "import numpy", "num_tokens": 7832}
""" ====================================== Decision Tree Regression with AdaBoost ====================================== A decision tree is boosted using the AdaBoost.R2 [1]_ algorithm on a 1D sinusoidal dataset with a small amount of Gaussian noise. 299 boosts (300 decision trees) is compared with a single decision tree regressor. As the number of boosts is increased the regressor can fit more detail. .. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997. """ print(__doc__) # Author: Noel Dawe <noel.dawe@gmail.com> # # License: BSD 3 clause # importing necessary libraries import numpy as np import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import AdaBoostRegressor # Create the dataset rng = np.random.RandomState(1) X = np.linspace(0, 6, 100)[:, np.newaxis] y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0]) # Fit regression model regr_1 = DecisionTreeRegressor(max_depth=4) regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4), n_estimators=300, random_state=rng) regr_1.fit(X, y) regr_2.fit(X, y) # Predict y_1 = regr_1.predict(X) y_2 = regr_2.predict(X) # Plot the results plt.figure() plt.scatter(X, y, c="k", label="training samples") plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2) plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2) plt.xlabel("data") plt.ylabel("target") plt.title("Boosted Decision Tree Regression") plt.legend() plt.show()
{"hexsha": "659a2a5944ea6864a9923b7df5d6abf615262db1", "size": 1531, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/ensemble/plot_adaboost_regression.py", "max_stars_repo_name": "talahajeer/scikit-learn", "max_stars_repo_head_hexsha": "d66b42708a5912039740cd08f747229433e579b5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2015-01-22T22:30:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T07:33:06.000Z", "max_issues_repo_path": "examples/ensemble/plot_adaboost_regression.py", "max_issues_repo_name": "talahajeer/scikit-learn", "max_issues_repo_head_hexsha": "d66b42708a5912039740cd08f747229433e579b5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2019-11-11T18:17:02.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-14T02:57:37.000Z", "max_forks_repo_path": "examples/ensemble/plot_adaboost_regression.py", "max_forks_repo_name": "talahajeer/scikit-learn", "max_forks_repo_head_hexsha": "d66b42708a5912039740cd08f747229433e579b5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2015-07-30T13:47:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-03T07:48:38.000Z", "avg_line_length": 27.8363636364, "max_line_length": 78, "alphanum_fraction": 0.6930111039, "include": true, "reason": "import numpy", "num_tokens": 417}
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import inv
import matplotlib.colors as colors
from matplotlib import cm
from matplotlib import rc
from matplotlib import rcParams

__author__ = 'ernesto'

# if use latex or mathtext
rc('text', usetex=True)
rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]

# colors from coolwarm
cNorm = colors.Normalize(vmin=0, vmax=1)
scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cm.coolwarm)
col11 = scalarMap.to_rgba(0)
col12 = scalarMap.to_rgba(0.15)
col21 = scalarMap.to_rgba(1)
col22 = scalarMap.to_rgba(0.9)


def fun_H(s):
    rx = s[0]
    ry = s[1]
    R2 = rx ** 2 + ry ** 2
    return np.array([[rx / np.sqrt(R2), ry / np.sqrt(R2), 0, 0],
                     [-ry / R2, rx / R2, 0, 0]])


def fun_h(s):
    rx = s[0]
    ry = s[1]
    return [np.sqrt(rx ** 2 + ry ** 2), np.arctan2(ry, rx)]


####### Parameters #######

# number of samples
N = 100
# delta - time interval between samples
D = 1
# velocities
vx = -0.2
vy = 0.2
# initial state conditions, s[-1]
rx_i = 10
ry_i = -5
s_i = [rx_i, ry_i, vx, vy]
# number of parameters
p = 4
# state transition matrix
A = np.array([[1, 0, D, 0],
              [0, 1, 0, D],
              [0, 0, 1, 0],
              [0, 0, 0, 1]])
# mean of u
mu_u = [0, 0, 0, 0]
# covariance of u
var_u = 0.0001
Q = np.zeros((p, p))
Q[2, 2] = var_u
Q[3, 3] = var_u
# variance of R[n] and beta[n]
var_R = 0.1
var_B = 0.01
C = [[var_R, 0], [0, var_B]]
# initial conditions of the Kalman filter
# s[-1|-1]
s_est_i = [5, 5, 0, 0]
# M[-1|-1]
C_s_i = 100 * np.eye(p)

### End of parameters ###

# seed for the plots
np.random.seed(29)

# sample index vector
n = np.arange(N)

# ideal trajectory
rx_ideal = rx_i + vx * n
ry_ideal = ry_i + vy * n

# true trajectory - Gauss-Markov process s[n] = As[n-1] + u[n]
s = np.zeros((p, N))  # to store the values
s_prev = s_i
for ni in n:
    u = np.random.multivariate_normal(mu_u, Q, 1)[0]
    s_prev = A @ s_prev + u
    s[:, ni] = s_prev
rx_true = s[0, :]
ry_true = s[1, :]

# construction of the range and bearing from rx[n] and ry[n]
R = np.sqrt(np.square(rx_true) + np.square(ry_true))
B = np.arctan2(ry_true, rx_true)

# noisy observations
hat_R = R + np.sqrt(var_R) * np.random.randn(N)
hat_B = B + np.sqrt(var_B) * np.random.randn(N)

# construction of the observation vector x
x = np.array([hat_R, hat_B])
#x = [hat_R, hat_B]

# Kalman filter
# variables to store the results
s_ests = np.zeros((p, N))
Ms = np.zeros((p, N))
s_est = s_est_i
M_est = C_s_i
for ni in n:
    s_pred = A @ s_est
    M_pred = A @ M_est @ A.T + Q
    H = fun_H(s_pred)
    K = M_pred @ H.T @ inv(C + H @ M_pred @ H.T)
    s_est = s_pred + K @ (x[:, ni] - fun_h(s_pred))
    M_est = (np.eye(p) - K @ H) @ M_pred
    # save the results
    s_ests[:, ni] = s_est.ravel()
    Ms[:, ni] = np.diag(M_est)

fs = 12

# noisy observations in cartesian coordinates - only for plotting
hat_rx = hat_R * np.cos(hat_B)
hat_ry = hat_R * np.sin(hat_B)

# estimates including the initial value
s_ests = np.insert(s_ests, 0, s_est_i, axis=1)

fig = plt.figure(0, figsize=(9, 3), frameon=False)
ax = plt.subplot2grid((1, 8), (0, 0), rowspan=1, colspan=4)
plt.plot(rx_ideal, ry_ideal, color=col21, label='$\mathrm{Posici\\acute{o}n\;ideal}$')
plt.plot(rx_true, ry_true, color='r', label='$\mathrm{Posici\\acute{o}n\;verdadera}$')
plt.text(rx_true[0]+1, ry_true[0]-1.5, '$\mathrm{Inicio}$', fontsize=fs, ha='center', va='top')
plt.text(rx_true[-1]-1, ry_true[-1]+0.8, '$\mathrm{Fin}$', fontsize=fs, ha='center', va='bottom')
leg = plt.legend(loc=3, frameon=False, fontsize=fs)
plt.axis([-22, 16, -10, 21])
plt.xlabel('$r_x[n]$', fontsize=fs)
plt.ylabel('$r_y[n]$', fontsize=fs)
ax = plt.subplot2grid((1, 8), (0, 4), rowspan=1, colspan=4)
plt.plot(rx_true, ry_true, color='r', label='$\mathrm{Posici\\acute{o}n\;verdadera}$')
plt.plot(hat_rx, hat_ry, color=col12, label='$\mathrm{Posici\\acute{o}n\;medida\;}(\mathbf{x}[n])$', zorder=0)
leg = plt.legend(loc=3, frameon=False, fontsize=fs)
plt.axis([-22, 16, -10, 21])
ax.set_yticklabels([])
plt.xlabel('$r_x[n]$', fontsize=fs)
plt.savefig('example_13_4_tracks.pdf', bbox_inches='tight')

fig = plt.figure(1, figsize=(9, 3), frameon=False)
ax = plt.subplot(121)
plt.plot(n, R, color='k', label='$R[n]$')
plt.plot(n, hat_R, color='r', label='$\hat{R}[n]$')
leg = plt.legend(loc=2, frameon=False, fontsize=fs)
plt.axis([n[0], n[-1], 0, 28])
plt.xlabel('$n$', fontsize=fs)
plt.ylabel('$\mathrm{Distancia\;(metros)}$', fontsize=fs)
ax = plt.subplot(122)
plt.plot(n, np.rad2deg(B), color='k', label='$\\beta[n]$')
plt.plot(n, np.rad2deg(hat_B), color='r', label='$\hat{\\beta}[n]$')
leg = plt.legend(loc=2, frameon=False, fontsize=fs)
plt.axis([n[0], n[-1], -40, 160])
plt.xlabel('$n$', fontsize=fs)
plt.ylabel('$\mathrm{\\acute{A}ngulo\;(grados)}$', fontsize=fs)
plt.savefig('example_13_4_observations.pdf', bbox_inches='tight')

ax = plt.figure(2, figsize=(9, 5), frameon=False)
plt.plot(rx_true, ry_true, color='r', label='$\mathrm{Posici\\acute{o}n\;verdadera}$')
plt.plot(hat_rx, hat_ry, color=col12, label='$\mathrm{Posici\\acute{o}n\;medida\;}(\mathbf{x}[n])$', zorder=0)
plt.plot(s_ests[0, :], s_ests[1, :], color='k', label='$\mathrm{Estimador\;del\;filtro\;de\;Kalman\;extendido}$')
plt.axis([-22, 16, -10, 21])
plt.xlabel('$r_x[n]$', fontsize=fs)
plt.ylabel('$r_y[n]$', fontsize=fs)
leg = plt.legend(loc=1, frameon=False, fontsize=fs)
plt.savefig('example_13_4_kalman_estimator.pdf', bbox_inches='tight')

fig = plt.figure(3, figsize=(9, 3), frameon=False)
ax = plt.subplot2grid((1, 2), (0, 0), rowspan=1, colspan=1)
plt.plot(n, Ms[0, :], color='k')
plt.axis([n[0], n[-1], 0, 0.8])
plt.xlabel('$n$', fontsize=fs)
plt.title('$\mathrm{MSE\;m\\acute{\i}nimo\;para\;}r_x[n]$', fontsize=fs)
ax = plt.subplot2grid((1, 2), (0, 1), rowspan=1, colspan=1)
plt.plot(n, Ms[1, :], color='k', label='$\\beta[n]$')
plt.axis([n[0], n[-1], 0, 3.5])
plt.xlabel('$n$', fontsize=fs)
plt.title('$\mathrm{MSE\;m\\acute{\i}nimo\;para\;}r_y[n]$', fontsize=fs)
#ax.yaxis.tick_right()
#ax.yaxis.set_label_position("right")
plt.savefig('example_13_4_minimum_mse.pdf', bbox_inches='tight')

plt.show()
{"hexsha": "826b13ca1296450ec18ccbae97e762cefc9e596d", "size": 6150, "ext": "py", "lang": "Python", "max_stars_repo_path": "figuras/PycharmKayStatisticalReport/example_13_4_graphs.py", "max_stars_repo_name": "bor9/estudiando_el_kay", "max_stars_repo_head_hexsha": "6e07908b8b0b5a5166dadce30001e6100e8304c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "figuras/PycharmKayStatisticalReport/example_13_4_graphs.py", "max_issues_repo_name": "bor9/estudiando_el_kay", "max_issues_repo_head_hexsha": "6e07908b8b0b5a5166dadce30001e6100e8304c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "figuras/PycharmKayStatisticalReport/example_13_4_graphs.py", "max_forks_repo_name": "bor9/estudiando_el_kay", "max_forks_repo_head_hexsha": "6e07908b8b0b5a5166dadce30001e6100e8304c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-02T05:27:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-02T05:27:27.000Z", "avg_line_length": 30.9045226131, "max_line_length": 113, "alphanum_fraction": 0.6432520325, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2286}
# Code by Denis Zahariev(DeniBademi) 2021
# Made with <3 and python
# Email: denis.zaharievv@gmail.com

from numpy import sin, cos
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation
import math
from scipy.integrate import quad

delay = 10


class drawingMachine:
    trail_length = 300

    def __init__(self, c):
        self.c = c
        self.x_data = []
        self.y_data = []

    def build(self, tl, n):
        self.scenario = []
        self.trail_length = int(tl/1.1)
        for t in range(tl):
            self.scenario.append([])     # one frame per t
            self.scenario[t].append([])  # x dimension
            self.scenario[t].append([])  # y dimension
            # Index with t (not t-1) so each frame's points stay separate.
            z = 0 + 0*1j
            for i in sum(zip(range(n+1, 2*n+1), range(n-1, -1, -1)), (n,)):
                old_z = z
                z += np.exp(2*np.pi*1j*(i-n)*t/tl)*self.c[i]
                self.scenario[t][0].append(z.real)
                self.scenario[t][1].append(z.imag)


def animateF(drawingMachine, tl):
    global ani
    fig = plt.figure()
    ax = fig.add_subplot(111, autoscale_on=False, xlim=(-6, 6), ylim=(-6, 6))
    history, = ax.plot([1], [1], 'r-', lw=1)
    ax.set_aspect('equal')
    ax.grid()

    axtext = fig.add_axes([0.0, 0.95, 0.3, 0.05])
    axtext.axis("off")
    time = axtext.text(0.5, 0.5, str(0), ha="left", va="top")

    line, = ax.plot([], [], '-', lw=1)

    def animation_frame(i):
        thisx = drawingMachine.scenario[i][0]
        thisy = drawingMachine.scenario[i][1]
        drawingMachine.x_data.extend([thisx[-1], thisx[-1]])
        drawingMachine.y_data.extend([thisy[-1], thisy[-1]])
        line.set_data(thisx, thisy)
        time.set_text("time = " + str(i*delay / 1000))
        history.set_data(drawingMachine.x_data[-drawingMachine.trail_length*2:],
                         drawingMachine.y_data[-drawingMachine.trail_length*2:])
        return line, time, history,

    ani = animation.FuncAnimation(fig, animation_frame, np.arange(1, tl-1),
                                  interval=delay, blit=True)
    plt.show()
{"hexsha": "e7b1074cb5e521eee44ad94f78a72ed201ceab3a", "size": 2079, "ext": "py", "lang": "Python", "max_stars_repo_path": "basicAnimation.py", "max_stars_repo_name": "DeniBademi/Fourier-Series-drawing-replicator", "max_stars_repo_head_hexsha": "78605d2fd263d36a7f03b5f00110f1ad372e1400", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "basicAnimation.py", "max_issues_repo_name": "DeniBademi/Fourier-Series-drawing-replicator", "max_issues_repo_head_hexsha": "78605d2fd263d36a7f03b5f00110f1ad372e1400", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "basicAnimation.py", "max_forks_repo_name": "DeniBademi/Fourier-Series-drawing-replicator", "max_forks_repo_head_hexsha": "78605d2fd263d36a7f03b5f00110f1ad372e1400", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3164556962, "max_line_length": 135, "alphanum_fraction": 0.5887445887, "include": true, "reason": "import numpy,from numpy,import scipy,from scipy", "num_tokens": 607}
import argparse
import cv2
import numpy as np
import math
import os
import copy

# Minimum number of matches that have to be found
# to consider the recognition valid
MIN_MATCHES = 8


class OBJ:
    def __init__(self, filename, swapyz=False):
        """Loads a Wavefront OBJ file. """
        self.vertices = []
        self.normals = []
        self.texcoords = []
        self.faces = []
        for line in open(filename, "r"):
            if line.startswith('#'):
                continue
            values = line.split()
            if not values:
                continue
            if values[0] == 'v':
                v = tuple(map(float, values[1:4]))
                if swapyz:
                    v = v[0], v[2], v[1]
                self.vertices.append(v)
            elif values[0] == 'vn':
                v = tuple(map(float, values[1:4]))
                if swapyz:
                    v = v[0], v[2], v[1]
                self.normals.append(v)
            elif values[0] == 'vt':
                self.texcoords.append(tuple(map(float, values[1:3])))
            elif values[0] == 'f':
                face = []
                texcoords = []
                norms = []
                for v in values[1:]:
                    w = v.split('/')
                    face.append(int(w[0]))
                    if len(w) >= 2 and len(w[1]) > 0:
                        texcoords.append(int(w[1]))
                    else:
                        texcoords.append(0)
                    if len(w) >= 3 and len(w[2]) > 0:
                        norms.append(int(w[2]))
                    else:
                        norms.append(0)
                self.faces.append((face, norms, texcoords))


def detectFeaturesKeys(image):
    descriptor = cv2.xfeatures2d.SIFT_create()
    (kps, features) = descriptor.detectAndCompute(image, None)
    return (kps, features)


def showImage(frame, time=1000):
    cv2.imshow('frame', frame)
    cv2.waitKey(time)
    return


def main():
    """
    Loads the target surface images, matches them against the input frame,
    and animates the 3D model from the source surface to the destination.
    """
    homography = None
    # matrix of camera parameters (made up but works quite well for me)
    camera_parameters = np.array([[756.56499986, 0, 493.2992946],
                                  [0, 753.44416051, 304.00857278],
                                  [0, 0, 1]])
    # load the reference surface that will be searched in the video stream
    dir_name = os.getcwd()
    # starting model
    model1 = cv2.imread(os.path.join(dir_name, 'reference/model.png'), 0)
    # end model
    model2 = cv2.imread(os.path.join(dir_name, 'reference/model42.png'), 0)
    # Compute model keypoints and their descriptors
    kp_model1, des_model1 = detectFeaturesKeys(model1)
    kp_model2, des_model2 = detectFeaturesKeys(model2)
    # Load 3D model from OBJ file
    obj = OBJ(os.path.join(dir_name, 'models/pirate-ship-fat.obj'), swapyz=True)

    # init video capture: read the current frame
    frame = cv2.imread('images4/10.jpg')
    kp_frame, des_frame = detectFeaturesKeys(frame)
    # match frame descriptors with model descriptors
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)  # or pass empty dictionary
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des_model1, des_frame, k=2)
    # ratio test as per Lowe's paper
    good = []
    for m, n in matches:
        if m.distance < 0.65 * n.distance:
            good.append(m)
    # compute homography if enough matches are found
    if len(good) > MIN_MATCHES:
        # differentiate between source points and destination points
        src_pts = np.float32([kp_model1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp_frame[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        # compute homography
        homography, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
    else:
        print("Not enough matches found - {}/{}".format(len(good), MIN_MATCHES))

    # -------------------------- dest ----------------------------
    frame = cv2.imread('images4/10.jpg')
    # read the current frame
    kp_frame, des_frame = detectFeaturesKeys(frame)
    # match frame descriptors with model descriptors
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)  # or pass empty dictionary
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des_model2, des_frame, k=2)
    # ratio test as per Lowe's paper
    good = []
    for m, n in matches:
        if m.distance < 0.65 * n.distance:
            good.append(m)
    # compute homography if enough matches are found
    if len(good) > MIN_MATCHES:
        # differentiate between source points and destination points
        src_pts = np.float32([kp_model2[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp_frame[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        # compute homography
        homography2, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    else:
        print("Not enough matches found - {}/{}".format(len(good), MIN_MATCHES))

    if homography is not None and homography2 is not None:
        try:
            # obtain 3D projection matrix from homography matrix and camera parameters
            projection = projection_matrix(camera_parameters, homography)
            # project the centers of both reference surfaces into the frame
            meanS = np.array([int(model1.shape[1] / 2), int(model1.shape[0] / 2), 1])
            meanD = np.array([int(model2.shape[1] / 2), int(model2.shape[0]), 1])
            meanS = np.dot(homography, meanS)
            meanD = np.dot(homography2, meanD)
            meanS /= meanS[2]
            meanD /= meanD[2]
            meanS = np.int32(meanS)
            meanD = np.int32(meanD)
            dest = [int(meanD[0]), int(meanD[1])]
            # project the model, moving it step by step towards the destination
            oldFrame = copy.deepcopy(frame)
            count = 0
            reached = False
            while not reached:
                count += 1
                frame = copy.deepcopy(oldFrame)
                frame, reached = render(frame, obj, projection, model1,
                                        0.01 * (meanD - meanS), count, dest, False)
                showImage(frame, 1)
            print("Reached destination!!!")
            showImage(frame, 1000)
        except Exception as e:
            print(e)
            print('Error')
    else:
        if homography is None:
            print('Homo 1 None')
        if homography2 is None:
            print('Homo 2 None')
    cv2.destroyAllWindows()
    return 0


def render(img, obj, projection, model, mov, count, dest, reached, color=False):
    """
    Render a loaded obj model into the current video frame
    """
    vertices = obj.vertices
    scale_matrix = np.eye(3) * 30
    h, w = model.shape
    z = 0
    for face in obj.faces:
        face_vertices = face[0]
        points = np.array([vertices[vertex - 1] for vertex in face_vertices])
        points = np.dot(points, scale_matrix)
        # render model in the middle of the reference surface. To do so,
        # model points must be displaced
        points = np.array([[int(p[0] + w / 2), int(p[1] + h / 2), p[2]] for p in points])
        # translate the model proportionally to the elapsed step count
        second_matrix = np.array([[1, 0, 0.1 * count * mov[0]],
                                  [0, 1, 0.1 * count * mov[1]],
                                  [0, 0, 1]])
        dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3),
                                       np.dot(second_matrix, projection))
        imgpts = np.int32(dst)
        # crude test of whether this face's projected points touch the destination
        if dest in imgpts:
            z += 1
        if color is False:
            cv2.fillConvexPoly(img, imgpts, (137, 27, 211))
        else:
            color = hex_to_rgb(face[-1])
            color = color[::-1]  # reverse
            cv2.fillConvexPoly(img, imgpts, color)
    if z > 10:
        reached = True
    return img, reached


def projection_matrix(camera_parameters, homography):
    """
    From the camera calibration matrix and the estimated homography
    compute the 3D projection matrix
    """
    # Compute rotation along the x and y axis as well as the translation
    homography = homography * (-1)
    rot_and_transl = np.dot(np.linalg.inv(camera_parameters), homography)
    col_1 = rot_and_transl[:, 0]
    col_2 = rot_and_transl[:, 1]
    col_3 = rot_and_transl[:, 2]
    # normalise vectors
    l = math.sqrt(np.linalg.norm(col_1, 2) * np.linalg.norm(col_2, 2))
    rot_1 = col_1 / l
    rot_2 = col_2 / l
    translation = col_3 / l
    # compute the orthonormal basis
    c = rot_1 + rot_2
    p = np.cross(rot_1, rot_2)
    d = np.cross(c, p)
    rot_1 = np.dot(c / np.linalg.norm(c, 2) + d / np.linalg.norm(d, 2), 1 / math.sqrt(2))
    rot_2 = np.dot(c / np.linalg.norm(c, 2) - d / np.linalg.norm(d, 2), 1 / math.sqrt(2))
    rot_3 = np.cross(rot_1, rot_2)
    # finally, compute the 3D projection matrix from the model to the current frame
    projection = np.stack((rot_1, rot_2, rot_3, translation)).T
    return np.dot(camera_parameters, projection)


def hex_to_rgb(hex_color):
    """
    Helper function to convert hex strings to RGB
    """
    hex_color = hex_color.lstrip('#')
    h_len = len(hex_color)
    return tuple(int(hex_color[i:i + h_len // 3], 16) for i in range(0, h_len, h_len // 3))


# Command line argument parsing
# NOT ALL OF THEM ARE SUPPORTED YET
parser = argparse.ArgumentParser(description='Augmented reality application')
parser.add_argument('-ma', '--matches', help='draw matches between keypoints', action='store_true')
args = parser.parse_args()

if __name__ == '__main__':
    main()
{"hexsha": "53192bd9cf4fd50379892a23fa1b3a5f8ed411e1", "size": 10126, "ext": "py", "lang": "Python", "max_stars_repo_path": "Part4.py", "max_stars_repo_name": "ankit-1517/Augmented-Reality-based-Car-race", "max_stars_repo_head_hexsha": "548ef56c3a6bfdc785c94858aae44a6941800784", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-05-04T13:35:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-21T16:01:06.000Z", "max_issues_repo_path": "Part4.py", "max_issues_repo_name": "ankit-1517/Augmented-Reality-based-Car-race", "max_issues_repo_head_hexsha": "548ef56c3a6bfdc785c94858aae44a6941800784", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-23T12:28:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-23T12:28:37.000Z", "max_forks_repo_path": "Part4.py", "max_forks_repo_name": "ankit-1517/Augmented-Reality-based-Car-race", "max_forks_repo_head_hexsha": "548ef56c3a6bfdc785c94858aae44a6941800784", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3560606061, "max_line_length": 112, "alphanum_fraction": 0.581374679, "include": true, "reason": "import numpy", "num_tokens": 2696}
import pandas as pd

# all permutations are already reverse-deleted
# all sequences are represented in binary
nucleotides = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
numtonuc = {0: 'A', 1: 'C', 2: 'G', 3: 'T'}
complement = {0: 3, 3: 0, 1: 2, 2: 1}


def window(fseq, window_size):
    for i in range(len(fseq) - window_size + 1):
        yield fseq[i:i + window_size]


# return the first or the last number representation
def seqpos(kmer, last):
    return 1 << (1 + 2 * kmer) if last else 1 << 2 * kmer


def seq_permutation(seqlen):
    return range(seqpos(seqlen, False), seqpos(seqlen, True))


def gen_nonreversed_kmer(k):
    nonrevk = list()
    for i in range(seqpos(k, False), seqpos(k, True)):
        if i <= revcomp(i):
            nonrevk.append(i)
    return nonrevk


def itoseq(seqint):
    if type(seqint) is not int:
        return seqint
    seq = ""
    mask = 3
    copy = int(seqint)  # prevent changing the original value
    while copy != 1:
        seq = numtonuc[copy & mask] + seq
        copy >>= 2
        if copy == 0:
            print("Could not find the append-left on the input sequence")
            return 0
    return seq


def seqtoi(seq, gappos=0, gapsize=0):
    # due to various seqlengths, this project always needs to append 1 to the left
    binrep = 1
    gaps = range(gappos, gappos + gapsize)
    for i in range(0, len(seq)):
        if i in gaps:
            continue
        binrep <<= 2
        binrep |= nucleotides[seq[i]]
    return binrep


def revcomp(seqbin):
    rev = 1
    mask = 3
    copy = int(seqbin)
    while copy != 1:
        rev <<= 2
        rev |= complement[copy & mask]
        copy >>= 2
        if copy == 0:
            print("Could not find the append-left on the input sequence")
            return 0
    return rev


def revcompstr(seq):
    rev = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    return "".join([rev[base] for base in reversed(seq)])


def insert_pos(seqint, base, pos):  # pos is position from the right
    return ((seqint << 2) & ~(2**(2 * pos + 2) - 1)) | ((seqint & 2**(2 * pos) - 1) | (nucleotides[base] << pos * 2))


# this function already counts without its reverse complement,
# i.e. oligfreq + reverse merge in the original R code
# Input: pandas Series (or list) of binary-encoded sequences, and the kmer length
# Output: oligonucleotide count with reverse removed
def nonr_olig_freq(seqtbl, kmer, nonrev_list, gappos=0, gapsize=0):
    # with the gap model, our model becomes gapsize + kmer
    gapmer = kmer + gapsize
    # separator; since this is binary, the number is counted from the right
    rightseparator = kmer - gappos
    leftseparator = rightseparator + gapsize
    # use a dictionary first to avoid slow indexing into a pandas data frame
    olig_df = {k: [0] * len(seqtbl) for k in nonrev_list}
    for i in range(0, len(seqtbl)):  # 22s for 3000
        mask = (4**gapmer) - 1
        cpy = int(seqtbl[i])
        while cpy > (4**gapmer) - 1:
            # gap calculation here
            cur = cpy & mask
            right = cur & ((4**rightseparator) - 1)
            left = (cur >> 2 * leftseparator) << 2 * rightseparator
            gappedseqint = left | right
            r = (1 << (2 * kmer)) | gappedseqint  # append 1
            rc = revcomp(r)
            if r > rc:
                r = rc
            # 392secs with loc, 434 secs with the regression. R time: 10secs for
            # allocation, 3.97mins for linreg
            # with 'at', only 23secs! -- 254secs total for 6mer
            olig_df[r][i] += 1
            cpy >>= 2
    return pd.DataFrame(olig_df)
{"hexsha": "41126b58b023d692d489620dd861a44c6d619f54", "size": 3563, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate_prediction/bio.py", "max_stars_repo_name": "yangyxt/QBiC-Pred", "max_stars_repo_head_hexsha": "fc4a7ca042fc89ee9b726dc99c981be11030e29c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-01-08T19:55:04.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-10T19:45:06.000Z", "max_issues_repo_path": "generate_prediction/bio.py", "max_issues_repo_name": "yangyxt/QBiC-Pred", "max_issues_repo_head_hexsha": "fc4a7ca042fc89ee9b726dc99c981be11030e29c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-03-11T20:28:28.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-15T16:20:45.000Z", "max_forks_repo_path": "generate_prediction/bio.py", "max_forks_repo_name": "yangyxt/QBiC-Pred", "max_forks_repo_head_hexsha": "fc4a7ca042fc89ee9b726dc99c981be11030e29c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-12-26T18:36:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-10T19:45:08.000Z", "avg_line_length": 33.2990654206, "max_line_length": 126, "alphanum_fraction": 0.5983721583, "include": true, "reason": "import numpy", "num_tokens": 1092}
""" FeasibilityEvaluator{T} <: AbstractNLPEvaluator TODO """ mutable struct FeasibilityEvaluator{Evaluator<:AbstractNLPEvaluator, T, VT} <: AbstractNLPEvaluator inner::Evaluator x_min::VT x_max::VT cons::VT end function FeasibilityEvaluator(nlp::AbstractNLPEvaluator) if !is_constrained(nlp) error("Input problem must have inequality constraints") end x_min, x_max = bounds(nlp, Variables()) cx = similar(x_min, n_constraints(nlp)) return FeasibilityEvaluator{typeof(nlp), eltype(x_min), typeof(x_min)}(nlp, x_min, x_max, cx) end function FeasibilityEvaluator(datafile::String) nlp = SlackEvaluator(datafile) return FeasibilityEvaluator(nlp) end n_variables(nlp::FeasibilityEvaluator) = n_variables(nlp.inner) n_constraints(nlp::FeasibilityEvaluator) = 0 constraints_type(::FeasibilityEvaluator) = :bound has_hessian(nlp::FeasibilityEvaluator) = has_hessian(nlp.inner) has_hessian_lagrangian(nlp::FeasibilityEvaluator) = has_hessian(nlp) # Getters get(nlp::FeasibilityEvaluator, attr::AbstractNLPAttribute) = get(nlp.inner, attr) get(nlp::FeasibilityEvaluator, attr::AbstractVariable) = get(nlp.inner, attr) get(nlp::FeasibilityEvaluator, attr::PS.AbstractNetworkAttribute) = get(nlp.inner, attr) # Setters function setvalues!(nlp::FeasibilityEvaluator, attr::PS.AbstractNetworkValues, values) setvalues!(nlp.inner, attr, values) end # Bounds bounds(nlp::FeasibilityEvaluator, ::Variables) = bounds(nlp.inner, Variables()) bounds(nlp::FeasibilityEvaluator, ::Constraints) = (Float64[], Float64[]) initial(nlp::FeasibilityEvaluator) = initial(nlp.inner) function update!(nlp::FeasibilityEvaluator, u) conv = update!(nlp.inner, u) constraint!(nlp.inner, nlp.cons, u) return conv end # f(x) = 0.5 * || c(x) ||² function objective(nlp::FeasibilityEvaluator, u) return 0.5 * dot(nlp.cons, nlp.cons) end function constraint!(nlp::FeasibilityEvaluator, cons, u) @assert length(cons) == 0 return end # Gradient # ∇f = J' * c(x) function gradient!(nlp::FeasibilityEvaluator, grad, u) σ = 0.0 ojtprod!(nlp.inner, grad, u, σ, nlp.cons) return end jacobian_structure(ag::FeasibilityEvaluator) = (Int[], Int[]) function jacobian!(ag::FeasibilityEvaluator, jac, u) @assert length(jac) == 0 return end # H = ∇²c(x) + J'*J function hessprod!(nlp::FeasibilityEvaluator, hessvec, u, v) σ = 0.0 # Need to update the first-order adjoint λ first hessian_lagrangian_penalty_prod!(nlp.inner, hessvec, u, nlp.cons, σ, v, 0.0) # J' * J * v jv = similar(nlp.cons) jtv = similar(u) jprod!(nlp.inner, jv, u, v) jtprod!(nlp.inner, jtv, u, jv) hessvec .+= jtv return end function hessian_structure(nlp::FeasibilityEvaluator) n = n_variables(nlp) rows = Int[r for r in 1:n for c in 1:r] cols = Int[c for r in 1:n for c in 1:r] return rows, cols end function reset!(nlp::FeasibilityEvaluator) reset!(nlp.inner) end
{"hexsha": "1f8c6074d12542ae56ad20ed7fa82c6a80322852", "size": 2966, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Evaluators/feasibility_evaluator.jl", "max_stars_repo_name": "exanauts/ExaPF.jl", "max_stars_repo_head_hexsha": "cd1bcb8a0782fe448d46a10816f82c5d28c3854e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2020-07-15T16:01:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T21:28:25.000Z", "max_issues_repo_path": "src/Evaluators/feasibility_evaluator.jl", "max_issues_repo_name": "exanauts/ExaPF.jl", "max_issues_repo_head_hexsha": "cd1bcb8a0782fe448d46a10816f82c5d28c3854e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 129, "max_issues_repo_issues_event_min_datetime": "2020-07-02T11:59:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T20:10:54.000Z", "max_forks_repo_path": "src/Evaluators/feasibility_evaluator.jl", "max_forks_repo_name": "exanauts/ExaPF.jl", "max_forks_repo_head_hexsha": "cd1bcb8a0782fe448d46a10816f82c5d28c3854e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-15T18:49:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-29T20:12:19.000Z", "avg_line_length": 27.9811320755, "max_line_length": 99, "alphanum_fraction": 0.7124072825, "num_tokens": 919}
#=
Copyright (c) 2015, Intel Corporation

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
  this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
  may be used to endorse or promote products derived from this software
  without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
=#

# $ julia knob-test1.jl ../matrices/bcsstk14.mtx
# asymmetric (2, 8) exists but (8, 2) doesn't
# julia: SymGS.cpp:305: bool SpMP::getSymmetricNnzPattern(const SpMP::CSR *, int **, int **, int **, int **): Assertion `sym.isSymmetric(false, true)' failed.
# Aborted (core dumped)

include("../../src/Sparso.jl")
include("../../src/simple-show.jl")
include("./utils.jl")
using Sparso

function pcg_symgs(x, A, b)
    L, U = Sparso.ilu(A)
    r = b - Sparso.SpMV(1, A, x)
    z = copy(r)
    Sparso.fwdTriSolve!(z, L, r, C_NULL)
end

A = matrix_market_read(ARGS[1], true, true)
m = size(A, 1)
b = ones(Float64, m)
x = zeros(Float64, m)
pcg_symgs(x, A, b)
{"hexsha": "23e938ece02690a09debd085b5af0ce0e74d9fc0", "size": 2156, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/correctness/knob-test1.jl", "max_stars_repo_name": "IntelLabs/Sparso", "max_stars_repo_head_hexsha": "570e7a18a96045e490f4ebf27ea948592e0bfa0b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2016-07-11T15:11:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T00:32:08.000Z", "max_issues_repo_path": "test/correctness/knob-test1.jl", "max_issues_repo_name": "IntelLabs/Sparso", "max_issues_repo_head_hexsha": "570e7a18a96045e490f4ebf27ea948592e0bfa0b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-09-15T13:37:36.000Z", "max_issues_repo_issues_event_max_datetime": "2016-12-09T19:30:32.000Z", "max_forks_repo_path": "test/correctness/knob-test1.jl", "max_forks_repo_name": "IntelLabs/Sparso", "max_forks_repo_head_hexsha": "570e7a18a96045e490f4ebf27ea948592e0bfa0b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-03T03:11:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-03T03:11:19.000Z", "avg_line_length": 43.12, "max_line_length": 158, "alphanum_fraction": 0.7462894249, "num_tokens": 523}
#=
sigmoid.jl

A quick script to perform a sigmoid transformation using the equation:

    σ(X) = 1 / (1 + ℯ ^ vX)

function: sigmoid(signal; v), where signal is the input timeseries and the
parameter v determines how shallow / steep the sigmoid curve will be.
Smaller values of v flatten the curve.

From França et al. (2018; https://doi.org/10.3389/fphys.2018.01767):
The parameter v was chosen based on its effect on the estimated multifractal
width for three types of time series: icEEG (NHNN1-channel 1), surrogate EEG
(temporally shuffled values of the original time series from NHNN1-channel 1)
and a simulated random series (with the same mean and variance), across the
range v = [0.1, 2.0] in steps of 0.1. To find the optimal value for the
parameter v, we needed to balance the trade-off between the three series in
terms of presenting the most distinct Δα values (Appendix Figure E3A in
Supplementary Material), while showing minimum distortion on the recording,
or maximum correlation with the original time series.
=#

function sigmoid(signal; v)
    sigmoid_transform = 1 ./ (1 .+ ℯ .^ (v .* signal))
    return sigmoid_transform
end
{"hexsha": "bfefa1af0e8d8829dfc2a62341f48d84195eb200", "size": 1192, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "sigmoid.jl", "max_stars_repo_name": "tehrandavis/data_management_tools", "max_stars_repo_head_hexsha": "3c531c78f85f4de3be20dc4ac35696721fe77290", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sigmoid.jl", "max_issues_repo_name": "tehrandavis/data_management_tools", "max_issues_repo_head_hexsha": "3c531c78f85f4de3be20dc4ac35696721fe77290", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sigmoid.jl", "max_forks_repo_name": "tehrandavis/data_management_tools", "max_forks_repo_head_hexsha": "3c531c78f85f4de3be20dc4ac35696721fe77290", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5714285714, "max_line_length": 91, "alphanum_fraction": 0.7323825503, "num_tokens": 302}
""" .. _ref_mapdl_math_basic: PyMAPDL APDLMath Basic Operations --------------------------------- This tutorial shows how you can use pymapdl to use APDL math for basic operations on APDLMath vectors and matrices in the APDL memory workspace. The `ansys.mapdl.math` submodule gives access to APDLMath features inside PyMAPDL. """ import numpy as np from ansys.mapdl.core import launch_mapdl # Start MAPDL as a service and create an APDLMath object. mapdl = launch_mapdl() mm = mapdl.math ############################################################################### # Create and Manipulate Vectors # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Create 2 APDLMath vectors of size 5. :math:`\vec{v}` is initialized with # ones, $\vec{w}$ is filled with random values # # Corresponding APDLMath commands # - `*VEC,V,D,ALLOC,5` # - `*INIT,V,CONST,1` # - `*VEC,W,D,ALLOC,5` # - `*INIT,W,RAND` v = mm.ones(5) w = mm.rand(5) print(w) ############################################################################### # Use operators on vectors # ~~~~~~~~~~~~~~~~~~~~~~~~ # Just like `numpy` PyMAPDL APDLMath vectors can be have most of the # standard operators (e.g. ``+, -, +=, -=, *=``) # # Here we form :math:`\vec{z}=\vec{v}+\vec{w}` # # Then we compute :math:`\|z\|_2` (the default `norm` is nrm2, but you # can use `.norm('nrm1')` or `.norm('nrminf')` for different normals. # See `help(z.norm)` for additional details. # # APDLMath Commands: # - `*VEC,Z,D,COPY,V` # - `*AXPY,1,,W,1,,Z` # - `*NRM,Z,,nrmval` z = v + w z.norm() ############################################################################### # Methods # ~~~~~~~ # Alternatively you can use methods, following the numpy # standards. Available methods are: # # - `mm.add()` # - `mm.subtract()` # - `mm.dot()` # # Equivalent operator: # `z = v + w` # # Equivalent APDLMath Commands: # - `*VEC,Z,D,COPY,V` # - `*AXPY,1,,W,1,,Z` z = mm.add(v, w) z.norm() ############################################################################### # Subtraction # # Equivalent operator: # z = v - w # # Equivalent APDLMath Commands: # - `*VEC,Z,D,COPY,V` # - `*AXPY,-1,,W,1,,Z` z = mm.subtract(v, w) print(z) ############################################################################### # Dot product of 2 vectors # # Equivalent APDLMath Command: `*DOT,V,W,dotval` vw = mm.dot(v, w) print("Dot product :", str(vw)) ############################################################################### # Perform an in-place operations (without copying vectors) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # In-Place Addition # # MAPDL Commands: # - `*AXPY,1,,V,1,,Z` # - `*PRINT,Z` v += v print(v) ############################################################################### # In-Place Multiplication # # MAPDL Command: `*SCAL,v,2` v *= 2 print(v) ############################################################################### # In-Place Multiplication # v /= 2.0 print(v) ############################################################################### # Working with Dense Matrices # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Allocate two dense matrices with random values. # # MAPDL Commands: # # - `*DMAT,m1,D,ALLOC,4,5` # - `*INIT,m1,RAND` # - `*DMAT,m1,D,ALLOC,4,5` # - `*INIT,m1,CONST,1` m1 = mm.rand(4, 5) m2 = mm.ones(4, 5) m1, m2 ############################################################################### # **Add** these 2 dense matrices, and **scale** the result matrix. 
# # Mapdl Commands # - `*DMAT,m3,D,COPY,m1` # - `*AXPY,1,,m2,1,,m3` m3 = m1 + m2 print(m3) m3 *= 2 print(m3) ############################################################################### # ***Transpose*** a Matrix # m4 = m3.T print(m4) ############################################################################### # As for vectors, methods are also available as an alternative to operators. m3 = mm.add(m1, m2) print(m3) ############################################################################### # Compute a matrix vector multiplication # mw = m3.dot(m4) print(mw) ############################################################################### # APDLMath matrices can be identified by printing, viewing their types, or with using the `__repr__` method by simply typing out the variable # # APDLMath Matrix # ~~~~~~~~~~~~~~~ type(m1) print(m1) m1 ############################################################################### # APDLMath Vector # type(w) print(w) w ############################################################################### # Numpy methods on APDLMath objects # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Regardless of the underlying APDLMath object type, you are generally # able to perform most numpy or scipy operations on these arrays. You # can do this one of two ways. First, you can convert a matrix to a numpy array: apdl_mat = mm.rand(5, 5) np_mat = apdl_mat.asarray() print(np_mat) ############################################################################### # Alternatively, you can simply use numpy to compute the max of the array # # This works because PyMAPDL copies over the matrix to the local # python memory and then computes the max using numpy. print(np.max(apdl_mat)) ############################################################################### # This works for most numpy operations, but keep in mind that # operations that are supported within MAPDL (such as adding or # multiplying arrays) will compute much faster as the data is not copied. # apdl_arr = mm.rand(5, 5) np_array = apdl_mat.asarray() print(np.allclose(apdl_mat, np_array)) ############################################################################### # stop mapdl mapdl.exit()
{"hexsha": "69c83566c1c827526e4a6b4b76b85cd7cab41137", "size": 5665, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/01-apdlmath-examples/basic_operations.py", "max_stars_repo_name": "Miiicah/pymapdl", "max_stars_repo_head_hexsha": "ce85393ca82db7556a5d05883ca3fd9296444cba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-09T01:12:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T01:12:02.000Z", "max_issues_repo_path": "examples/01-apdlmath-examples/basic_operations.py", "max_issues_repo_name": "Miiicah/pymapdl", "max_issues_repo_head_hexsha": "ce85393ca82db7556a5d05883ca3fd9296444cba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2022-02-24T20:34:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T20:44:17.000Z", "max_forks_repo_path": "examples/01-apdlmath-examples/basic_operations.py", "max_forks_repo_name": "lynch1972/pymapdl", "max_forks_repo_head_hexsha": "46b31438af2a0d5b2d9a69abe82e0fe69935a855", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0663716814, "max_line_length": 141, "alphanum_fraction": 0.4660194175, "include": true, "reason": "import numpy", "num_tokens": 1324}
from datascientist.model.regression.skl.linear_model.sgd import _sgd

import numpy as np


def test_sgd():
    x_train = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
    y_train = np.dot(x_train, np.array([1, 2])) + 3

    x_test = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
    y_test = np.dot(x_test, np.array([1, 2])) + 3

    metrics = 'mae'
    answer = _sgd(train=(x_train, y_train), test=(x_test, y_test), metrics=metrics)
    assert answer[0] == 'SGDRegressor'
    assert round(answer[1], 4) == 0.4900
    assert answer[2] is None

    metrics = 'mse'
    answer = _sgd(train=(x_train, y_train), test=(x_test, y_test), metrics=metrics)
    assert answer[0] == 'SGDRegressor'
    assert round(answer[1], 4) == 0.3071
    assert answer[2] is None

    metrics = 'rmse'
    answer = _sgd(train=(x_train, y_train), test=(x_test, y_test), metrics=metrics)
    assert answer[0] == 'SGDRegressor'
    assert round(answer[1], 4) == 0.5601
    assert answer[2] is None

    answer = _sgd(train=(x_train, y_train), test=(x_test, y_test), metrics=metrics,
                  x_predict=x_test)
    arr = np.array([5.1532535, 7.45404715, 9.13397804, 11.4347717])
    for i in range(len(answer[2])):
        assert round(answer[2][i], 2) == round(arr[i], 2)
{"hexsha": "c81296018a8de1556aac605b4bbc713c90a0d96d", "size": 1235, "ext": "py", "lang": "Python", "max_stars_repo_path": "datascientist/model/regression/skl/linear_model/tests/test_sgd.py", "max_stars_repo_name": "kritikaparmar-programmer/DataScientist", "max_stars_repo_head_hexsha": "b70f25b4afe28a2862a4ebfba163d162f645fba1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-05T11:09:13.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-05T11:09:13.000Z", "max_issues_repo_path": "datascientist/model/regression/skl/linear_model/tests/test_sgd.py", "max_issues_repo_name": "kritikaparmar-programmer/DataScientist", "max_issues_repo_head_hexsha": "b70f25b4afe28a2862a4ebfba163d162f645fba1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datascientist/model/regression/skl/linear_model/tests/test_sgd.py", "max_forks_repo_name": "kritikaparmar-programmer/DataScientist", "max_forks_repo_head_hexsha": "b70f25b4afe28a2862a4ebfba163d162f645fba1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3235294118, "max_line_length": 101, "alphanum_fraction": 0.6210526316, "include": true, "reason": "import numpy", "num_tokens": 441}
using SymbolicML
using Test
using Statistics
using StatsBase

include("functions/runtests_summary.jl")
{"hexsha": "7387c7c282a3c2da1c52ad63fe879cab06faf03b", "size": 104, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "gspina140/SymbolicML.jl", "max_stars_repo_head_hexsha": "a89d19cbfeeb32ad6ead9944f7d87e86788c6a90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-04-30T08:44:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-26T06:23:08.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "gspina140/SymbolicML.jl", "max_issues_repo_head_hexsha": "a89d19cbfeeb32ad6ead9944f7d87e86788c6a90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-05-18T01:19:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-24T15:46:16.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "gspina140/SymbolicML.jl", "max_forks_repo_head_hexsha": "a89d19cbfeeb32ad6ead9944f7d87e86788c6a90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-26T14:17:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-26T14:17:39.000Z", "avg_line_length": 13.0, "max_line_length": 40, "alphanum_fraction": 0.8365384615, "num_tokens": 24}
import datetime
import os
import subprocess

import numpy
from scipy.stats import norm

from . import romannumerals

# ToDo: Bring back scale bar
# ToDo: Add option for solid fill of vectors


def roundto(num, nearest):
    """
    Rounds :param:`num` to the nearest increment of :param:`nearest`
    """
    return int((num + (nearest / 2)) // nearest * nearest)


def convert_chromosome_name(chrom_string, dialect='ucsc'):
    """
    Try to auto-detect chromosome number and convert it to the specified
    "dialect". Valid dialects are "ucsc", "ensembl" and "yeast".

    :param chrom_string:
    :param dialect:
    :return:
    """
    try:
        chrom_string = str(romannumerals.roman_to_int(chrom_string))
    except ValueError:
        pass

    if dialect == 'ensembl':
        if chrom_string == 'chrM':
            return 'dmel_mitochondrion_genome'
        elif chrom_string[:3].lower() == 'chr':
            return chrom_string[3:]
        else:
            return chrom_string
    elif dialect == 'ucsc':
        if chrom_string == 'dmel_mitochondrion_genome':
            return 'chrM'
        elif chrom_string[:3].lower() == 'chr':
            return chrom_string
        else:
            return 'chr{}'.format(chrom_string)
    elif dialect == 'yeast':
        if chrom_string[:3].lower() == 'chr':
            chrom_string = chrom_string[3:]
        try:
            return romannumerals.int_to_roman(int(chrom_string))
        except ValueError:
            return chrom_string
    else:
        raise ValueError('Unknown dialect {}'.format(dialect))


def binary_search_tag_file(tag_filename, search_target):
    """
    Find the offset (in bytes) in :param:`tag_filename` that corresponds to the
    start of the first tag that is equal to or greater than
    :param:`search_target`. If none of the reads have a start position greater
    than :param:`search_target`, return None.

    Note that positions in tag files have a 1-based index.
    """

    def get_read_start(file_offset):
        tag_file.seek(file_offset)
        if file_offset > 0:
            _ = tag_file.readline()  # read forward to get to a line start
        this_line = tag_file.readline().strip()
        if tag_file.tell() >= filesize:
            # We've reached the end of the file and the reads are still upstream of the target
            return None
        else:
            return int(this_line.split('\t')[1])

    filesize = os.path.getsize(tag_filename)
    search_window_start = 0
    search_window_end = filesize - 1
    guess_genomic_start = -1
    guess = int((search_window_start + search_window_end) / 2)

    with open(tag_filename, 'rt') as tag_file:
        first_genomic_start = get_read_start(search_window_start)

        if search_target < first_genomic_start:
            return search_window_start

        while search_window_end - search_window_start > 1:
            guess = int((search_window_start + search_window_end) / 2)
            guess_genomic_start = get_read_start(guess)
            if guess_genomic_start is None:
                return None
            if guess_genomic_start < search_target:
                # guess is too low
                search_window_start = guess
            elif guess_genomic_start > search_target:
                # guess is too high
                search_window_end = guess
            else:
                # just right
                break

        if guess_genomic_start == -1:
            return None

        if guess_genomic_start < search_target:
            guess += 1

        tag_file.seek(guess)
        _ = tag_file.readline()
        guess = tag_file.tell()

    return guess


def bgzip_gff(gff3_fname, bgzipped_fname):
    """
    Compress a GFF3 file in block-gzip format (requires that bgzip be
    accessible on the current path). If :param gff3_fname: ends with '.gz',
    assumes that the file is gzipped, otherwise assumes it is uncompressed.

    :param gff3_fname:
    :param bgzipped_fname:
    :return:
    """
    if bgzipped_fname == gff3_fname:
        log_print('Destination and source file cannot have the same name!')

    cmd_line = '{} {} | sort -k1,1 -k4,4n | bgzip > {}'.format(('cat', 'zcat')[gff3_fname.endswith('.gz')],
                                                               gff3_fname,
                                                               bgzipped_fname)
    try:
        assert os.path.isfile(gff3_fname)  # needed since no error occurs otherwise
        subprocess.check_call(cmd_line, shell=True)
    except subprocess.CalledProcessError as cpe:
        log_print('Unsuccessful. Got return code {}'.format(cpe.returncode))
    except AssertionError:
        log_print('{} not found!'.format(gff3_fname))
    else:
        log_print('Successfully generated block-gzipped file {} from {}'.format(bgzipped_fname, gff3_fname))


def generate_tabix_index(target_fname):
    """
    Index :param target_fname: with tabix. Requires that the directory in
    which :param target_fname: resides is writeable.

    :param target_fname:
    :return:
    """
    cmd_line = 'tabix -f -p gff {}'.format(target_fname)
    try:
        subprocess.check_call(cmd_line, shell=True)
    except subprocess.CalledProcessError as cpe:
        log_print('Unsuccessful. Got return code {}'.format(cpe.returncode))
    else:
        log_print('Successfully indexed block-gzipped file {}'.format(target_fname))


def pretty_now():
    """
    Returns the current date/time in a nicely formatted string (without decimal seconds)
    """
    return datetime.datetime.strftime(datetime.datetime.now(), '%Y-%b-%d %H:%M:%S')


def log_print(message, tabs=1):
    """
    Print a chunk of text preceded by a timestamp and an optional number of tabs (default 1).

    :param message:
    :param tabs:
    :return:
    """
    print('{}{}{}'.format(pretty_now(), '\t' * tabs, message))


def gaussian_kernel(sd, sd_cutoff=3, normalize=False):
    """
    Generate and return a numpy.Array whose elements are proportional to the
    PDF of a normal distribution having standard deviation :param:`sd`.

    :param sd:
    :param sd_cutoff:
    :param normalize:
    :return:
    """
    bw = sd_cutoff * sd * 2 + 1
    midpoint = sd_cutoff * sd
    kern = numpy.zeros(bw)
    frozen_rv = norm(scale=sd)
    for i in range(bw):
        kern[i] = frozen_rv.pdf(i - midpoint)
    if normalize:
        kern = kern / kern.max()
    return kern


def add_label(ax, tick, tick_label, axis='x'):
    """
    Updates the set of ticks and tick labels for the specified matplotlib.Axes
    object and axis. If the tick already exists, its label will be updated.
    If not, it will be created and labeled appropriately.
    """
    if axis == 'y':
        tick_getter, label_getter = ax.get_yticks, ax.get_yticklabels
        tick_setter, label_setter = ax.set_yticks, ax.set_yticklabels
    else:
        tick_getter, label_getter = ax.get_xticks, ax.get_xticklabels
        tick_setter, label_setter = ax.set_xticks, ax.set_xticklabels

    labels = dict(zip(tick_getter(), label_getter()))
    labels[tick] = tick_label
    new_ticks, new_labels = zip(*sorted(labels.items()))
    tick_setter(new_ticks)
    label_setter(new_labels)


def adjust_limits(ax, new_position, axis='y', padding_fraction=0.1):
    """
    If necessary, adjusts the limits for the specified :param axis: on
    :param ax: to accommodate :param new_position: according to the following
    scheme:

    1. Assumes that the current limits are the smallest and largest content
       item minus / plus a padding equal to :param padding_fraction: * the
       span between the smallest and largest content item.
    2. If :param new_position: is beyond the inferred content limits, adjust
       the padding to :param padding_fraction: * the new content span, then
       adjust the plot limits to the new content limits minus / plus the new
       padding.
    """
    assert padding_fraction < 0.5, 'padding_fraction must be below 0.5!'

    if axis == 'y':
        limit_getter = ax.get_ylim
        limit_setter = ax.set_ylim
    else:
        limit_getter = ax.get_xlim
        limit_setter = ax.set_xlim

    current_plot_min, current_plot_max = limit_getter()
    current_plot_span = current_plot_max - current_plot_min
    current_data_span = current_plot_span / (1 + 2 * padding_fraction)
    current_pad = current_data_span * padding_fraction
    current_data_min = current_plot_min + current_pad
    current_data_max = current_plot_max - current_pad

    if new_position > current_data_max:
        new_data_min = current_data_min
        new_data_max = new_position
    elif new_position < current_data_min:
        new_data_min = new_position
        new_data_max = current_data_max
    else:
        # no changes needed
        return

    new_data_span = new_data_max - new_data_min
    new_pad = new_data_span * padding_fraction
    new_plot_min = new_data_min - new_pad
    new_plot_max = new_data_max + new_pad

    limit_setter((new_plot_min, new_plot_max))


def diag_indices(n, k=0):
    """
    Return the indices corresponding to the kth diagonal of an n X n array
    in the form of a tuple of (x coords, y coords). Created since numpy does
    not provide this functionality.
    """
    if k <= 0:
        x_coords = numpy.arange(-k, n)
        y_coords = numpy.arange(0, n + k)
    else:
        x_coords = numpy.arange(0, n - k)
        y_coords = numpy.arange(k, n)
    return (x_coords, y_coords)
{"hexsha": "5c12f2b89c78d1e0bd28229f8856e698d7e7c93e", "size": 10014, "ext": "py", "lang": "Python", "max_stars_repo_path": "pygbrowse/utilities.py", "max_stars_repo_name": "phageghost/python-genome-browser", "max_stars_repo_head_hexsha": "c052bb72ca89664f48d1248b5a4500482c823ee7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-07-25T22:04:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-03T15:39:16.000Z", "max_issues_repo_path": "pygbrowse/utilities.py", "max_issues_repo_name": "phageghost/python-genome-browser", "max_issues_repo_head_hexsha": "c052bb72ca89664f48d1248b5a4500482c823ee7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-07-31T20:05:34.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-20T17:13:51.000Z", "max_forks_repo_path": "pygbrowse/utilities.py", "max_forks_repo_name": "phageghost/python-genome-browser", "max_forks_repo_head_hexsha": "c052bb72ca89664f48d1248b5a4500482c823ee7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3032258065, "max_line_length": 119, "alphanum_fraction": 0.6466946275, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2361}
!> CHEASE Output Reader
!!
!! A module to read in datafiles from CHEASE.
!!
!! Written by Edmund Highcock
!! edmundhighcock@sourceforge.net
!!
!!
!! Available quantities are:
!!
!! Zero D:
!!  r0exp_chease,b0exp_chease
!! One D:
!!  rgeom_chease,ageom_chease,q_chease,dqdpsi_chease,
!!  d2qdpsi2_chease,p_chease,dpdpsi_chease,f_chease,
!!  fdfdpsi_chease,v_chease,rho_t_chease,shear_chease,
!!  dsheardpsi_chease,kappa_chease,delta_lower_chease,
!!  delta_upper_chease,dvdpsi_chease,dpsidrhotor_chease,
!!  gdpsi_av_chease,radius_av_chease,r_av_chease,te_chease,
!!  dtedpsi_chease,ne_chease,dnedpsi_chease,ti_chease,
!!  dtidpsi_chease,ni_chease,dnidpsi_chease,zeff_chease,
!!  signeo_chease,jbsbav_chease
!! Two D:
!!  g11_chease,g12_chease,g22_chease,g33_chease,b_chease,
!!  dbdpsi_chease,dbdchi_chease,dpsidr_chease,dpsidz_chease,
!!  dchidr_chease,dchidz_chease,jacobian_chease,r_chease,
!!  z_chease
!!
!! This module is generated automatically
!! using
!!  $ ruby generate_read_chease.rb
!!
!! DO NOT EDIT!
!! YOUR CHANGES WILL BE LOST

! An example chease_namelist will be on the wiki

module read_chease
  implicit none

  private

  public :: read_infile
  public :: finish
  public :: npsi_chease, nchi_chease
  public :: psi_chease, chi_chease
  public :: r0exp_chease
  public :: b0exp_chease
  public :: rgeom_chease
  public :: ageom_chease
  public :: q_chease
  public :: dqdpsi_chease
  public :: d2qdpsi2_chease
  public :: p_chease
  public :: dpdpsi_chease
  public :: f_chease
  public :: fdfdpsi_chease
  public :: v_chease
  public :: rho_t_chease
  public :: shear_chease
  public :: dsheardpsi_chease
  public :: kappa_chease
  public :: delta_lower_chease
  public :: delta_upper_chease
  public :: dvdpsi_chease
  public :: dpsidrhotor_chease
  public :: gdpsi_av_chease
  public :: radius_av_chease
  public :: r_av_chease
  public :: te_chease
  public :: dtedpsi_chease
  public :: ne_chease
  public :: dnedpsi_chease
  public :: ti_chease
  public :: dtidpsi_chease
  public :: ni_chease
  public :: dnidpsi_chease
  public :: zeff_chease
  public :: signeo_chease
  public :: jbsbav_chease
  public :: g11_chease
  public :: g12_chease
  public :: g22_chease
  public :: g33_chease
  public :: b_chease
  public :: dbdpsi_chease
  public :: dbdchi_chease
  public :: dpsidr_chease
  public :: dpsidz_chease
  public :: dchidr_chease
  public :: dchidz_chease
  public :: jacobian_chease
  public :: r_chease
  public :: z_chease

  integer :: npsi_chease, nchi_chease
  real, dimension (:), allocatable :: psi_chease, chi_chease

  integer :: infile = 1212
  integer, parameter :: ncols = 5

  real :: r0exp_chease
  real :: b0exp_chease
  real, dimension(:), allocatable :: rgeom_chease
  real, dimension(:), allocatable :: ageom_chease
  real, dimension(:), allocatable :: q_chease
  real, dimension(:), allocatable :: dqdpsi_chease
  real, dimension(:), allocatable :: d2qdpsi2_chease
  real, dimension(:), allocatable :: p_chease
  real, dimension(:), allocatable :: dpdpsi_chease
  real, dimension(:), allocatable :: f_chease
  real, dimension(:), allocatable :: fdfdpsi_chease
  real, dimension(:), allocatable :: v_chease
  real, dimension(:), allocatable :: rho_t_chease
  real, dimension(:), allocatable :: shear_chease
  real, dimension(:), allocatable :: dsheardpsi_chease
  real, dimension(:), allocatable :: kappa_chease
  real, dimension(:), allocatable :: delta_lower_chease
  real, dimension(:), allocatable :: delta_upper_chease
  real, dimension(:), allocatable :: dvdpsi_chease
  real, dimension(:), allocatable :: dpsidrhotor_chease
  real, dimension(:), allocatable :: gdpsi_av_chease
  real, dimension(:), allocatable :: radius_av_chease
  real, dimension(:), allocatable :: r_av_chease
  real, dimension(:), allocatable :: te_chease
  real, dimension(:), allocatable :: dtedpsi_chease
  real, dimension(:), allocatable :: ne_chease
  real, dimension(:), allocatable :: dnedpsi_chease
  real, dimension(:), allocatable :: ti_chease
  real, dimension(:), allocatable :: dtidpsi_chease
  real, dimension(:), allocatable :: ni_chease
  real, dimension(:), allocatable :: dnidpsi_chease
  real, dimension(:), allocatable :: zeff_chease
  real, dimension(:), allocatable :: signeo_chease
  real, dimension(:), allocatable :: jbsbav_chease
  real, dimension(:,:), allocatable :: g11_chease
  real, dimension(:,:), allocatable :: g12_chease
  real, dimension(:,:), allocatable :: g22_chease
  real, dimension(:,:), allocatable :: g33_chease
  real, dimension(:,:), allocatable :: b_chease
  real, dimension(:,:), allocatable :: dbdpsi_chease
  real, dimension(:,:), allocatable :: dbdchi_chease
  real, dimension(:,:), allocatable :: dpsidr_chease
  real, dimension(:,:), allocatable :: dpsidz_chease
  real, dimension(:,:), allocatable :: dchidr_chease
  real, dimension(:,:), allocatable :: dchidz_chease
  real, dimension(:,:), allocatable :: jacobian_chease
  real, dimension(:,:), allocatable :: r_chease
  real, dimension(:,:), allocatable :: z_chease

contains

  subroutine read_infile(filename)
    implicit none
    character(len=80), intent(in) :: filename
    open(infile, file=filename)

    read(infile, *)
    read(infile, *) npsi_chease
    write(*, *) npsi_chease, "<---npsi_chease"

    read(infile, *)
    read(infile, *) nchi_chease
    write(*, *) nchi_chease, "<---nchi_chease"

    read(infile, *)
    read(infile, *) r0exp_chease

    read(infile, *)
    read(infile, *) b0exp_chease

    allocate(psi_chease(npsi_chease))
    read(infile, *)
    read(infile, *) psi_chease
    !write(*, *) psi_chease, "<---psi_chease"

    allocate(chi_chease(nchi_chease))
    read(infile, *)
    read(infile, *) chi_chease
    !write(*, *) chi_chease, "<---chi_chease"

    allocate(rgeom_chease(npsi_chease))
    read(infile, *)
    read(infile, *) rgeom_chease
    !write(*, *) rgeom_chease, "<---rgeom_chease"

    allocate(ageom_chease(npsi_chease))
    read(infile, *)
    read(infile, *) ageom_chease
    !write(*, *) ageom_chease, "<---ageom_chease"

    allocate(q_chease(npsi_chease))
    read(infile, *)
    read(infile, *) q_chease
    !write(*, *) q_chease, "<---q_chease"

    allocate(dqdpsi_chease(npsi_chease))
    read(infile, *)
    read(infile, *) dqdpsi_chease
    !write(*, *) dqdpsi_chease, "<---dqdpsi_chease"

    allocate(d2qdpsi2_chease(npsi_chease))
    read(infile, *)
    read(infile, *) d2qdpsi2_chease
    !write(*, *) d2qdpsi2_chease, "<---d2qdpsi2_chease"

    allocate(p_chease(npsi_chease))
    read(infile, *)
    read(infile, *) p_chease
    !write(*, *) p_chease, "<---p_chease"

    allocate(dpdpsi_chease(npsi_chease))
    read(infile, *)
    read(infile, *) dpdpsi_chease
    !write(*, *) dpdpsi_chease, "<---dpdpsi_chease"

    allocate(f_chease(npsi_chease))
    read(infile, *)
    read(infile, *) f_chease
    !write(*, *) f_chease, "<---f_chease"

    allocate(fdfdpsi_chease(npsi_chease))
    read(infile, *)
    read(infile, *) fdfdpsi_chease
    !write(*, *) fdfdpsi_chease, "<---fdfdpsi_chease"

    allocate(v_chease(npsi_chease))
    read(infile, *)
    read(infile, *) v_chease
    !write(*, *) v_chease, "<---v_chease"

    allocate(rho_t_chease(npsi_chease))
    read(infile, *)
    read(infile, *) rho_t_chease
    !write(*, *) rho_t_chease, "<---rho_t_chease"

    allocate(shear_chease(npsi_chease))
    read(infile, *)
    read(infile, *) shear_chease
    !write(*, *) shear_chease, "<---shear_chease"

    allocate(dsheardpsi_chease(npsi_chease))
    read(infile, *)
    read(infile, *) dsheardpsi_chease
    !write(*, *) dsheardpsi_chease, "<---dsheardpsi_chease"

    allocate(kappa_chease(npsi_chease))
    read(infile, *)
    read(infile, *) kappa_chease
    !write(*, *) kappa_chease, "<---kappa_chease"

    allocate(delta_lower_chease(npsi_chease))
    read(infile, *)
    read(infile, *) delta_lower_chease
    !write(*, *) delta_lower_chease, "<---delta_lower_chease"

    allocate(delta_upper_chease(npsi_chease))
    read(infile, *)
    read(infile, *) delta_upper_chease
    !write(*, *) delta_upper_chease, "<---delta_upper_chease"

    allocate(dvdpsi_chease(npsi_chease))
    read(infile, *)
    read(infile, *) dvdpsi_chease
    !write(*, *) dvdpsi_chease, "<---dvdpsi_chease"

    allocate(dpsidrhotor_chease(npsi_chease))
    read(infile, *)
    read(infile, *) dpsidrhotor_chease
    !write(*, *) dpsidrhotor_chease, "<---dpsidrhotor_chease"

    allocate(gdpsi_av_chease(npsi_chease))
    read(infile, *)
    read(infile, *) gdpsi_av_chease
    !write(*, *) gdpsi_av_chease, "<---gdpsi_av_chease"

    allocate(radius_av_chease(npsi_chease))
    read(infile, *)
    read(infile, *) radius_av_chease
    !write(*, *) radius_av_chease, "<---radius_av_chease"

    allocate(r_av_chease(npsi_chease))
    read(infile, *)
    read(infile, *) r_av_chease
    !write(*, *) r_av_chease, "<---r_av_chease"

    allocate(te_chease(npsi_chease))
    read(infile, *)
    read(infile, *) te_chease
    !write(*, *) te_chease, "<---te_chease"

    allocate(dtedpsi_chease(npsi_chease))
    read(infile, *)
    read(infile, *) dtedpsi_chease
    !write(*, *) dtedpsi_chease, "<---dtedpsi_chease"

    allocate(ne_chease(npsi_chease))
    read(infile, *)
    read(infile, *) ne_chease
    !write(*, *) ne_chease, "<---ne_chease"

    allocate(dnedpsi_chease(npsi_chease))
    read(infile, *)
    read(infile, *) dnedpsi_chease
    !write(*, *) dnedpsi_chease, "<---dnedpsi_chease"

    allocate(ti_chease(npsi_chease))
    read(infile, *)
    read(infile, *) ti_chease
    !write(*, *) ti_chease, "<---ti_chease"

    allocate(dtidpsi_chease(npsi_chease))
    read(infile, *)
    read(infile, *) dtidpsi_chease
    !write(*, *) dtidpsi_chease, "<---dtidpsi_chease"

    allocate(ni_chease(npsi_chease))
    read(infile, *)
    read(infile, *) ni_chease
    !write(*, *) ni_chease, "<---ni_chease"

    allocate(dnidpsi_chease(npsi_chease))
    read(infile, *)
    read(infile, *) dnidpsi_chease
    !write(*, *) dnidpsi_chease, "<---dnidpsi_chease"

    allocate(zeff_chease(npsi_chease))
    read(infile, *)
    read(infile, *) zeff_chease
    !write(*, *) zeff_chease, "<---zeff_chease"

    allocate(signeo_chease(npsi_chease))
    read(infile, *)
    read(infile, *) signeo_chease
    !write(*, *) signeo_chease, "<---signeo_chease"

    allocate(jbsbav_chease(npsi_chease))
    read(infile, *)
    read(infile, *) jbsbav_chease
    !write(*, *) jbsbav_chease, "<---jbsbav_chease"

    allocate(g11_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) g11_chease
    !write(*, *) g11_chease, "<---g11_chease"

    allocate(g12_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) g12_chease
    !write(*, *) g12_chease, "<---g12_chease"

    allocate(g22_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) g22_chease
    !write(*, *) g22_chease, "<---g22_chease"

    allocate(g33_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) g33_chease
    !write(*, *) g33_chease, "<---g33_chease"

    allocate(b_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) b_chease
    !write(*, *) b_chease, "<---b_chease"

    allocate(dbdpsi_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) dbdpsi_chease
    !write(*, *) dbdpsi_chease, "<---dbdpsi_chease"

    allocate(dbdchi_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) dbdchi_chease
    !write(*, *) dbdchi_chease, "<---dbdchi_chease"

    allocate(dpsidr_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) dpsidr_chease
    !write(*, *) dpsidr_chease, "<---dpsidr_chease"

    allocate(dpsidz_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) dpsidz_chease
    !write(*, *) dpsidz_chease, "<---dpsidz_chease"

    allocate(dchidr_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) dchidr_chease
    !write(*, *) dchidr_chease, "<---dchidr_chease"

    allocate(dchidz_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) dchidz_chease
    !write(*, *) dchidz_chease, "<---dchidz_chease"

    allocate(jacobian_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) jacobian_chease
    !write(*, *) jacobian_chease, "<---jacobian_chease"

    allocate(r_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) r_chease
    !write(*, *) r_chease, "<---r_chease"

    allocate(z_chease(npsi_chease,nchi_chease))
    read(infile, *)
    read(infile, *) z_chease
    !write(*, *) z_chease, "<---z_chease"
  end subroutine read_infile

  subroutine finish
    deallocate(psi_chease)
    deallocate(chi_chease)
    deallocate(rgeom_chease)
    deallocate(ageom_chease)
    deallocate(q_chease)
    deallocate(dqdpsi_chease)
    deallocate(d2qdpsi2_chease)
    deallocate(p_chease)
    deallocate(dpdpsi_chease)
    deallocate(f_chease)
    deallocate(fdfdpsi_chease)
    deallocate(v_chease)
    deallocate(rho_t_chease)
    deallocate(shear_chease)
    deallocate(dsheardpsi_chease)
    deallocate(kappa_chease)
    deallocate(delta_lower_chease)
    deallocate(delta_upper_chease)
    deallocate(dvdpsi_chease)
    deallocate(dpsidrhotor_chease)
    deallocate(gdpsi_av_chease)
    deallocate(radius_av_chease)
    deallocate(r_av_chease)
    deallocate(te_chease)
    deallocate(dtedpsi_chease)
    deallocate(ne_chease)
    deallocate(dnedpsi_chease)
    deallocate(ti_chease)
    deallocate(dtidpsi_chease)
    deallocate(ni_chease)
    deallocate(dnidpsi_chease)
    deallocate(zeff_chease)
    deallocate(signeo_chease)
    deallocate(jbsbav_chease)
    deallocate(g11_chease)
    deallocate(g12_chease)
    deallocate(g22_chease)
    deallocate(g33_chease)
    deallocate(b_chease)
    deallocate(dbdpsi_chease)
    deallocate(dbdchi_chease)
    deallocate(dpsidr_chease)
    deallocate(dpsidz_chease)
    deallocate(dchidr_chease)
    deallocate(dchidz_chease)
    deallocate(jacobian_chease)
    deallocate(r_chease)
    deallocate(z_chease)
  end subroutine finish

end module read_chease

!program test
!  use read_chease
!  call read_infile("ogyropsi.dat")
!
!end program test
{"hexsha": "faa7e25d1ed5c7969e3438fd153fda8edcae896d", "size": 14123, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "geo/read_chease.f90", "max_stars_repo_name": "nicolaschristen/gs2_ndc_git", "max_stars_repo_head_hexsha": "ec0295f0726d7e2f372a02cadad0e375f7cd1c31", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "geo/read_chease.f90", "max_issues_repo_name": "nicolaschristen/gs2_ndc_git", "max_issues_repo_head_hexsha": "ec0295f0726d7e2f372a02cadad0e375f7cd1c31", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "geo/read_chease.f90", "max_forks_repo_name": "nicolaschristen/gs2_ndc_git", "max_forks_repo_head_hexsha": "ec0295f0726d7e2f372a02cadad0e375f7cd1c31", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1130063966, "max_line_length": 61, "alphanum_fraction": 0.6880974297, "num_tokens": 4335}
/**********************************************************************
 *  Copyright (c) 2008-2014, Alliance for Sustainable Energy.
 *  All rights reserved.
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 **********************************************************************/

#include "OSVersionAPI.hpp"

#include <model/Schedule.hpp>

#include <utilities/idf/IdfFile.hpp>
#include <utilities/idd/IddFile.hpp>
#include <utilities/idd/IddFileAndFactoryWrapper.hpp>
#include <utilities/core/Compare.hpp>
#include <utilities/core/Logger.hpp>
#include <utilities/core/StringStreamLogSink.hpp>

#include <boost/functional.hpp>

#include <map>

namespace openstudio {

class ProgressBar;

namespace model {
  class Model;
  class Component;
}

namespace osversion {

/** This class updates OpenStudio Models and Components to the latest version of OpenStudio. It
 *  must be maintained to keep everything working. The developer who is wrapping up the current
 *  release and starting the next one should:
 *
 *  <ol>
 *  <li> Run openstudio_osversion_tests.exe and verify that all the tests pass. </li>
 *  <li> Create the release branch. </li>
 *  <li> Copy the latest resources/osversion folder from the build directory to the source
 *       directory, add those files to the resources/CMakeLists.txt, and commit to trunk. </li>
 *  <li> Increment the OpenStudio version in CMakeLists.txt </li>
 *  <li> Increment the OpenStudio version in openstudiocore/resources/model/OpenStudio.idd (at the
 *       top and in the \\default of OS:Version's Version Identifier field). </li>
 *  <li> Register a trivial update method for the new version in the constructor. Further
 *       instructions are provided in the cpp code for VersionTranslator::VersionTranslator. </li>
 *  <li> Add the just-branched-for-release version number to m_startVersions, also in the
 *       constructor. </li>
 *  </ol>
 *
 *  To support the current iteration, developers should:
 *
 *  <ol>
 *  <li> Create a non-trivial update method upon the first IDD or other change that should result
 *       in data changes to models of the earlier vintages. </li>
 *  <li> Add to this non-trivial update method should any other such changes occur during the
 *       iteration. </li>
 *  <li> Feel free to just log warnings and errors if the desirable changes cannot be reliably
 *       completed at the data (IDF) level. Such messages could prompt the user to take specific
 *       actions in the OpenStudio Application once they have a nominally valid (updated) model. </li>
 *  </ol>
 */
class OSVERSION_API VersionTranslator {
 public:
  /** @name Constructors and Destructors */
  //@{

  VersionTranslator();

  //@}
  /** @name Actions
   *
   *  Update files on disk to the current version of OpenStudio. */
  //@{

  /** Returns a current-version OpenStudio Model, if possible. The file at pathToOldOsm must
   *  be an osm of version 0.7.0 or later. */
  boost::optional<model::Model> loadModel(const openstudio::path& pathToOldOsm,
                                          ProgressBar* progressBar = NULL);

  /** \overload */
  boost::optional<model::Model> loadModel(std::istream& is,
                                          ProgressBar* progressBar = NULL);

  /** Returns a current-version OpenStudio Component, if possible. The file at pathToOldOsc
   *  must be an osc of version 0.7.0 or later. */
  boost::optional<model::Component> loadComponent(const openstudio::path& pathToOldOsc,
                                                  ProgressBar* progressBar = NULL);

  /** \overload */
  boost::optional<model::Component> loadComponent(std::istream& is,
                                                  ProgressBar* progressBar = NULL);

  //@}
  /** @name Queries
   *
   *  Access warnings, errors, and other information about the last translation process. All this
   *  data is cleared by subsequent calls to loadModel or loadComponent. */
  //@{

  /** Get the version of the loaded file. Is 0.0.0 before loadModel or loadComponent is called.
   *  0.7.0 is the default if no Version object is found in the file. */
  VersionString originalVersion() const;

  /** Get warning messages generated by the last translation. */
  std::vector<LogMessage> warnings() const;

  /** Get error messages generated by the last translation. */
  std::vector<LogMessage> errors() const;

  /** Returns objects that were removed from the model because the object type or particular use
   *  has been deprecated. */
  std::vector<IdfObject> deprecatedObjects() const;

  /** Returns objects that were removed from the model because there is not a straightforward,
   *  unique way to upgrade them. */
  std::vector<IdfObject> untranslatedObjects() const;

  /** Returns objects that were added to the model to make it work properly in the latest
   *  version of OpenStudio. */
  std::vector<IdfObject> newObjects() const;

  /** Returns an original object, new object pair for objects that have been significantly
   *  refactored. */
  std::vector< std::pair<IdfObject,IdfObject> > refactoredObjects() const;

  //@}
 private:
  REGISTER_LOGGER("openstudio.osversion.VersionTranslator");

  typedef boost::function<std::string (VersionTranslator*, const IdfFile&, const IddFileAndFactoryWrapper& )> OSVersionUpdater;

  std::map<VersionString, OSVersionUpdater> m_updateMethods;
  std::vector<VersionString> m_startVersions;
  VersionString m_originalVersion;
  std::map<VersionString, IdfFile> m_map;
  StringStreamLogSink m_logSink;
  std::vector<IdfObject> m_deprecated, m_untranslated, m_new;
  std::vector< std::pair<IdfObject,IdfObject> > m_refactored;
  int m_nObjectsStart;
  int m_nObjectsFinalIdf;
  int m_nObjectsFinalModel;
  bool m_isComponent;

  boost::optional<model::Model> updateVersion(std::istream& is, bool isComponent,
                                              ProgressBar* progressBar = NULL);

  void initializeMap(std::istream& is);

  IddFileAndFactoryWrapper getIddFile(const VersionString& version);

  void update(const VersionString& startVersion);

  std::string defaultUpdate(const IdfFile& idf, const IddFileAndFactoryWrapper& targetIdd);
  std::string update_0_7_1_to_0_7_2(const IdfFile& idf_0_7_1, const IddFileAndFactoryWrapper& idd_0_7_2);
  std::string update_0_7_2_to_0_7_3(const IdfFile& idf_0_7_2, const IddFileAndFactoryWrapper& idd_0_7_3);
  std::string update_0_7_3_to_0_7_4(const IdfFile& idf_0_7_3, const IddFileAndFactoryWrapper& idd_0_7_4);
  std::string update_0_9_1_to_0_9_2(const IdfFile& idf_0_9_1, const IddFileAndFactoryWrapper& idd_0_9_2);
  std::string update_0_9_5_to_0_9_6(const IdfFile& idf_0_9_5, const IddFileAndFactoryWrapper& idd_0_9_6);
  std::string update_0_9_6_to_0_10_0(const IdfFile& idf_0_9_6, const IddFileAndFactoryWrapper& idd_0_10_0);
  std::string update_0_11_0_to_0_11_1(const IdfFile& idf_0_11_0, const IddFileAndFactoryWrapper& idd_0_11_1);
  std::string update_0_11_1_to_0_11_2(const IdfFile& idf_0_11_1, const IddFileAndFactoryWrapper& idd_0_11_2);
  std::string update_0_11_4_to_0_11_5(const IdfFile& idf_0_11_4, const IddFileAndFactoryWrapper& idd_0_11_5);
  std::string update_0_11_5_to_0_11_6(const IdfFile& idf_0_11_5, const IddFileAndFactoryWrapper& idd_0_11_6);
  std::string update_1_0_1_to_1_0_2(const IdfFile& idf_1_0_1, const IddFileAndFactoryWrapper& idd_1_0_2);
  std::string update_1_0_2_to_1_0_3(const IdfFile& idf_1_0_2, const IddFileAndFactoryWrapper& idd_1_0_3);
  std::string update_1_2_2_to_1_2_3(const IdfFile& idf_1_2_2, const IddFileAndFactoryWrapper& idd_1_2_3);

  IdfObject updateUrlField_0_7_1_to_0_7_2(const IdfObject& object, unsigned index);

  struct InterobjectIssueInformation {
    VersionString endVersion;

    InterobjectIssueInformation(const VersionString& t_endVersion)
      : endVersion(t_endVersion)
    {}

    virtual ~InterobjectIssueInformation() {}
  };

  std::vector< boost::shared_ptr<InterobjectIssueInformation> > fixInterobjectIssuesStage1(
      model::Model& model,
      const VersionString& startVersion);

  void fixInterobjectIssuesStage2(
      model::Model& model,
      std::vector<boost::shared_ptr<InterobjectIssueInformation> >& stage1Information);

  struct InterobjectIssueInformation_0_8_3_to_0_8_4 : public InterobjectIssueInformation {
    std::vector<model::Schedule> schedules;
    std::vector< std::vector<model::ModelObject> > users;
    std::vector< std::vector< std::vector<unsigned> > > indices;
    std::vector< std::vector< std::vector<model::ScheduleTypeKey> > > keys;
    std::vector< std::vector<IdfObject> > componentDataObjects;
    std::vector<IdfObject> originalUsers;
    std::set<model::ModelObject> refactoredUsers;

    InterobjectIssueInformation_0_8_3_to_0_8_4()
      : InterobjectIssueInformation(VersionString("0.8.4"))
    {}

    virtual ~InterobjectIssueInformation_0_8_3_to_0_8_4() {}
  };

  boost::shared_ptr<InterobjectIssueInformation> fixInterobjectIssuesStage1_0_8_3_to_0_8_4(
      model::Model& model);
  void fixInterobjectIssuesStage2_0_8_3_to_0_8_4(
      model::Model& model,
      boost::shared_ptr<InterobjectIssueInformation>& info);
};

} // osversion
} // openstudio
{"hexsha": "a65edd6770215b12090137fddf2988abf68b4059", "size": 10161, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "openstudiocore/src/osversion/VersionTranslator.hpp", "max_stars_repo_name": "ORNL-BTRIC/OpenStudio", "max_stars_repo_head_hexsha": "878f94bebf6f025445d1373e8b2304ececac16d8", "max_stars_repo_licenses": ["blessing"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openstudiocore/src/osversion/VersionTranslator.hpp", "max_issues_repo_name": "ORNL-BTRIC/OpenStudio", "max_issues_repo_head_hexsha": "878f94bebf6f025445d1373e8b2304ececac16d8", "max_issues_repo_licenses": ["blessing"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openstudiocore/src/osversion/VersionTranslator.hpp", "max_forks_repo_name": "ORNL-BTRIC/OpenStudio", "max_forks_repo_head_hexsha": "878f94bebf6f025445d1373e8b2304ececac16d8", "max_forks_repo_licenses": ["blessing"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.16, "max_line_length": 128, "alphanum_fraction": 0.7021946659, "num_tokens": 2697}
function get_devices(sys::PowerSystems.PowerSystem, category::Type{PowerSystems.ThermalGen})
    return sys.generators.thermal
end

function get_devices(sys::PowerSystems.PowerSystem, category::Type{PowerSystems.RenewableGen})
    return sys.generators.renewable
end

function get_devices(sys::PowerSystems.PowerSystem, category::Type{PowerSystems.HydroGen})
    return sys.generators.hydro
end

function get_devices(sys::PowerSystems.PowerSystem, category::Type{PowerSystems.PowerSystems.ElectricLoad})
    return sys.loads
end

function constructservice!(m::JuMP.AbstractModel, service::PowerSystems.StaticReserve,
                           category_formulation::Type{PS.RampLimitedReserve},
                           devices::Array{NamedTuple{(:device, :formulation), Tuple{DataType,DataType}}},
                           sys::PowerSystems.PowerSystem; args...)
    dev_set = Array{NamedTuple{(:device,:formulation),Tuple{PowerSystems.PowerSystemDevice,DataType}}}([])

    for category in devices
        if category != nothing
            D = get_devices(sys, category.device)
            for d in D
                if d in service.contributingdevices
                    push!(dev_set, (device=d, formulation=category.formulation))
                end
            end
        end
    end

    if !isempty(dev_set)
        p_rsv = reservevariables(m, dev_set, sys.time_periods)
        m = PS.reserves(m, dev_set, service, sys.time_periods)
    end

    return m
end
{"hexsha": "bbaf5ee401fdc88ddf14c30360ed16a4887b157b", "size": 1390, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/component_constructors/services_constructor.jl", "max_stars_repo_name": "gitter-badger/PowerSimulations.jl", "max_stars_repo_head_hexsha": "608671297c4b813505aef4073932eae3d8875af6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/component_constructors/services_constructor.jl", "max_issues_repo_name": "gitter-badger/PowerSimulations.jl", "max_issues_repo_head_hexsha": "608671297c4b813505aef4073932eae3d8875af6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/component_constructors/services_constructor.jl", "max_forks_repo_name": "gitter-badger/PowerSimulations.jl", "max_forks_repo_head_hexsha": "608671297c4b813505aef4073932eae3d8875af6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.75, "max_line_length": 255, "alphanum_fraction": 0.7201438849, "num_tokens": 328}
#import string
import multiprocessing
from functools import partial
import sys
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists
import pickle
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.metrics import classification_report, accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.utils import parallel_backend
import nltk
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

#nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'stopwords'])
stop_words = set(stopwords.words('english'))


def load_data(database_filepath):
    '''
    Input:
        database_filename(str): Filepath of the database.
    Output:
        X(numpy.ndarray): Array of input features.
        y(numpy.ndarray): Output labels, classes.
    '''
    try:
        database_exists(f'sqlite:///{database_filepath}')
        engine = create_engine(f'sqlite:///{database_filepath}')
        connection = engine.connect()
        df = pd.read_sql_table("messages_categories", con=connection)
        labels = df.iloc[:, 4:].columns
        X = df["message"].values
        y = df.iloc[:, 4:].values
        connection.close()
        return X, y, labels
    except:
        print("Database does not exist! Check your database_filepath!")


def tokenize(text):
    '''
    Normalize, lemmatize and tokenize text messages.
    Input:
        text(str): Text messages.
    Output:
        clean_tokens(list): Normalized, lemmatized tokens of the text messages.
    '''
    # normalize text
    normalized_text = text.lower().strip()
    # tokenize text
    tokens = word_tokenize(normalized_text)
    # lemmatize text and remove stop words and non alphanumericals
    clean_tokens = []
    for token in tokens:
        lemmatizer = WordNetLemmatizer()
        clean_token = lemmatizer.lemmatize(token)
        if clean_token not in stop_words and clean_token.isalpha():
            clean_tokens.append(clean_token)
    return clean_tokens


def build_model():
    '''Build a Machine Learning pipeline using TfidfTransformer, RandomForestClassifier and GridSearchCV
    Input:
        None
    Output:
        cv(sklearn.model_selection._search.GridSearchCV): Results of GridSearchCV
    '''
    text_clf = Pipeline([
        ('vect', CountVectorizer(tokenizer=partial(tokenize))),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(
            estimator=RandomForestClassifier(verbose=2)))
    ])

    # parameters = {
    #     'clf__estimator__max_depth': [4, 6, 10, 12],
    #     'clf__estimator__n_estimators': [20, 40, 100],
    # }
    # grid_fit = GridSearchCV(
    #     estimator=text_clf,
    #     param_grid=parameters,
    #     verbose=3,
    #     cv=2,
    #     n_jobs=-1)

    return text_clf


def evaluate_model(model, X_test, y_test, labels):
    """
    Function that will predict on X_test messages using the build_model() function
    that transforms messages, extracts features and trains a classifier.
    Input:
        model(sklearn.model_selection._search.GridSearchCV): Trained model.
        X_test(numpy.ndarray): Numpy array of messages that based on which trained model will predict.
        y_test(numpy.ndarray): Numpy array of classes that will be used to validate model predictions.
        labels(pandas.core.indexes.base.Index): Target labels for a multiclass prediction.
    Output:
        df(pandas.core.frame.DataFrame): Dataframe that contains report showing the main classification metrics.
    """
    y_pred = model.predict(X_test)
    df = pd.DataFrame(classification_report(
        y_test, y_pred, target_names=labels, output_dict=True)).T.reset_index()
    df = df.rename(columns={"index": "labels"})
    return df


def save_model(model, filepath):
    '''Saves the model to defined filepath
    Input:
        model(sklearn.model_selection._search.GridSearchCV): The model to be saved.
        model_filepath(str): Filepath where the model will be saved.
    Output:
        This function will save the model as a pickle file on the defined filepath.
    '''
    temporary_pickle = open(filepath, 'wb')
    pickle.dump(model, temporary_pickle)
    temporary_pickle.close()
    print("Model has been succesfully saved!")


def main():
    if len(sys.argv) == 3:
        database_filepath, model_filepath = sys.argv[1:]
        print('Loading data...\n DATABASE: {}'.format(database_filepath))
        X, Y, category_names = load_data(database_filepath)
        X_train, X_test, Y_train, Y_test = train_test_split(
            X, Y, test_size=0.2)

        print('Building model...')
        model = build_model()

        print('Training model...')
        model.fit(X_train, Y_train)

        print('Evaluating model...')
        evaluate_model(model, X_test, Y_test, category_names)

        print('Saving model...\n MODEL: {}'.format(model_filepath))
        save_model(model, model_filepath)

        print('Trained model saved!')
    else:
        print('Please provide the filepath of the disaster messages database '
              'as the first argument and the filepath of the pickle file to '
              'save the model to as the second argument. \n\nExample: python '
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')

# https://github.com/scikit-learn/scikit-learn/issues/10533
if __name__ == '__main__':
    main()
{"hexsha": "c4aa4bb94794b0601be0ff27d7cf962e237ba023", "size": 5932, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/train_classifier.py", "max_stars_repo_name": "baky0905/disaster-response-pipeline", "max_stars_repo_head_hexsha": "09ff91230037c21fea69a101c7057b30accb56fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/train_classifier.py", "max_issues_repo_name": "baky0905/disaster-response-pipeline", "max_issues_repo_head_hexsha": "09ff91230037c21fea69a101c7057b30accb56fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/train_classifier.py", "max_forks_repo_name": "baky0905/disaster-response-pipeline", "max_forks_repo_head_hexsha": "09ff91230037c21fea69a101c7057b30accb56fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-29T17:08:37.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-29T19:22:56.000Z", "avg_line_length": 32.0648648649, "max_line_length": 112, "alphanum_fraction": 0.681220499, "include": true, "reason": "import numpy", "num_tokens": 1278}
#!/usr/bin/env python3

import numpy as np

from vchamtools.vcham import vcham
from itertools import combinations

# TODO:
# load and save H from disk
# add mode numbers to H class
#   enables also better plotting! make plotting package for H

def op_parameter_section(H, states=None):
    """states list includes all states to output, beginning with 1"""
    # if len(mode_numbers) != H.n_modes:
    #     raise ValueError('len(mode_numbers) != H.n_modes')

    n_states = len(H.E)
    if states is not None:
        if len(states) > n_states:
            # too many states given
            raise ValueError('len(states) > n_states')
        if len(states) != len(set(states)):
            # duplicate entries in states list
            # remove or throw error?
            raise ValueError('duplicate entries in states list')
        states.sort()
    else:
        # no states given -> include all states
        states = list(range(1, n_states+1))

    # indices of the coupling elements of the selected states in c_lambda and c_eta array
    # coupling_element_indices = [vcham.get_index_of_triangular_element_in_flattened_matrix(*state_pair, n_states) for state_pair in combinations(states, 2)]
    # example: states=[1, 3, 8] --> indices = 1, 6, 25 --> coupling_element_indices = {1: '12', 6: '13', 25: '23'}
    coupling_element_indices = {vcham.get_index_of_triangular_element_in_flattened_matrix(*state_pair, n_states): ''.join([str(states.index(i)+1) for i in state_pair]) for state_pair in combinations(states, 2)}

    str_parameter_section = 'PARAMETER-SECTION\n'

    # ground state mode frequencies
    for i, w in enumerate(H.w):
        str_parameter_section += 'w_' + str(H.modes[i]) + ' = ' + str(w).replace('e', 'd') + ', ev\n'

    # vertical excitation energies
    for i in range(len(states)):
        str_parameter_section += 'E_' + str(i+1) + ' = ' + str(H.E[states[i]-1]).replace('e', 'd') + ', ev\n'

    it = np.nditer(H.c_kappa, flags=['multi_index'], order='C')
    while not it.finished:
        if not np.isclose(it.value, 0.) and it.multi_index[0]+1 in states:
            str_value = str(it.value).replace('e', 'd')
            str_parameter_section += 'kappa' + str(states.index(it.multi_index[0]+1)+1) + '_' + str(H.modes[it.multi_index[1]]) + ' = ' + str_value + ', ev\n'
        it.iternext()

    it = np.nditer(H.c_gamma, flags=['multi_index'], order='C')
    while not it.finished:
        if not np.isclose(it.value, 0.) and it.multi_index[0]+1 in states:
            str_value = str(it.value).replace('e', 'd')
            str_parameter_section += 'gamma' + str(states.index(it.multi_index[0]+1)+1) + '_' + str(H.modes[it.multi_index[1]]) + str(H.modes[it.multi_index[2]]) + ' = ' + str_value + ', ev\n'
        it.iternext()

    it = np.nditer(H.c_rho, flags=['multi_index'], order='C')
    while not it.finished:
        if not np.isclose(it.value, 0.) and it.multi_index[0]+1 in states:
            str_value = str(it.value).replace('e', 'd')
            str_parameter_section += 'rho' + str(states.index(it.multi_index[0]+1)+1) + '_' + str(H.modes[it.multi_index[1]]) + str(H.modes[it.multi_index[2]]) + ' = ' + str_value + ', ev\n'
        it.iternext()

    it = np.nditer(H.c_sigma, flags=['multi_index'], order='C')
    while not it.finished:
        if not np.isclose(it.value, 0.) and it.multi_index[0]+1 in states:
            str_value = str(it.value).replace('e', 'd')
            str_parameter_section += 'sigma' + str(states.index(it.multi_index[0]+1)+1) + '_' + str(H.modes[it.multi_index[1]]) + str(H.modes[it.multi_index[2]]) + ' = ' + str_value + ', ev\n'
        it.iternext()

    it = np.nditer(H.c_lambda, flags=['multi_index'], order='C')
    while not it.finished:
        if not np.isclose(it.value, 0.) and it.multi_index[0] in coupling_element_indices:
            str_value = str(it.value).replace('e', 'd')
            str_parameter_section += 'lambda' + str(coupling_element_indices[it.multi_index[0]]) + '_' + str(H.modes[it.multi_index[1]]) + ' = ' + str_value + ', ev\n'
        it.iternext()

    it = np.nditer(H.c_eta, flags=['multi_index'], order='C')
    while not it.finished:
        if not np.isclose(it.value, 0.) and it.multi_index[0] in coupling_element_indices:
            str_value = str(it.value).replace('e', 'd')
            str_parameter_section += 'eta' + str(coupling_element_indices[it.multi_index[0]]) + '_' + str(H.modes[it.multi_index[1]]) + str(H.modes[it.multi_index[2]]) + ' = ' + str_value + ', ev\n'
        it.iternext()

    str_parameter_section += 'end-parameter-section'
    return str_parameter_section

def op_hamiltonian_section(H, states=None):
    """states list includes all states to output, beginning with 1"""
    # assumptions: first entry in states list = photoexcited state
    # add one state = last state = ground state

    # if len(mode_numbers) != H.n_modes:
    #     raise ValueError('len(mode_numbers) != H.n_modes')

    n_states = len(H.E)
    if states is not None:
        if len(states) > n_states:
            # too many states given
            raise ValueError('len(states) > n_states')
        if len(states) != len(set(states)):
            # duplicate entries in states list
            # remove or throw error?
            raise ValueError('duplicate entries in states list')
        states.sort()
    else:
        # no states given -> include all states
        states = list(range(1, n_states+1))

    # indices of the coupling elements of the selected states in c_lambda and c_eta array
    # coupling_element_indices = [vcham.get_index_of_triangular_element_in_flattened_matrix(*state_pair, n_states) for state_pair in combinations(states, 2)]
    # example: states=[1, 3, 8] --> indices = 1, 6, 25 --> coupling_element_indices = {1: '12', 6: '13', 25: '23'}
    coupling_element_indices = {vcham.get_index_of_triangular_element_in_flattened_matrix(*state_pair, n_states): ''.join([str(states.index(i)+1) for i in state_pair]) for state_pair in combinations(states, 2)}

    str_hamiltonian_section = 'HAMILTONIAN-SECTION\n'

    # table header
    str_header = ' modes\t\t| '
    for i in H.modes:
        str_header += 'Q' + str(i) + '\t| '
    str_header += 'el\t| Time\t'
    str_header = str_header.expandtabs(8)
    delimiter = ''.join(['-']*len(str_header))
    str_hamiltonian_section += delimiter + '\n' + str_header + '\n' + delimiter + '\n'

    # kinetic energy terms
    for i in H.modes:
        # str_current_line = '-0.5*w_' + str(i) + '\t| '
        str_current_line = 'w_' + str(i) + '\t\t| '
        for j in H.modes:
            if i == j:
                # str_current_line += 'dq^2\t| '
                str_current_line += 'KE\t| '
            else:
                str_current_line += '1\t| '
        str_current_line += '1\t| 1\n'
        str_hamiltonian_section += str_current_line.expandtabs(8)
    str_hamiltonian_section += '\n'

    # potential energy terms
    for i in H.modes:
        str_current_line = '0.5*w_' + str(i) + '\t| '
        for j in H.modes:
            if i == j:
                str_current_line += 'q^2\t| '
            else:
                str_current_line += '1\t| '
        str_current_line += '1\t| 1\n'
        str_hamiltonian_section += str_current_line.expandtabs(8)
    str_hamiltonian_section += '\n'

    # vertical excitation energies
    for i in range(1, len(states)+1):
        str_current_line = 'E_' + str(i) + '\t\t| '
        for _ in H.modes:
            str_current_line += '1\t| '
        str_current_line += 'S' + str(i) + '&' + str(i) + '\t| 1\n'
        str_hamiltonian_section += str_current_line.expandtabs(8)
    str_hamiltonian_section += '\n'

    # kappa coefficients
    it = np.nditer(H.c_kappa, flags=['multi_index'], order='C')
    while not it.finished:
        str_current_line = ''
        if not np.isclose(it.value, 0.) and it.multi_index[0]+1 in states:
            str_current_line += 'kappa' + str(states.index(it.multi_index[0]+1)+1) + '_' + str(H.modes[it.multi_index[1]]) + '\t| '
            for i in range(len(H.modes)):
                if i == it.multi_index[1]:
                    str_current_line += 'q\t| '
                else:
                    str_current_line += '1\t| '
            str_current_line += 'S' + str(states.index(it.multi_index[0]+1)+1) + '&' + str(states.index(it.multi_index[0]+1)+1) + '\t| 1\n'
        str_hamiltonian_section += str_current_line.expandtabs(8)
        it.iternext()
    str_hamiltonian_section += '\n'

    # gamma coefficients
    it = np.nditer(H.c_gamma, flags=['multi_index'], order='C')
    while not it.finished:
        str_current_line = ''
        if not np.isclose(it.value, 0.) and it.multi_index[0]+1 in states:
            str_current_line += 'gamma' + str(states.index(it.multi_index[0]+1)+1) + '_' + str(H.modes[it.multi_index[1]]) + str(H.modes[it.multi_index[2]]) + '\t| '
            for i in range(len(H.modes)):
                if i == it.multi_index[1] and i == it.multi_index[2]:
                    str_current_line += 'q^2\t| '
                elif i == it.multi_index[1] or i == it.multi_index[2]:
                    str_current_line += 'q\t| '
                else:
                    str_current_line += '1\t| '
            str_current_line += 'S' + str(states.index(it.multi_index[0]+1)+1) + '&' + str(states.index(it.multi_index[0]+1)+1) + '\t| 1\n'
        str_hamiltonian_section += str_current_line.expandtabs(8)
        it.iternext()
    str_hamiltonian_section += '\n'

    # rho coefficients
    it = np.nditer(H.c_rho, flags=['multi_index'], order='C')
    while not it.finished:
        str_current_line = ''
        if not np.isclose(it.value, 0.) and it.multi_index[0]+1 in states:
            str_current_line += 'rho' + str(states.index(it.multi_index[0]+1)+1) + '_' + str(H.modes[it.multi_index[1]]) + str(H.modes[it.multi_index[2]]) + '\t| '
            for i in range(len(H.modes)):
                if i == it.multi_index[1] and i == it.multi_index[2]:
                    str_current_line += 'q^3\t| '
                elif i == it.multi_index[2]:
                    str_current_line += 'q^2\t| '
                elif i == it.multi_index[1]:
                    str_current_line += 'q\t| '
                else:
                    str_current_line += '1\t| '
            str_current_line += 'S' + str(states.index(it.multi_index[0]+1)+1) + '&' + str(states.index(it.multi_index[0]+1)+1) + '\t| 1\n'
        str_hamiltonian_section += str_current_line.expandtabs(8)
        it.iternext()
    str_hamiltonian_section += '\n'

    # sigma coefficients
    it = np.nditer(H.c_sigma, flags=['multi_index'], order='C')
    while not it.finished:
        str_current_line = ''
        if not np.isclose(it.value, 0.) and it.multi_index[0]+1 in states:
            str_current_line += 'sigma' + str(states.index(it.multi_index[0]+1)+1) + '_' + str(H.modes[it.multi_index[1]]) + str(H.modes[it.multi_index[2]]) + '\t| '
            for i in range(len(H.modes)):
                if i == it.multi_index[1] and i == it.multi_index[2]:
                    str_current_line += 'q^4\t| '
                elif i == it.multi_index[1] or i == it.multi_index[2]:
                    str_current_line += 'q^2\t| '
                else:
                    str_current_line += '1\t| '
            str_current_line += 'S' + str(states.index(it.multi_index[0]+1)+1) + '&' + str(states.index(it.multi_index[0]+1)+1) + '\t| 1\n'
        str_hamiltonian_section += str_current_line.expandtabs(8)
        it.iternext()
    str_hamiltonian_section += '\n'

    # # lambda coefficients
    it = np.nditer(H.c_lambda, flags=['multi_index'], order='C')
    while not it.finished:
        str_current_line = ''
        if not np.isclose(it.value, 0.) and it.multi_index[0] in coupling_element_indices:
            str_current_line += 'lambda' + str(coupling_element_indices[it.multi_index[0]]) + '_' + str(H.modes[it.multi_index[1]]) + '\t| '
            for i in range(len(H.modes)):
                if i == it.multi_index[1]:
                    str_current_line += 'q\t| '
                else:
                    str_current_line += '1\t| '
            str_current_line += 'S' + coupling_element_indices[it.multi_index[0]][:1] + '&' + coupling_element_indices[it.multi_index[0]][1:] + '\t| 1\n'
        str_hamiltonian_section += str_current_line.expandtabs(8)
        it.iternext()
    str_hamiltonian_section += '\n'

    it = np.nditer(H.c_eta, flags=['multi_index'], order='C')
    while not it.finished:
        str_current_line = ''
        if not np.isclose(it.value, 0.) and it.multi_index[0] in coupling_element_indices:
            str_current_line += 'eta' + str(coupling_element_indices[it.multi_index[0]]) + '_' + str(H.modes[it.multi_index[1]]) + str(H.modes[it.multi_index[2]]) + '\t| '
            for i in range(len(H.modes)):
                if i == it.multi_index[1] and i == it.multi_index[2]:
                    str_current_line += 'q^3\t| '
                elif i == it.multi_index[2]:
                    str_current_line += 'q^2\t| '
                elif i == it.multi_index[1]:
                    str_current_line += 'q\t| '
                else:
                    str_current_line += '1\t| '
            str_current_line += 'S' + coupling_element_indices[it.multi_index[0]][:1] + '&' + coupling_element_indices[it.multi_index[0]][1:] + '\t| 1\n'
        str_hamiltonian_section += str_current_line.expandtabs(8)
        it.iternext()
    str_hamiltonian_section += '\n'

    # laser pulse
    str_current_line = '-1.0*e0\t\t| '
    for _ in H.modes:
        str_current_line += '1\t| '
    str_current_line += 'S1&2' #+ str(len(states)+1)
    str_current_line += '\t| carrier*env\n'
    str_hamiltonian_section += str_current_line.expandtabs(8)

    str_hamiltonian_section += delimiter + '\n' + 'end-hamiltonian-section'
    return str_hamiltonian_section
{"hexsha": "6df92e0e21793824058bc1e4878f377a1dc4bbd7", "size": 14191, "ext": "py", "lang": "Python", "max_stars_repo_path": "vchamtools/vcham/mctdh.py", "max_stars_repo_name": "oh-fv/vchamtools", "max_stars_repo_head_hexsha": "80ca39931b47db00cb9ed70e04687fabade20610", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vchamtools/vcham/mctdh.py", "max_issues_repo_name": "oh-fv/vchamtools", "max_issues_repo_head_hexsha": "80ca39931b47db00cb9ed70e04687fabade20610", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vchamtools/vcham/mctdh.py", "max_forks_repo_name": "oh-fv/vchamtools", "max_forks_repo_head_hexsha": "80ca39931b47db00cb9ed70e04687fabade20610", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-06T22:02:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-06T22:02:56.000Z", "avg_line_length": 51.0467625899, "max_line_length": 210, "alphanum_fraction": 0.5784652244, "include": true, "reason": "import numpy", "num_tokens": 3767}
# Decision Lens API
#
# No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
#
# OpenAPI spec version: 1.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git


#' AddUserRequest Class
#'
#' @field user
#' @field message
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
AddUserRequest <- R6::R6Class(
  'AddUserRequest',
  public = list(
    `user` = NULL,
    `message` = NULL,
    initialize = function(`user`, `message`) {
      if (!missing(`user`)) {
        stopifnot(R6::is.R6(`user`))
        self$`user` <- `user`
      }
      if (!missing(`message`)) {
        stopifnot(is.character(`message`), length(`message`) == 1)
        self$`message` <- `message`
      }
    },
    toJSON = function() {
      AddUserRequestObject <- list()
      if (!is.null(self$`user`)) {
        AddUserRequestObject[['user']] <- self$`user`$toJSON()
      }
      if (!is.null(self$`message`)) {
        AddUserRequestObject[['message']] <- self$`message`
      }
      AddUserRequestObject
    },
    fromJSON = function(AddUserRequestJson) {
      AddUserRequestObject <- dlensFromJSON(AddUserRequestJson)
      if (!is.null(AddUserRequestObject$`user`)) {
        userObject <- User$new()
        userObject$fromJSON(jsonlite::toJSON(AddUserRequestObject$user, auto_unbox = TRUE))
        self$`user` <- userObject
      }
      if (!is.null(AddUserRequestObject$`message`)) {
        self$`message` <- AddUserRequestObject$`message`
      }
    },
    toJSONString = function() {
      sprintf(
        '{
           "user": %s,
           "message": %s
        }',
        self$`user`$toJSON(),
        self$`message`
      )
    },
    fromJSONString = function(AddUserRequestJson) {
      AddUserRequestObject <- dlensFromJSON(AddUserRequestJson)
      UserObject <- User$new()
      self$`user` <- UserObject$fromJSON(jsonlite::toJSON(AddUserRequestObject$user, auto_unbox = TRUE))
      self$`message` <- AddUserRequestObject$`message`
    }
  )
)
{"hexsha": "66057c26cf518d47c72555c692bb004eed8131a4", "size": 2051, "ext": "r", "lang": "R", "max_stars_repo_path": "R/R/AddUserRequest.r", "max_stars_repo_name": "dlens/dlxapi", "max_stars_repo_head_hexsha": "189a6519240ce625d7a9cdb89e305a335d2aa045", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R/R/AddUserRequest.r", "max_issues_repo_name": "dlens/dlxapi", "max_issues_repo_head_hexsha": "189a6519240ce625d7a9cdb89e305a335d2aa045", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-20T17:31:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-20T17:31:43.000Z", "max_forks_repo_path": "R/R/AddUserRequest.r", "max_forks_repo_name": "dlens/dlxapi", "max_forks_repo_head_hexsha": "189a6519240ce625d7a9cdb89e305a335d2aa045", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.095890411, "max_line_length": 104, "alphanum_fraction": 0.602145295, "num_tokens": 562}
import streamlit as st
import pandas as pd
import numpy as np

st.title('Weekly Deaths from Pneumonia, Influenza, or COVID-19')

DATA_SOURCE = './NCHSData47.csv'
YEAR = 'year'
WEEK = 'week'
OTHER_DEATHS = 'other deaths'
PNEUMONIA_DEATHS = 'pneumonia deaths'
INFLUENZA_DEATHS = 'influenza deaths'
COVID19_DEATHS = 'covid-19 deaths'

@st.cache
def load_data(nrows):
    data = pd.read_csv(DATA_SOURCE, nrows=nrows)
    lowercase = lambda x: str(x).lower()
    data.rename(lowercase, axis='columns', inplace=True)
    return data

# Load 10,000 rows of data into the dataframe.
data = load_data(10000)

st.subheader('Deaths per week *')
year_filter = st.slider(YEAR, 2014, 2020)
filtered_data = data[data[YEAR] == year_filter]
st.bar_chart(data=filtered_data.set_index(WEEK)[[OTHER_DEATHS, COVID19_DEATHS, INFLUENZA_DEATHS, PNEUMONIA_DEATHS]],
             use_container_width=True)
st.text('* Data for 2020 is incomplete')
st.markdown('[Check Out My Github](https://github.com/greenfructose)')
{"hexsha": "978fd72854d712b21c2e4acd655f29d85d42b9d4", "size": 981, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "greenfructose/PICDeathVisualizationUS", "max_stars_repo_head_hexsha": "d6aa631cd7916c6e20f123bb1b5c36f615d55e0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "greenfructose/PICDeathVisualizationUS", "max_issues_repo_head_hexsha": "d6aa631cd7916c6e20f123bb1b5c36f615d55e0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "greenfructose/PICDeathVisualizationUS", "max_forks_repo_head_hexsha": "d6aa631cd7916c6e20f123bb1b5c36f615d55e0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7272727273, "max_line_length": 142, "alphanum_fraction": 0.749235474, "include": true, "reason": "import numpy", "num_tokens": 299}
record Foo (param : Nat) where
  constructor MkFoo
  num : Int

implementation Show (Foo n) where
  show f = show (param_param f) ++ ", " ++ show (num f)

main : IO ()
main = do
  let x = MkFoo {param=10} 20
  putStrLn (show (record { param_param = 42 } x))
  putStrLn (show (record { num = 42 } x))
{"hexsha": "8795f7bc2bd82c98ad1a47e5ec9441956bf67c84", "size": 315, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "test/idris-dev/records002/record002.idr", "max_stars_repo_name": "grin-compiler/idris-grin", "max_stars_repo_head_hexsha": "0514e4d41933143223cb685e23f450dcbf3d5593", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2019-06-06T10:35:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T08:48:30.000Z", "max_issues_repo_path": "test/idris-dev/records002/record002.idr", "max_issues_repo_name": "grin-compiler/idris-grin", "max_issues_repo_head_hexsha": "0514e4d41933143223cb685e23f450dcbf3d5593", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-11-21T20:45:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-02T12:29:23.000Z", "max_forks_repo_path": "test/idris-dev/records002/record002.idr", "max_forks_repo_name": "grin-compiler/idris-grin", "max_forks_repo_head_hexsha": "0514e4d41933143223cb685e23f450dcbf3d5593", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-06-14T13:14:47.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-07T06:20:45.000Z", "avg_line_length": 24.2307692308, "max_line_length": 57, "alphanum_fraction": 0.5904761905, "num_tokens": 95}
""" Example of ordinary Monte Carlo with rejection sampling integrating circle area """ import numpy as np import scipy.stats from matplotlib.colors import Normalize from pylab import *; ion() import probayes as pb # PARAMETERS radius = 1. steps = 10000 # SETUP CIRCLE FUNCTION AND RVs def inside(x, y): return np.array(x**2 + y**2 <= radius**2, dtype=float) def norm2d(x, y, loc=0., scale=radius): return scipy.stats.norm.pdf(x, loc=loc, scale=scale) * \ scipy.stats.norm.pdf(y, loc=loc, scale=scale) xy_range = [-radius, radius] x = pb.RV("x", xy_range) y = pb.RV("y", xy_range) # DEFINE STOCHASTIC PROCESS xy = x & y process = pb.SP(xy) process.set_prob(inside) # DEFINE PROPOSAL DENSITY AND COEFFICIENT VARIABLE process.set_prop(norm2d) process.set_scores(lambda opqr: opqr.p.prob) coef_max = float(norm2d(radius, 1.)) process.set_thresh(np.random.uniform, low=0., high=coef_max) process.set_update(lambda stu: stu.s >= stu.t) # SAMPLE AND SUMMARISE sampler = process.sampler({0}, stop=steps) samples = [sample for sample in sampler] summary = process(samples) expectation = summary.p.size / steps square_area = 4. * radius**2 circle_area = square_area * expectation # PLOT DATA figure() xy_vals = np.array([(sample.p['x'], sample.p['y']) \ for sample in samples]) p_prop = np.array([sample.q.prob for sample in samples]) accept = np.array([sample.u for sample in samples]) reject = np.logical_not(accept) x_accept, x_reject = xy_vals[accept, 0], xy_vals[reject, 0] y_accept, y_reject = xy_vals[accept, 1], xy_vals[reject, 1] c_norm = Normalize(vmin=np.min(p_prop), vmax=np.max(p_prop)) c_map = cm.jet(c_norm(p_prop)) c_accept, c_reject = c_map[accept], c_map[reject] scatter(x_accept, y_accept, color=c_accept, marker='.', linewidths=0.5) scatter(x_reject, y_reject, color=c_reject, marker='x', linewidths=0.5) xlabel('x') ylabel('y') title('Est. circle area={}'.format(circle_area))
{"hexsha": "3c984a0b7efd3122e8314a1d6202a905e5f9e29c", "size": 1931, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/omc/omc_rejection_sp_circle.py", "max_stars_repo_name": "Bhumbra/probayes", "max_stars_repo_head_hexsha": "e5ac193076e4188b9b38c0e18466223ab4d041f7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/omc/omc_rejection_sp_circle.py", "max_issues_repo_name": "Bhumbra/probayes", "max_issues_repo_head_hexsha": "e5ac193076e4188b9b38c0e18466223ab4d041f7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/omc/omc_rejection_sp_circle.py", "max_forks_repo_name": "Bhumbra/probayes", "max_forks_repo_head_hexsha": "e5ac193076e4188b9b38c0e18466223ab4d041f7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6557377049, "max_line_length": 87, "alphanum_fraction": 0.7167270844, "include": true, "reason": "import numpy,import scipy", "num_tokens": 552}
""" $(SIGNATURES) Write, to the current working directory, a .tex file with the parmater list for the NIPA dataset and parameter values for the TableID parameter. Arguments --------- * `b` -- a [`Bea`](@ref) connection """ function nipa_metadata_tex(b::Bea) url = b.url key = b.key bea_dataset = b.dataset bea_method2 = "GetParameterList" query2 = Dict("UserID" => key, "Method" => bea_method2, "DatasetName" => bea_dataset) response2 = get(url; query = query2) response_json2 = Requests.json(response2) paramlist = response_json2["BEAAPI"]["Results"]["Parameter"] bea_method3 = "GetParameterValues" parameter_name = "TableID" query3 = Dict("UserID" => key, "Method" => bea_method3, "DatasetName" => bea_dataset, "ParameterName" => parameter_name) response3 = get(url; query = query3) response_json3 = Requests.json(response3) paramvals = response_json3["BEAAPI"]["Results"]["ParamValue"] f = open("NipaMetadata.tex", "w") do f # preamble write(f, "\\documentclass[12pt]{article} \n") write(f, "\\usepackage[margin=1in]{geometry} \n") write(f, "\\usepackage{booktabs} \n") write(f, "\\usepackage{parskip} \n") write(f, "\\usepackage{longtable} \n") write(f, "\\usepackage{hyperref} \n") write(f, "\\begin{document} \n \n") write(f, "Date Created: \\today \n\n") # Table of NIPA Dataset parameters write(f, "\\section{Dataset Parameters: $bea_dataset} \n") write(f, "\\begin{tabular}{lp{3.5in}} \n") write(f, "\\toprule \n") for param in paramlist write(f, string("Parameter Name: & \\verb|", param["ParameterName"], "| \\\\ \n")) for (key, value) in param key != "ParameterName" && write(f, string("\\hspace{1em} \\verb|", key, "| & ", value, " \\\\ \n")) end param != paramlist[end] && write(f, "\\midrule \n") end write(f, "\\bottomrule \n") write(f, "\\end{tabular} \n\n") write(f, "\\clearpage \n\n") # Table with parameter values for NIPA TableID write(f, "\\section{Parameter Values: $bea_dataset $parameter_name} \n") write(f, "\\begin{longtable}{cp{5in}} \n") write(f, "\\toprule \n") write(f, "\\verb|TableID| & Description \\\\ \n") write(f, "\\midrule \\endhead \n") for val in paramvals write(f, "\\verb|", string(val["TableID"], "| & ", val["Description"], " \\\\ \n")) end write(f, "\\bottomrule \n") write(f, "\\end{longtable} \n\n") # end document write(f, "\\end{document} \n") end end # function
{"hexsha": "becd5bf87224b4d116df6588915b90775012bca1", "size": 2812, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/nipa_metadata_tex.jl", "max_stars_repo_name": "JuliaPackageMirrors/BeaData.jl", "max_stars_repo_head_hexsha": "dc0e9bef0d59b9110f22c0828a90a28a3c0b5926", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/nipa_metadata_tex.jl", "max_issues_repo_name": "JuliaPackageMirrors/BeaData.jl", "max_issues_repo_head_hexsha": "dc0e9bef0d59b9110f22c0828a90a28a3c0b5926", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/nipa_metadata_tex.jl", "max_forks_repo_name": "JuliaPackageMirrors/BeaData.jl", "max_forks_repo_head_hexsha": "dc0e9bef0d59b9110f22c0828a90a28a3c0b5926", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0512820513, "max_line_length": 144, "alphanum_fraction": 0.5501422475, "num_tokens": 773}
[STATEMENT]
lemma evaluate_iff:
  "evaluate True env st e r \<longleftrightarrow> (r = eval env e st)"
  "evaluate_list True env st es r' \<longleftrightarrow> (r' = eval_list env es st)"
  "evaluate_match True env st v pes v' r \<longleftrightarrow> (r = eval_match env v pes v' st)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. evaluate True env st e r = (r = eval env e st) &&& evaluate_list True env st es r' = (r' = eval_list env es st) &&& evaluate_match True env st v pes v' r = (r = eval_match env v pes v' st)
[PROOF STEP]
by (metis eval_all evaluate_determ)+
{"llama_tokens": 205, "file": "CakeML_Big_Step_Fun_Equiv", "length": 1}
import os
import shutil
import torch
import logging
import numpy as np
from scipy.ndimage import gaussian_filter1d
from scipy.signal.windows import triang


class AverageMeter(object):
    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)


class ProgressMeter(object):
    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        logging.info('\t'.join(entries))

    @staticmethod
    def _get_batch_fmtstr(num_batches):
        num_digits = len(str(num_batches // 1))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'


def query_yes_no(question):
    """ Ask a yes/no question via input() and return their answer. """
    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    prompt = " [Y/n] "

    while True:
        print(question + prompt, end=':')
        choice = input().lower()
        if choice == '':
            return valid['y']
        elif choice in valid:
            return valid[choice]
        else:
            print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")


def prepare_folders(args):
    folders_util = [args.store_root, os.path.join(args.store_root, args.store_name)]
    if os.path.exists(folders_util[-1]) and not args.resume and not args.pretrained and not args.evaluate:
        if query_yes_no('overwrite previous folder: {} ?'.format(folders_util[-1])):
            shutil.rmtree(folders_util[-1])
            print(folders_util[-1] + ' removed.')
        else:
            raise RuntimeError('Output folder {} already exists'.format(folders_util[-1]))
    for folder in folders_util:
        if not os.path.exists(folder):
            print(f"===> Creating folder: {folder}")
            os.mkdir(folder)


def adjust_learning_rate(optimizer, epoch, args):
    lr = args.lr
    for milestone in args.schedule:
        lr *= 0.1 if epoch >= milestone else 1.
    for param_group in optimizer.param_groups:
        if 'name' in param_group and param_group['name'] == 'noise_sigma':
            continue
        param_group['lr'] = lr


def save_checkpoint(args, state, is_best, prefix=''):
    filename = f"{args.store_root}/{args.store_name}/{prefix}ckpt.pth.tar"
    torch.save(state, filename)
    if is_best:
        logging.info("===> Saving current best checkpoint...")
        shutil.copyfile(filename, filename.replace('pth.tar', 'best.pth.tar'))


def calibrate_mean_var(matrix, m1, v1, m2, v2, clip_min=0.1, clip_max=10):
    if torch.sum(v1) < 1e-10:
        return matrix
    if (v1 == 0.).any():
        valid = (v1 != 0.)
        factor = torch.clamp(v2[valid] / v1[valid], clip_min, clip_max)
        matrix[:, valid] = (matrix[:, valid] - m1[valid]) * torch.sqrt(factor) + m2[valid]
        return matrix
    factor = torch.clamp(v2 / v1, clip_min, clip_max)
    return (matrix - m1) * torch.sqrt(factor) + m2


def get_lds_kernel_window(kernel, ks, sigma):
    assert kernel in ['gaussian', 'triang', 'laplace']
    half_ks = (ks - 1) // 2
    if kernel == 'gaussian':
        base_kernel = [0.] * half_ks + [1.] + [0.] * half_ks
        kernel_window = gaussian_filter1d(base_kernel, sigma=sigma) / max(gaussian_filter1d(base_kernel, sigma=sigma))
    elif kernel == 'triang':
        kernel_window = triang(ks)
    else:
        laplace = lambda x: np.exp(-abs(x) / sigma) / (2. * sigma)
        kernel_window = list(map(laplace, np.arange(-half_ks, half_ks + 1))) / max(map(laplace, np.arange(-half_ks, half_ks + 1)))

    return kernel_window
{"hexsha": "f79db7f406a5a81d8c50d2c49617123bb0be1d6f", "size": 4223, "ext": "py", "lang": "Python", "max_stars_repo_path": "imdb-wiki-dir/utils.py", "max_stars_repo_name": "jiawei-ren/BalancedMSE", "max_stars_repo_head_hexsha": "4e1f44fe4cc2518159b1c67159abe3d2b0cea014", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2022-03-30T23:28:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:02:08.000Z", "max_issues_repo_path": "imdb-wiki-dir/utils.py", "max_issues_repo_name": "jiawei-ren/BalancedMSE", "max_issues_repo_head_hexsha": "4e1f44fe4cc2518159b1c67159abe3d2b0cea014", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "imdb-wiki-dir/utils.py", "max_forks_repo_name": "jiawei-ren/BalancedMSE", "max_forks_repo_head_hexsha": "4e1f44fe4cc2518159b1c67159abe3d2b0cea014", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.784, "max_line_length": 130, "alphanum_fraction": 0.6047833294, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1115}
```python
import numpy as np
import matplotlib.pylab as plt
from numpy.lib.stride_tricks import sliding_window_view
```

## Implementing a Function to Compute the Local Binary Pattern

Local Binary Pattern (LBP) is a simple yet very efficient texture feature commonly used in image processing.

\begin{align}
LBP(p) = \sum_{i=0}^{7} [f(n_i) > f(p)]\,2^i
\end{align}

```python
def local_binary_pattern_list_comprehension(img):
    neighbors = [(0,2),(1,2),(2,2),
                 (2,1),(2,0),(1,0),
                 (0,0),(0,1)]
    H,W = img.shape
    aux_img = np.zeros((H+2,W+2))
    aux_img[1:-1,1:-1] = img
    return sum((aux_img[x:H+x,y:W+y] > img)*2**i for (i,(x,y)) in enumerate(neighbors))
```

```python
def local_binary_pattern_loop(img):
    neighbors = [(0,2),(1,2),(2,2),
                 (2,1),(2,0),(1,0),
                 (0,0),(0,1)]
    H,W = img.shape
    aux_img = np.zeros((H+2,W+2))
    aux_img[1:-1,1:-1] = img
    lbp = np.zeros(img.shape)
    for i, (x,y) in enumerate(neighbors):
        lbp += (aux_img[x:H+x,y:W+y] > img)*2**i
    return lbp
```

```python
def lbp_sliding_window_view(img):
    codebook = np.array([[[[64,128,1],
                           [32,0,2],
                           [16,8,4]]]], dtype = np.uint8)
    H,W = img.shape
    aux_img = np.zeros((H+2,W+2))
    aux_img[1:-1,1:-1] = img
    aux_img_view = sliding_window_view(aux_img, (3,3))
    lbp = ((aux_img_view > img[:,:,np.newaxis,np.newaxis])*codebook).sum(axis = (2,3))
    return lbp
```

```python
from PIL import Image
img = np.array(Image.open('../Figures/lena.png').convert('L'))
```

```python
%%timeit
local_binary_pattern_loop(img)
```

    8.39 ms ± 414 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)

```python
%%timeit
lbp_sliding_window_view(img)
```

    24.7 ms ± 1.05 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)

```python
%%timeit
local_binary_pattern_list_comprehension(img)
```

    8.86 ms ± 450 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
{"hexsha": "65a48e00caada21038013b87b38aa97d72cb5e19", "size": 4471, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Submissions/lbp_compiled.ipynb", "max_stars_repo_name": "yash30147101/ENEL645", "max_stars_repo_head_hexsha": "15b54b0188be3aa214295d5dd37bb51b39a31e58", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Submissions/lbp_compiled.ipynb", "max_issues_repo_name": "yash30147101/ENEL645", "max_issues_repo_head_hexsha": "15b54b0188be3aa214295d5dd37bb51b39a31e58", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Submissions/lbp_compiled.ipynb", "max_forks_repo_name": "yash30147101/ENEL645", "max_forks_repo_head_hexsha": "15b54b0188be3aa214295d5dd37bb51b39a31e58", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0463917526, "max_line_length": 117, "alphanum_fraction": 0.4775218072, "converted": true, "num_tokens": 676}
# -*- coding: utf-8 -*-
import cv2
import sys
import numpy as np
import argparse

imagePath = "img.png"
sx = sy = None
previewImage = None

if len(sys.argv) < 3:
    print("""
    Usage:
        python mouseInteractive -i img.png
    """)
    sys.exit(-1)

if sys.argv[1] == "-i":
    imagePath = sys.argv[2]

def createBlankImage(width, height, color=(255,255,255)):
    img = np.zeros((height, width, 3), np.uint8)
    img[:] = color
    return img

def mouseCallback(event, x, y, flags, param):
    global sx, sy, previewImage
    if (event == cv2.EVENT_LBUTTONDOWN):
        print(event, x, y, flags, param)
        bgrColor = frame[y][x]
        previewImage = createBlankImage(200, 200, bgrColor)
        hsvColor = cv2.cvtColor(bgrColor.reshape(1,1,3), cv2.COLOR_BGR2HSV)
        print("bgr->hsv:{}->{}".format(bgrColor, hsvColor.tolist()[0][0]))
        cv2.circle(frame, (x,y), 6, (0,0,255), -1)
        if (sx != None):
            cv2.line(frame, (sx,sy), (x,y), (0,0,255), 3)
        sx = x
        sy = y
        cv2.imshow('demo', frame)
        cv2.imshow('preview', previewImage)

frame = cv2.imread(imagePath)
cv2.namedWindow("demo")
cv2.namedWindow("preview")
cv2.moveWindow("demo", 1500, 300)
cv2.moveWindow("preview", 1500, 80)
cv2.imshow('demo', frame)
cv2.setMouseCallback('demo', mouseCallback)
cv2.waitKey(0)
cv2.destroyAllWindows()
{"hexsha": "11bb268e235390ce4ed73761c89b12c27bab8ebc", "size": 1343, "ext": "py", "lang": "Python", "max_stars_repo_path": "mouseInteractive.py", "max_stars_repo_name": "wwwins/OpenCV-Samples", "max_stars_repo_head_hexsha": "6a88c411064d5a8d012fbc2299a6d85b4526785e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mouseInteractive.py", "max_issues_repo_name": "wwwins/OpenCV-Samples", "max_issues_repo_head_hexsha": "6a88c411064d5a8d012fbc2299a6d85b4526785e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mouseInteractive.py", "max_forks_repo_name": "wwwins/OpenCV-Samples", "max_forks_repo_head_hexsha": "6a88c411064d5a8d012fbc2299a6d85b4526785e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.9821428571, "max_line_length": 74, "alphanum_fraction": 0.6202531646, "include": true, "reason": "import numpy", "num_tokens": 404}
#!/usr/bin/env python

"""
Implements Dozier type algorithms for estimating fire size/temperature.

This software is hereby placed in the public domain.
Arlindo.daSilva@nasa.gov
"""

import sys
from mxd14 import *
from planck import *
from math import pi
from pylab import pcolor, plot, colorbar, axis, savefig, subplot, clf, \
     title, xlabel, ylabel, prctile, median
from scipy.stats import kde
from scipy.optimize import fixed_point, brent, fmin
from matplotlib.mlab import prctile, find
from numpy.core.numeric import isscalar

igbp_dir = '/nobackup/Emissions/Vegetation/GL_IGBP_INPE'

P_SCALE = 1e4

class DOZIER(MxD14_L2):

    def classic_var(self,tau21=0.864,tau31=0.864,Verbose=False):
        """
        Implements the classic Dozier algorithm. On input, *tau_21* and
        *tau_31* are the atmospheric transmittances at 4 and 11 microns
        (MODIS channels 21 and 31). Variational version.
        """
        self.algo = 'dozier'

        # Compute radiances
        # -----------------
        L21 = B21(self.T21)
        L31 = B31(self.T31)
        E21 = B21(self.Tb21)
        E31 = B31(self.Tb31)
        N = L21.size

        if isscalar(tau21):
            tau21 = tau21 * ones(L21.shape)
        if isscalar(tau31):
            tau31 = tau31 * ones(L31.shape)

        # Use a variational approach - Needs vectorization
        # ------------------------------------------------
        sig21 = 1.
        sig31 = 1.
        x0 = [600.,P_SCALE*0.1] # [Tf,p]; p here is normalized; 10 ha/ 1 km2 = 0.1
        Tf = - ones(N)
        p  = - ones(N)
        niter = 200
        for i in range(N):
            rvals = fmin(Jfunc2d, x0, ftol=0.001, maxiter=niter, disp=0, full_output=1, \
                         args=(L21[i],E21[i],tau21[i],sig21,L31[i],E31[i],tau31[i],sig31))
            x = rvals[0]
            iter = rvals[2]
            if iter < niter:
                Tf[i] = x[0]
                p[i] = 100. * x[1] / P_SCALE # units is %

        # Quality control
        # ---------------
        m = isnan(Tf) == False
        m = m & (Tf<1800.)
        m = m & (p>0) & (p<=100)

        # Add solution as attributes
        # --------------------------
        self.m = m
        self.Tf = Tf
        self.p = p

        # Replace fire size with median size for those fires that did not converge
        # -------------------------------------------------------------------------
        I = (m == False)
        self.p[I] = median(self.p[m])
        self.farea = (self.p/100.) * self.pixar     # km2
        self.hflux = 0.001 * self.pow / self.farea  # kW/m2

        # Print out results
        # -----------------
        y = 100. * ( Tf[m].size ) / N + 0.05
        if Verbose:
            print_stats('__header__','Classic Dozier - Variational Results (Yield: %4.1f%%)'%y)
            print_stats('Tf (K)',Tf[m])
            print_stats('p (%)',p[m])
            print_stats('A (km2)',self.farea[m])
            print_stats('HF (kW/m2)',self.hflux[m])
            print_stats('__footer__')

        # Plot KDE
        # --------
        ### if Verbose:
        ###     plot_dozier(Tf[m],p[m],L21[m],E21[m],tau21[m],L31[m],E31[m],tau31[m],\
        ###                 'Variational','var',pow=self.pow[m])

#........................................................................................

    classic = classic_var # alias classic to classic_var
    dozier  = classic_var # alias dozier to classic_var

    def classic_fp(self,tau21=0.864,tau31=0.864):
        """
        Implements the classic Dozier algorithm. On input, *tau_21* and
        *tau_31* are the atmospheric transmittances at 4 and 11 microns
        (MODIS channels 21 and 31). Fixed point version.
        """

        # Compute radiances
        # -----------------
        L21 = B21(self.T21)
        L31 = B31(self.T31)
        E21 = B21(self.Tb21)
        E31 = B31(self.Tb31)

        if isscalar(tau21):
            tau21 = tau21 * ones(L21.shape)
        if isscalar(tau31):
            tau31 = tau31 * ones(L31.shape)

        # The nonlinear equation to be solved is:
        #     B21(Tf) = a + b * B31(Tf)
        # or
        #     T = iB21(a + b * B31(T))
        # ---------------------------------------
        r21 = (L21-E21)/(L31-E31)
        a21 = E21 - r21 * E31
        b21 = r21 * tau31 / tau21

        if self.verb > 0:
            print_stats('__header__','Classic Fixed-point Dozier - Inputs')
            print_stats('DT21',self.T21-self.Tb21)
            print_stats('DT31',self.T31-self.Tb31)
            print_stats('__sep__')
            print_stats('b21',b21)
            print_stats('a21',a21)
            print_stats('__footer__')

        # Use fixed point algorithm to find solution
        # ------------------------------------------
        Tf = fixed_point(Tfunc21,self.T21,xtol=0.001,args=(a21,b21))
        p = 100. * (L21 - E21) / ( tau21 * B21(Tf) - E21 )

        # Quality control
        # ---------------
        m = isnan(Tf) == False
        m = m & (p>0)

        # Add solution as attributes
        # --------------------------
        self.m = m
        self.Tf = Tf
        self.p = p

        # Print out results
        # -----------------
        y = 100. * ( Tf[m].size ) / a21.size + 0.05
        print_stats('__header__','Classic Dozier - Fixed-point Results (yield: %4.1f%%)'%y)
        print_stats('Tf (K)',Tf[m])
        print_stats('p (%)',p[m])
        print_stats('__footer__')

        # Plot KDE
        # --------
        plot_dozier(Tf[m],p[m],L21[m],E21[m],tau21[m],L31[m],E31[m],tau31[m],\
                    'Fixed-point','fp',pow=self.pow[m])

#........................................................................

    def bimodal_u(self,tau21=0.864,tau31=0.864,Verbose=False):
        """
        Implements the bi-modal Dozier algorithm. On input, *tau_21* and
        *tau_31* are the atmospheric transmittances at 4 and 11 microns
        (MODIS channels 21 and 31).

        This is the unconstrained version, meaning that no additional MODIS
        channels are used. Instead, the most likely value of these fire
        properties are returned:

        a_F   --- fire area (m2)
        h_F   --- fire heat flux (kW/m2)
        r_F   --- fraction of flaming energy
        frp_F --- flaming fire radiative power
        """
        self.algo = 'bimodal'

        # Setup Bayesian parameters
        # -------------------------
        self.bayes_setup(tau21=tau21,tau31=tau31)

        # Reserve space for output
        # ------------------------
        N = self.lon.size
        self.a_F   = - 99.99 * ones(N)
        self.h_F   = - 99.99 * ones(N)
        self.r_F   = - 99.99 * ones(N)
        self.frp_F = - 99.99 * ones(N)
        self.m = zeros(N).astype('bool')

        S21, S31, F21, F31 = (self.S21,self.S31,self.F21,self.F31)

        if Verbose:
            if N>100:
                Np = range(0,N,N/100)
                Np = range(N)
            elif N>10:
                Np = range(0,N,N/10)
            else:
                Np = range(N)
            print ""
            print "  Unconstrained Bimodal Dozier"
            print "  ----------------------------"
            print ""
            print "  %  |   Lon    Lat  b |   r_F      h_F"
            print "     |   deg    deg    |    %      kW/m2"
            print " ---- | ------ ------ - | -------- --------"

        # Estimate parameters for each fire
        # ---------------------------------
        for n in range(N):

            L21, L31 = (self.L21[n], self.L31[n])
            E21, E31 = (self.E21[n], self.E31[n])
            tau21, tau31 = (self.tau21[n], self.tau31[n])
            pixar = self.pixar[n]
            pow = self.pow[n]

            # Estimate admissible solutions for (Ts,Tf) in range
            # --------------------------------------------------
            ps, pf, kappa = bayes_single(L21, E21, tau21,
                                         L31, E31, tau31,
                                         S21, S31, F21, F31 )

            # Parameters in phase space
            # -------------------------
            r_F = pf * F21 / ( pf * F21 + ps * S21 ) # non-dimensional
            a_F = (pf/100.) * pixar                  # km2
            h_F = 0.001 * r_F * pow / a_F            # kW/m2
            pow_F = r_F * pow                        # MW

            # Kernel density estimates
            # ------------------------
            i = ((ps+pf)>=0) # quality control
            if any(i):
                self.m[n] = True
                self.a_F[n]   = mle_kde(a_F[i]) * 1e6 # m2
                self.r_F[n]   = mle_kde(r_F[i])       # %
                self.h_F[n]   = mle_kde(h_F[i])       # kW/m2
                self.frp_F[n] = mle_kde(pow_F[i])     # MW

            if Verbose:
                if n in Np:
                    ip = int(0.5+100.*n/N)
                    print "%3d%% | %7.2f %6.2f | %8.2f %8.2f"%\
                          (ip,self.lon[n],self.lat[n], \
                           self.r_F[n],self.h_F[n])

#........................................................................

    def bayes_setup(self,tau21=0.864,tau31=0.864, grid_type='T',
                    srange=[350.,650.],frange=[650.,1800.]):
        """
        Implements a "bayesian" version of the Dozier algorithm. On input,

        tau_21, tau_31 -- atmospheric transmittances at 4 and 11 microns
                          (MODIS channels 21 and 31).
        srange         -- Range of temperatures (K) for SMOLDERING fires
        frange         -- Range of temperatures (K) for FLAMING fires

        This is a really simple minded, brute force algorithm for now.
        Approach: once all radiances are specified (including the fire ones),
        then we can solve for the smoldering/flaming area fractions ps/pf:

            DS21 * ps + DF21 * pf = L21 - E21
            DS31 * ps + DF31 * pf = L31 - E31

        and,

            DS21 = tau21 * B21(Ts) - E21
            DF21 = tau21 * B21(Tf) - E21

        etc. If *grid_type* is 'T' the 2x2 phase space will have increments
        constant in temperature, otherwise it will be constant in radiances.
        """

        # Compute radiances
        # -----------------
        self.L21 = B21(self.T21)
        self.L31 = B31(self.T31)
        self.E21 = B21(self.Tb21)
        self.E31 = B31(self.Tb31)

        self.tau21 = tau21
        self.tau31 = tau31
        if isscalar(tau21):
            self.tau21 = tau21 * ones(self.L21.shape)
        if isscalar(tau31):
            self.tau31 = tau31 * ones(self.L31.shape)

        if len(srange)==3:
            Ns = srange[2]
        else:
            Ns = 250
        if len(frange)==3:
            Nf = frange[2]
        else:
            Nf = 250

        # The grid is uniform Ts, Tf
        # --------------------------
        if grid_type == 'T':
            ds, df = ( (srange[1]-srange[0])/Ns, (frange[1]-frange[0])/Nf )
            self.Ts, self.Tf = mgrid[srange[0]:srange[1]:ds,frange[0]:frange[1]:df]
            self.S21, self.F21 = (B21(self.Ts),B21(self.Tf))

        # The grid is uniform in B21/F21
        # ------------------------------
        else:
            s21 = ( B21(srange[0]), B21(srange[1]) )
            f21 = ( B21(frange[0]), B21(frange[1]) )
            ds, df = ( (s21[1]-s21[0])/Ns, (f21[1]-f21[0])/Nf )
            self.S21, self.F21 = mgrid[s21[0]:s21[1]:ds,f21[0]:f21[1]:df]
            self.Ts, self.Tf = (iB21(self.S21), iB21(self.F21))

        self.S31, self.F31 = (B31(self.Ts),B31(self.Tf))

        # Evaluate radiances on the phase-space grid: smoldering vs. flaming temps
        # -------------------------------------------------------------------------
        if self.verb > 0:
            print_stats('__header__','Bayesian Dozier - Inputs')
            print_stats('L21',self.L21)
            print_stats('L31',self.L31)
            print_stats('__sep__')
            print_stats('E21',self.E21)
            print_stats('E31',self.E31)
            print_stats('__sep__')
            print_stats('Ts',iB21(self.S21))
            print_stats('Tf',iB21(self.F21))
            print_stats('__sep__')
            print_stats('S21',self.S21)
            print_stats('S31',self.S31)
            print_stats('__sep__')
            print_stats('F21/100',self.F21/100)
            print_stats('F31/100',self.F31/100)
            print_stats('__footer__')

        return

    def bayes_one(self,n):
        """
        Runs bayes_single() for a given observation with index "n"; you must
        call method bayesian() first. This is a convenience, short-hand method
        for development purposes only.
        """
        return bayes_single(self.L21[n],self.E21[n],self.tau21[n],
                            self.L31[n],self.E31[n],self.tau31[n],
                            self.S21,self.S31,self.F21,self.F31,Verb=True)

    def design(self,n,i,j,ka,kb=None,vmin=0.99,vmax=1.):
        """
        For an observation with index "n", and 2 additional wavelengths ka and
        kb (in microns), computes the likelihood function in "phase space"
        (meaning, the range of smoldering/flaming temperatures). The input
        (i,j) are used to create the synthetic observations corresponding to
        Ts[i,j] and Tf[i,j] at ka and kb.

        Requires setup by the bayes_setup() method.
        """

        # Find corresponding ps, pf in phase space (Ts,Tf)
        # ------------------------------------------------
        ps, pf, kappa = self.bayes_one(n)
        ps, pf = (ps/100.,pf/100.)
        Ts, Tf = (self.Ts, self.Tf)

        # Get Background radiances based on 4/11 micron Tb
        # ------------------------------------------------
        Tb_ = (self.Tb21[n]+self.Tb31[n])/2. # "b" for background here
        Ea_ = planck(-ka,Tb_)

        # Compute La in all of the phase space
        # ------------------------------------
        La = ps * planck(-ka,Ts) + pf * planck(-ka,Tf) + (1-ps-pf) * Ea_

        # The notional ground truth
        # -------------------------
        La_ = La[i,j]

        # Ok, now use Bayes theorem and compute the likelihood function
        # -------------------------------------------------------------
        siga = 0.1 * La_
        va = (La-La_)/siga

        # Do the same for second channel
        # ------------------------------
        if kb != None:
            Eb_ = planck(-kb,Tb_)
            Lb = ps * planck(-kb,Ts) + pf * planck(-kb,Tf) + (1-ps-pf) * Eb_
            Lb_ = Lb[i,j]
            sigb = siga
            vb = (Lb-Lb_)/sigb

        # Evaluate likelihood
        # -------------------
        if kb==None:
            P = exp( -va*va/2.0 )             # / (siga * sqrt(2*pi))
        else:
            P = exp( -(va*va + vb*vb)/2.0 )   # / ( siga * sigb * 2. * pi )

        # Plot it
        # -------
        clf()
        pcolor(Tf,Ts,P,vmin=0.99,vmax=1.)
        plot([self.Tf[i,j],],[self.Ts[i,j],],'wo')
        colorbar()
        xlabel('Flaming Temperature (K)')
        ylabel('Smoldering Temperature (K)')
        if kb==None:
            title('Likelihood given (3.959,11.03,%s) $\mu$m'%ka)
        else:
            title('Likelihood given (3.959,11.03,%s,%s) $\mu$m'%(ka,kb))

        return P

#............................................................................

def bayes_single(L21,E21,tau21,L31,E31,tau31,S21,S31,F21,F31,Verb=False):
    """
    Given a *single* measurement of pixel radiances (L21,L31), background
    radiances (E21,E31) and atmospheric transmittances (tau21,tau31) for
    MODIS channels 21 and 31 (4 and 11 microns), it evaluates the fractional
    smoldering/flaming areas corresponding to each smoldering/flaming
    radiance given by (S21,S31)/(F21,F31), respectively.

    Input
    -----
    Scalars: L21, E21, tau21, L31, E31, tau31
    Arrays:  S21, S31, F21, F31

    Output
    ------
    ps    --- fractional smoldering area
    pf    --- fractional flaming area
    kappa --- condition number

    Recall that it is not always possible to find a solution (ps,pf) for all
    possible input combinations. At those points where a physical solution
    was not possible, the fractional areas have been set to -1. In addition,
    the condition number of the 2x2 matrix used to compute (ps,pf) is also
    returned as a reliability indicator of the *numerical* solution.
    """

    # Matrix elements
    # ---------------
    shape = S21.shape
    DS21 = tau21 * S21.ravel() - E21
    DF21 = tau21 * F21.ravel() - E21
    DS31 = tau31 * S31.ravel() - E31
    DF31 = tau31 * F31.ravel() - E31

    # RHS
    # ---
    DL21 = L21 - E21
    DL31 = L31 - E31

    # Determinant and condition number
    # --------------------------------
    det = DS21 * DF31 - DS31 * DF21
    chi = DS21 + DF31
    sqd = sqrt(chi*chi - 4 * det)
    kappa = abs((sqd + chi) / ( sqd - chi )) # condition number
    kappa = where(kappa<1.,1./kappa,kappa)   # ensure kappa > 1 (just in case)

    # Solutions
    # ---------
    ps = (DF31 * DL21 - DF21 * DL31) / det
    pf = (DS21 * DL31 - DS31 * DL21) / det

    # Quality control
    # ---------------
    m = (isnan(ps) | isnan(pf) |
         isinf(ps) | isinf(pf) |
         (ps<0.) | (pf<0.) |
         (ps>1.) | (pf>1.) )
    n = (m==False)
    ps[m] = -1.
    pf[m] = -1.

    if Verb:
        y = 100. * ps[n].size / ps.size
        pr = 100 * pf / ( ps + pf )
        qf = pf * F21.ravel()
        qs = ps * S21.ravel()
        qr = 100 * qf / (qf + qs)
        print_stats('__header__','Bayesian Dozier - Results (Yield: %4.1f%%)'%y)
        print_stats('ps',100*ps[n])
        print_stats('pf',100*pf[n])
        print_stats('pt',100*(pf[n]+ps[n]))
        print_stats('__sep__')
        print_stats('ps*S21',qs[n])
        print_stats('pf*F21',qf[n])
        print_stats('qs+qf',qs[n]+qf[n])
        print_stats('__sep__')
        print_stats('pr',pr[n])
        print_stats('qr',qr[n])
        print_stats('kappa',kappa[n])
        print_stats('__footer__')

    # Reshape
    # -------
    ps = 100 * reshape(ps,shape)
    pf = 100 * reshape(pf,shape)
    kappa = reshape(kappa,shape)

    # Return fractional area and condition number
    # -------------------------------------------
    return (ps,pf,kappa)

def plot_dozier(Tf,p,L21,E21,tau21,L31,E31,tau31,algo,prefix,pow=None):

    subplot(211)
    plot_kde(Tf,300.,1800.,500,'Fire Kinetic Temperature (K) - %s'%algo)
    subplot(212)
    plot_kde(p,0.,5.,500,'Fire Fractional Area (%)')
    savefig('%s.tf_kde.png'%prefix)
    subplot(111)

    p = p / 100 # units of fraction
    clf()
    L21_ = tau21*p*B21(Tf)+(1-p)*E21
    L31_ = tau31*p*B31(Tf)+(1-p)*E31
    plot(L21,L21_,'bo',L31,L31_,'ro')
    title('L21 (Blue) --- L31 (Red) --- %s'%algo)
    xlabel('Observed'), ylabel('Fitted')
    savefig('%s.fits.png'%prefix)

    if pow != None:
        clf()
        plot(pow,p*B21(Tf),'o')
        title('FRP vs p * B$_{21}$(T$_f$) - %s'%algo)
        xlabel('Fire Radiative Power')
        ylabel('p * B$_{21}$(T$_f$)')
        savefig('%s.pow_pB21.png'%prefix)

#........................................................................

def mle_kde(X,N=32):
    """
    Uses a Kernel Density Estimate to return the most likely value of X.
    """
    X = X.ravel()
    bins = linspace(X.min(),X.max(),N)
    kernel = kde.gaussian_kde(X)
    pdf = kernel(bins)
    j = pdf.argmax()
    return bins[j]

#........................................................................

def Jfunc(T,L21,E21,tau21,L31,E31,tau31):
    """Forces the 2 areas to be equal. Does not work too well (low yields)"""
    p21 = (L21 - E21) / ( tau21 * B21(T) - E21 )
    p31 = (L31 - E31) / ( tau31 * B31(T) - E31 )
    d = p21 - p31
    return d*d

def Jfunc2d(x,L21,E21,tau21,sig21,L31,E31,tau31,sig31):
    """A sum of 2 J_o kind of terms; could de-emphasize channel 31"""
    Tf = x[0]
    #p = x[1] / 100000.
    p = x[1] / P_SCALE # normalize so that control variables have same order of magnitude
    v21 = (L21 - ( p * tau21 * B21(Tf) + (1-p) * E21)) / sig21
    v31 = (L31 - ( p * tau31 * B31(Tf) + (1-p) * E31)) / sig31
    return v21 * v21 + v31 * v31

def Tfunc21(T,a,b):
    return iB21(a + b * B31(T))

def Tfunc31(T,a,b):
    return iB31(a + b * B21(T))

def fixed_point(func, x0, args=(), xtol=1e-4, maxiter=50):
    """Find the point where func(x) == x

    Given a function of one or more variables and a starting point, find a
    fixed-point of the function: i.e. where func(x)=x.

    Uses Steffensen's Method with Aitken's Del^2 convergence acceleration.
    See Burden, Faires, "Numerical Analysis", 5th edition, pg. 80

    This is a customized version of a SciPy function.
    """
    x0 = asarray(x0)
    p0 = x0
    for iter in range(maxiter):
        p1 = func(p0, *args)
        p2 = func(p1, *args)
        d = p2 - 2.0 * p1 + p0
        p = where(d == 0, p2, p0 - (p1 - p0)*(p1-p0) / d)
        relerr = where(p0 == 0, p, (p-p0)/p0)
        if all(abs(relerr) < xtol):
            return p
        p0 = p
    # print "Failed to converge after %d iterations, value is %s" % (maxiter,p)
    return p

#............................................................................

def utClassic():
    fires = DOZIER('data/182',Verb=1,qc_thresh=50.)
    fires.classic_fp()
    fires.classic_var()

def utBayesian():
    fires = DOZIER('182',Verb=1,qc_thresh=50.)
    fires.bayes_setup()
    return fires

def utBimodal():
    import VegType
    fires = DOZIER('182',Verb=1,qc_thresh=50.)
    fires.veg = VegType.getSimpleVeg(fires.lon,fires.lat,Path=igbp_dir)
    fires.bimodal_u(Verbose=True)
    return fires

def utDesign(tau21=0.864,tau31=0.864):
    f = DOZIER('data/182',Verb=1,qc_thresh=50.)
    f.bayes_setup(tau21=1.,tau31=1.)
    P = f.design(345,120,120,3.75,8.55)
    return (f,P)

def utAttach():
    f = DOZIER('data/182',Verb=1,qc_thresh=50.)
#   f.attach('http://thing4.gsfc.nasa.gov:9090/dods/GEOS-5/ARCTAS/0.5_deg/assim/tavg3d_dyn_v',Vars=('t','qv','o3','delp','ps'))
    f.attach('http://thing4.gsfc.nasa.gov:9090/dods/GEOS-5/ARCTAS/0.5_deg/assim/tavg3d_dyn_v',Vars=('ps'))

if __name__ == "__main__":
    fires = utBimodal()
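The Steffensen/Aitken scheme above is the same one SciPy ships; a quick, self-contained sanity check of the fixed-point idea using SciPy's own implementation (the classic cos(x) = x example is an arbitrary choice):

```python
# Sanity check of the fixed-point iteration using SciPy's implementation,
# which uses the same del^2 acceleration this file adapts.
import numpy as np
from scipy.optimize import fixed_point

x = fixed_point(np.cos, 1.0)   # solve cos(x) = x
print(x, np.cos(x))            # both ~0.739085
```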
{"hexsha": "ab82436aae6e350b4ddc59557f029393090a38fe", "size": 22321, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Components/qfed/qfed/dozier.py", "max_stars_repo_name": "GEOS-ESM/AeroApps", "max_stars_repo_head_hexsha": "874dad6f34420c014d98eccbe81a061bdc0110cf", "max_stars_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-02T14:23:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T15:39:30.000Z", "max_issues_repo_path": "src/Components/qfed/qfed/dozier.py", "max_issues_repo_name": "GEOS-ESM/AeroApps", "max_issues_repo_head_hexsha": "874dad6f34420c014d98eccbe81a061bdc0110cf", "max_issues_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-04-15T16:22:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T13:59:25.000Z", "max_forks_repo_path": "src/Components/qfed/qfed/dozier.py", "max_forks_repo_name": "GEOS-ESM/AeroApps", "max_forks_repo_head_hexsha": "874dad6f34420c014d98eccbe81a061bdc0110cf", "max_forks_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1822358346, "max_line_length": 128, "alphanum_fraction": 0.4864925407, "include": true, "reason": "from numpy,from scipy", "num_tokens": 6510}
import dolfin as df
import itertools
import numpy as np


def as_tuple(maybe):
    '''Tuple of numbers'''
    if isinstance(maybe, (int, float)):
        return (maybe, )
    return tuple(maybe)


def subdomain_bbox(subdomains, label=None):
    '''
    Draw a bounding box around subdomain defined by entities in `subdomains`
    tagged with label. Return a d-tuple of intervals such that their cartesian
    product forms the bounding box.
    '''
    if hasattr(label, '__iter__'):
        return [(min(I[0] for I in intervals), max(I[1] for I in intervals))
                for intervals in zip(*(subdomain_bbox(subdomains, l) for l in label))]

    mesh = subdomains.mesh()
    if label is None:
        coords = mesh.coordinates()
    else:
        arr = subdomains.array()
        mesh.init(mesh.topology().dim(), 0)
        c2v = mesh.topology()(mesh.topology().dim(), 0)
        vertices = set(np.concatenate(list(map(c2v, np.where(arr == label)[0]))))
        coords = mesh.coordinates()[list(vertices)]
    return list(zip(coords.min(axis=0), coords.max(axis=0)))


def closest_entity(x, subdomains, label=None):
    '''
    Return entity with smallest distance to x out of entities marked by label
    in subdomains. The distance is measured to the midpoint, so it's only
    approximate.
    '''
    x = df.Point(*x)
    # Grab all tags
    if label is None:
        label = set(subdomains.array())
    label = as_tuple(label)

    sub_iter = itertools.chain(*[df.SubsetIterator(subdomains, l) for l in label])
    pairs = (((x-e.midpoint()).norm(), e.index()) for e in sub_iter)
    dist, index = min(pairs, key=lambda p: p[0])
    print('Found y, |x-y|=', dist)

    return df.MeshEntity(subdomains.mesh(), subdomains.dim(), index)


def point_source(e, A, h=1E-10):
    '''
    Create a point source (h cutoff) with amplitude A at the entity center
    '''
    gdim = e.mesh().geometry().dim()
    x = e.midpoint().array()[:gdim]
    degree = A.ufl_element().degree()

    norm_code = '+'.join(['pow(x[%d]-x%d, 2)' % (i, i) for i in range(gdim)])
    norm_code = 'sqrt(%s)' % norm_code
    params = {'h': h, 'A': A}
    params.update({('x%d' % i): x[i] for i in range(gdim)})

    return df.Expression('%s < h ? A: 0' % norm_code, degree=1, **params)


def snap_to_nearest(f):
    '''An expression which evaluates f[function] at dof closest to f'''

    class ProxyExpression(df.Expression):
        def __init__(self, f, **kwargs):
            self.f = f
            V = f.function_space()
            self.y = V.tabulate_dof_coordinates().reshape((V.dim(), -1))
            self.snaps = {}

        def eval(self, value, x):
            x = self.snap_to_nearest(x)
            value[:] = self.f(x)
            # Keep track of where the evaluation happened
            self.eval_point = x

        def value_shape(self):
            return f.ufl_element().value_shape()

        def snap_to_nearest(self, x):
            x = tuple(x)
            out = self.snaps.get(x, None)
            # Memoize
            if out is None:
                out = self.y[np.argmin(np.sqrt(np.sum((self.y - x)**2, axis=1)))]
                self.snaps[x] = out
            return out

    return ProxyExpression(f, degree=f.function_space().ufl_element().degree())


class SiteCurrent(df.Expression):
    '''normal*I where I can vary in time and normal is fixed'''
    def __init__(self, I, n, **kwargs):
        self.n = n
        self.I = I
        self._time = 0
        self.t = 0

    def value_shape(self):
        return (3, )

    def eval(self, values, x):
        values[:] = self.n*self.I(x)

    @property
    def t(self):
        return self._time

    @t.setter
    def t(self, t):
        self._time = t
        hasattr(self.I, 't') and setattr(self.I, 't', self._time)


def surface_normal(tag, facet_f, point):
    '''Normal of tagged surface which points away from the point'''
    # NOTE: as this is a constant it will only work for a flat surface
    mesh = facet_f.mesh()
    tdim = facet_f.dim()
    assert tdim == mesh.topology().dim()-1
    assert mesh.geometry().dim() == 3

    point = df.Point(*point)

    facets, = np.where(facet_f.array() == tag)
    facets = iter(facets)

    first = df.Facet(mesh, next(facets))
    n = first.normal()
    # Is this a flat surface
    assert all(abs(abs(df.Facet(mesh, f).normal().dot(n))-1) < 1E-10 for f in facets)

    mid = first.midpoint()

    return n.array() if n.dot(mid-point) > 0 else -1*n.array()
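A hypothetical usage sketch for `subdomain_bbox`, assuming legacy FEniCS (dolfin) is installed and the helpers above are importable; the mesh, subdomain string, and tag value are arbitrary choices:

```python
# Hypothetical sketch: bounding box of a tagged subdomain on a unit square.
import dolfin as df

mesh = df.UnitSquareMesh(8, 8)
subdomains = df.MeshFunction("size_t", mesh, mesh.topology().dim(), 0)
df.CompiledSubDomain("x[0] <= 0.5 + DOLFIN_EPS").mark(subdomains, 1)

print(subdomain_bbox(subdomains, label=1))   # roughly [(0.0, 0.5), (0.0, 1.0)]
```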
{"hexsha": "8d64d39eadcc393f1874bebf563379c0422f161b", "size": 4527, "ext": "py", "lang": "Python", "max_stars_repo_path": "neuronmi/simulators/solver/aux.py", "max_stars_repo_name": "MiroK/nEuronMI", "max_stars_repo_head_hexsha": "227b26598fa2cde5aabec68db898f308fb44aa31", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-16T07:35:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-16T07:35:53.000Z", "max_issues_repo_path": "neuronmi/simulators/solver/aux.py", "max_issues_repo_name": "MiroK/nEuronMI", "max_issues_repo_head_hexsha": "227b26598fa2cde5aabec68db898f308fb44aa31", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2019-11-08T16:59:53.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-05T10:44:49.000Z", "max_forks_repo_path": "neuronmi/simulators/solver/aux.py", "max_forks_repo_name": "MiroK/nEuronMI", "max_forks_repo_head_hexsha": "227b26598fa2cde5aabec68db898f308fb44aa31", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-03T05:15:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-03T05:15:46.000Z", "avg_line_length": 29.5882352941, "max_line_length": 86, "alphanum_fraction": 0.5840512481, "include": true, "reason": "import numpy", "num_tokens": 1188}
# -*- coding: utf-8 -*-
"""
Data conversion between space/time vector and space/time grid formats

Created on Sat Jun 27 11:40:16 2015

@author: hdragon689
"""
from six.moves import range

import numpy as np
import pandas as pd


def valstv2stg(ch, z, cMS=None, tME=None):
    '''
    Converts the values of a space/time variable from a s/t vector format
    (i.e. the variable z is listed as a vector of n values) to a grid format
    (i.e. the variable Z is given as a nMS by nME matrix corresponding to nMS
    Monitoring Sites and nME Measuring Events). Use help stgridsyntax for
    information on the s/t grid format.

    SYNTAX : [Z,cMS,tME,nanloc]=valstv2stg(ch,z,cMS,tME);

    INPUT :

    ch      n by d+1    matrix of space/time coordinates for spatial domain
                        of dimension d
    z       n by 1      vector of field value at coordinate ch
    cMS     nMS by 2    optional matrix of spatial coordinates for the nMS
                        Measuring Sites
    tME     1 by nME    optional vector of times of the tME Measuring Events

    OUTPUT :

    Z       nMS by nME  matrix of values for the variable Z corresponding to
                        nMS Monitoring Sites and nME Measuring Events
    cMS     nMS by d    matrix of spatial coordinates for the nMS Measuring
                        Sites
    tME     1 by nME    vector of times of the tME Measuring Events
    nanloc  list        (row, column) indices of the NaNs in Z

    NOTE : cMS and tME can be provided as input if they are both known. In
    that case ch must be a nMS*nME by 3 matrix of the points corresponding to
    nMS Monitoring Sites and nME Measuring Events, listed with space cycling
    quicker than time.
    '''
    if 'pandas' in str(type(ch)):
        ch = ch.values
    if 'pandas' in str(type(z)):
        z = z.values

    cols = ['x', 'y', 't', 'z']
    data = np.hstack((ch, z.reshape(z.size, 1)))
    datadf = pd.DataFrame(data, columns=cols)
    datadf['x'] = datadf['x'].astype(float)
    datadf['y'] = datadf['y'].astype(float)
    datadf['z'] = datadf['z'].astype(np.double)
    dtable = pd.pivot_table(datadf, values=datadf.columns[3],
                            index=['y', 'x'], columns=['t'])

    # cMS
    cMS_ = list(zip(*np.array(dtable.index)))
    cMS_ = np.array([np.asarray(cMS_[0]), np.asarray(cMS_[1])])
    cMS_ = cMS_.T.astype(float)
    cMS_ = cMS_[:, [1, 0]]

    # tME
    try:
        tME_ = np.array(dtable.columns).astype(ch[0, 2].dtype)
    except AttributeError:
        tME_ = np.array(dtable.columns).astype(type(ch[0, 2]))

    # Z
    Z_ = dtable.values.astype(np.double)
    nt = Z_.shape[1]

    if cMS is not None:
        Z = []
        for i in range(cMS.shape[0]):
            ii = np.where(np.logical_and(cMS_[:, 0] == cMS[i, 0],
                                         cMS_[:, 1] == cMS[i, 1]))
            Z.append(Z_[ii, :].reshape(nt))
        Z = np.asarray(Z, dtype=np.double)
    else:
        cMS = cMS_
        Z = Z_

    if tME is not None:
        tidx = []
        for j, tMEi in enumerate(tME):
            tidx.append(np.where(tME == tMEi))
        tidx = np.asarray(tidx).reshape(tME.size)
        Z = Z[:, tidx]
    else:
        tME = tME_

    # NaN locations
    nanloc = list(zip(np.where(np.isnan(Z))[0], np.where(np.isnan(Z))[1]))

    return Z, cMS, tME, nanloc


def valstg2stv(Z, cMS, tME):
    '''
    Converts the coordinates and values of a space/time variable from a grid
    format (i.e. the variable Z is given as a nMS by nME matrix corresponding
    to nMS Measuring Sites and nME Measuring Events), to a s/t vector format
    (i.e. the variable z is listed as a vector of nMS*nME values,
    corresponding to points with space/time coordinates, where the spatial
    coordinates cycle quicker than the time coordinates).

    SYNTAX : [ch,z]=valstg2stv(Z,cMS,tME);

    INPUT :

    Z    nMS by nME    matrix of values for the variable Z corresponding to
                       nMS Monitoring Sites and nME Measuring Events
    cMS  nMS by 2      matrix of 2D spatial coordinates for the nMS Measuring
                       Sites
    tME  1 by nME      vector of times of the tME Measuring Events

    OUTPUT :

    ch   nMS*nME by 3  matrix of space time coordinates, listing the
                       space/time locations of the points corresponding to
                       nMS Monitoring Sites and nME Measuring Events (space
                       cycles quicker than time)
    z    nMS*nME by 1  vector of values for the variable Z corresponding to
                       the s/t points ch
    '''
    nc = cMS.shape[0]
    nt = tME.size
    zh = (Z.T).reshape(nc*nt, 1)
    ch = np.asarray(list(zip(np.tile(cMS[:, 0], nt), np.tile(cMS[:, 1], nt),
                             tME.repeat(nc))))

    return ch, zh


if __name__ == "__main__":
    import time
    data = '../examples/Data/GeoData.xls'
    datadf = pd.ExcelFile(data).parse('Sheet1', header=None)
    ch = datadf.iloc[:, 0:3].values
    z = datadf.iloc[:, 4].values.reshape(ch.shape[0], 1)
    stime = time.time()
    Z1, cMS, tME, nanloc = valstv2stg(ch, z)
    print(time.time()-stime)
    ch2, z2 = valstg2stv(Z1, cMS, tME)
    Z2, cMS2, tME2, nanloc = valstv2stg(ch2, z2, cMS[3:, :], tME[3:])
    Z3, cMS3, tME3, nanloc = valstv2stg(ch2, z2)
    id = np.where((Z1 != Z2))
    print(id)
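A minimal synthetic round-trip sketch, assuming `valstv2stg` from this module is importable (the two sites, three events, and values below are arbitrary test data):

```python
# Synthetic check: 2 sites x 3 events, with space cycling quicker than time.
import numpy as np

cMS = np.array([[0., 0.], [1., 0.]])          # spatial coordinates
tME = np.array([1., 2., 3.])                  # measuring events
ch = np.column_stack([np.tile(cMS[:, 0], 3),
                      np.tile(cMS[:, 1], 3),
                      tME.repeat(2)])
z = np.arange(6, dtype=float)

Z, cMS_, tME_, nanloc = valstv2stg(ch, z)
print(Z.shape)                                # (2, 3)
```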
{"hexsha": "9e2887bb71d5bfd163c17351b422f223f8364bb9", "size": 4982, "ext": "py", "lang": "Python", "max_stars_repo_path": "stamps/general/valstvgx.py", "max_stars_repo_name": "stemlab689/stamps", "max_stars_repo_head_hexsha": "5494d4e86ad005082c677d9a07f71e1606338ba0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "stamps/general/valstvgx.py", "max_issues_repo_name": "stemlab689/stamps", "max_issues_repo_head_hexsha": "5494d4e86ad005082c677d9a07f71e1606338ba0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stamps/general/valstvgx.py", "max_forks_repo_name": "stemlab689/stamps", "max_forks_repo_head_hexsha": "5494d4e86ad005082c677d9a07f71e1606338ba0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5316455696, "max_line_length": 92, "alphanum_fraction": 0.6382978723, "include": true, "reason": "import numpy", "num_tokens": 1495}
#!/usr/bin/env python3

import random
import time
import json
# import pprint
import sys
import numpy as np

from datetime import datetime
from signal import signal, SIGPIPE, SIG_DFL

if __name__ == '__main__':

    if len(sys.argv) < 6:
        sys.stderr.write("arguments:\n")
        sys.stderr.write("\t$1 filename\n")
        sys.stderr.write("\t$2 max loop\n")
        sys.stderr.write("\t$3 delay\n")
        sys.stderr.write("\t$4 node speed\n")
        sys.stderr.write("\t$5 update random or linear[r, l]\n")
        # print("\t$6 single or multi line [s, m]")
        sys.exit(1)

    signal(SIGPIPE, SIG_DFL)

    # filename
    f = open(sys.argv[1], 'r')

    # max loop
    if 0 < int(sys.argv[2]):
        loop_max = int(sys.argv[2])
    else:
        loop_max = 0

    # delay
    delay = 0
    if 0 < float(sys.argv[3]):
        delay = float(sys.argv[3])

    # node speed
    speed = int(sys.argv[4])

    # update random or linear
    is_random = False
    if sys.argv[5] == "r":
        is_random = True
    else:
        is_random = False

    # single or multi line
    # is_single = False
    # if sys.argv[6] == "s":
    #     is_single = True
    # else:
    #     is_single = False

    # if is_single:
    line = f.readline()
    j = json.loads(line)
    # sys.stdout.write(json.dumps(j, separators=(',', ':')))
    # sys.stdout.write("\n")
    # sys.stdout.flush()
    node = j["init"]["node"]
    # else:
    #     node = json.loads("[]")
    #     line = f.readline()
    #     while line:
    #         j = json.loads(line)
    #         init = j['init']
    #         if 'node' in init:
    #             n = init['node']
    #             node.append(n)
    #         # sys.stdout.write(json.dumps(j, separators=(',', ':')))
    #         # sys.stdout.write("\n")
    #         # sys.stdout.flush()
    #         line = f.readline()

    node = np.array(node)

    # time.sleep(0.2)
    # sys.exit()

    # print(json.dumps(node))
    # print("node length: %d" % len(node))
    # print("Enter Start")

    now = datetime.now()
    now_ts = now.timestamp()

    random.seed(0)
    np.random.seed(0)

    mt = 0
    loop_count = 0
    idx = 0
    json_list = []
    while True:
        if 0 < loop_max:
            if loop_max <= loop_count:
                # sys.stdout.write('{"finish":"finish"}')
                # sys.stdout.write("\n")
                # sys.stdout.flush()
                # time.sleep(2)
                json_list.append('{"finish":"finish"}\n')
                break

        t0 = time.perf_counter()

        if is_random:
            idx = random.randint(0, len(node) - 1)
        else:
            idx = idx % len(node)

        x = node[idx]['x']
        y = node[idx]['y']
        r = node[idx]['radius']

        x = round(random.uniform(x - speed, x + speed), 1)
        y = round(random.uniform(y - speed, y + speed), 1)
        # x = x + 1

        json_update = {}
        json_update['id'] = idx
        json_update['x'] = x
        json_update['y'] = y
        json_update['r'] = r

        node[idx]['x'] = x
        node[idx]['y'] = y

        # update = {'update': []}
        # update['update'].append(json_update)
        update = {'update': {}}
        update['update']['node'] = json_update

        # idx = 1
        # x = json_init['init']['node'][idx]['x']
        # y = json_init['init']['node'][idx]['y']
        # speed = 10
        # json_update = {}
        # json_update['id'] = idx
        # json_update['x'] = round(random.uniform(x - speed, y + speed), 1)
        # json_update['y'] = round(random.uniform(y - speed, y + speed), 1)
        # update['update'].append(json_update)

        # print(json.dumps(update))
        # sys.stdout.write(json.dumps(update, separators=(',', ':')))
        # sys.stdout.write("\n")
        # sys.stdout.flush()
        json_list.append(json.dumps(update, separators=(',', ':')))

        loop_count += 1
        # sys.exit()

        t1 = time.perf_counter()
        mt = mt + (t1 - t0)
        # sys.stderr.write("%04d elapsed: %f\n" % (idx, (t1 - t0)))

        idx = idx + 1
        time.sleep(delay)
        # sys.exit()
        # break

    mt = 0
    str_array = np.array(json_list)
    sys.stderr.write("-- mobility stand-by\n")
    input()
    for j in json_list:
        t0 = time.perf_counter()
        sys.stdout.write(j)
        sys.stdout.write("\n")
        sys.stdout.flush()
        t1 = time.perf_counter()
        mt = mt + (t1 - t0)
        # sys.stderr.write("print elapsed: %f\n" % (t1 - t0))

    sys.stderr.write("-- mobility elapsed: %f (%f)\n" % (mt, mt / len(json_list)))
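The script emits one JSON object per line on stdout and terminates the stream with a `{"finish":"finish"}` record. A hypothetical consumer sketch for that stream (the field names follow the `update`/`node` structure built above):

```python
# Hypothetical consumer: read the newline-delimited JSON stream from stdin.
import json
import sys

for line in sys.stdin:
    if not line.strip():          # the finish record may leave a blank line
        continue
    msg = json.loads(line)
    if "finish" in msg:
        break
    node = msg["update"]["node"]
    print(node["id"], node["x"], node["y"])
```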
{"hexsha": "9552a8d8e4ea7b572e9f699ba2c830927e46267e", "size": 4715, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/mobility.py", "max_stars_repo_name": "sarub0b0/hashmot-deltahq", "max_stars_repo_head_hexsha": "8ef0b5d138ae8922e1ba1b649cee4ef05dd04849", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-02-07T07:46:26.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-07T07:46:26.000Z", "max_issues_repo_path": "bin/mobility.py", "max_issues_repo_name": "sarub0b0/hashmot-deltahq", "max_issues_repo_head_hexsha": "8ef0b5d138ae8922e1ba1b649cee4ef05dd04849", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/mobility.py", "max_forks_repo_name": "sarub0b0/hashmot-deltahq", "max_forks_repo_head_hexsha": "8ef0b5d138ae8922e1ba1b649cee4ef05dd04849", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.6682692308, "max_line_length": 82, "alphanum_fraction": 0.4865323436, "include": true, "reason": "import numpy", "num_tokens": 1314}
module FluorescentSeries

using AxisArrays, ImageAxes

include("core.jl")
include("algorithms.jl")

export FluorescentSerie, deltaFF

end # module
{"hexsha": "064cdf729136f53a5bea06193ba108b8996eec1b", "size": 146, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/FluorescentSeries.jl", "max_stars_repo_name": "UnofficialJuliaMirror/FluorescentSeries.jl-68ab7a74-b89c-58c4-90e0-a4e17bbb69bc", "max_stars_repo_head_hexsha": "0161604726d4d653e529f3e74172be751bd8c7f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-28T17:29:56.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-28T17:29:56.000Z", "max_issues_repo_path": "src/FluorescentSeries.jl", "max_issues_repo_name": "UnofficialJuliaMirror/FluorescentSeries.jl-68ab7a74-b89c-58c4-90e0-a4e17bbb69bc", "max_issues_repo_head_hexsha": "0161604726d4d653e529f3e74172be751bd8c7f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/FluorescentSeries.jl", "max_forks_repo_name": "UnofficialJuliaMirror/FluorescentSeries.jl-68ab7a74-b89c-58c4-90e0-a4e17bbb69bc", "max_forks_repo_head_hexsha": "0161604726d4d653e529f3e74172be751bd8c7f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-02-26T18:21:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-03T12:45:26.000Z", "avg_line_length": 12.1666666667, "max_line_length": 27, "alphanum_fraction": 0.801369863, "num_tokens": 42}
\documentclass{article}
\usepackage{graphicx}
\usepackage{epsfig}
\usepackage{amssymb,amsmath}
\usepackage{array}
\usepackage{setspace}
\graphicspath{ {./assignment_2/} }
\singlespacing
\setlength{\parindent}{0pt}

\title{CTA200 2020 Assignment 2 Summary}
\author{SURP Student Ethan Sun}
\date{May 10th, 2020}

\begin{document}
\maketitle

\section*{Question 1}
\subsection*{Method}
To represent the points on the complex plane, I used two nested for loops, each iterating over the numpy range "numpy.arange(-2.0, 2.0, 0.05)". I then used another for loop to calculate the z value for 25 iterations. At the end of the loops, I applied the "numpy.isfinite()" function to evaluate the real and imaginary parts of z. \\
If both evaluations return "True", I would then assign the corresponding c value to a list where all the c values that result in a converging z value are stored. If either of the evaluations returns "False", I would assign the c values to a list for diverging results. In addition, I also kept track of how many iterations went through before z diverged, using "a = a + 1" as a counter. These counters are assigned to a list as well. \\
Finally, I plotted the convergent and divergent points using "plt.scatter()", and assigned the divergent iteration counter list to the "color" argument in order to plot a color scale.
\newpage

\subsection*{Results}
\includegraphics{q1_plot.png}

The convergent points are shown in red in this graph, and the rest of the divergent points are colored in gradient from blue to red. The darker blue points indicate that they diverged very early (around 11 to 12 iterations), while the brighter ones diverged after going through more iterations. It could be observed that most of the converging points have small imaginary and real components (less than 1), and as the magnitude of these two parts gets larger, the z value tends to diverge after fewer iterations.
\newpage

\section*{Question 2}
\subsection*{Method}
In order to provide the SIRD curves for 4 different cases, I first put all the parameters ($\beta$, $\gamma$, $\mu$), representing infection rate, recovery rate, and mortality rate, into three individual arrays. I then defined the "SIR model()" function, which takes in the above parameters, a time vector, as well as a variable "y" representing the S, I, R, and D values. This function returns the derivative of each variable with respect to time. \\
To calculate the result, I created a tuple which takes the elements from the parameter arrays for each case. There are 4 cases in total, starting from a relatively moderate scenario where the infection and mortality rates are low and the recovery rate is relatively high. I then tried different combinations of parameters to simulate worse scenarios. Finally, I applied the integrate.odeint() function to calculate the SIRD values for each respective case before organising them into an array and plotting the results against time.

\subsection*{Results}
\includegraphics[scale = 0.5]{q2_plot_1.png} \\
In this graph, $\beta$ = 0.2, $\gamma$ = 0.15, and $\mu$ = 0.01. Most of the infected patients recovered and the overall mortality is low. After 200 days, the spread of the disease slowed down significantly.
\newpage

\includegraphics[scale = 0.5]{q2_plot_2.png} \\
In this graph, $\beta$ = 0.4, $\gamma$ = 0.1, and $\mu$ = 0.05. The disease spread much faster and the number of infected patients peaked around Day 30. The death toll is much higher as well, but the pandemic stopped after around 50 days.

\includegraphics[scale = 0.5]{q2_plot_3.png} \\
In this graph, $\beta$ = 0.6, $\gamma$ = 0.06, and $\mu$ = 0.1. Similarly, the disease spread much faster and the number of infected patients peaked around Day 20. The death toll is, again, much higher, and the pandemic stopped after around 40 days.

\includegraphics[scale = 0.5]{q2_plot_4.png} \\
In this graph, $\beta$ = 0.8, $\gamma$ = 0.03, and $\mu$ = 0.2. This is the worst case scenario, where the infection and mortality rates are unreasonably high while the recovery rate is very low. The disease spread rapidly, killed almost 80\% of the population in 30 days, and consequently ended itself quickly. \\
Although the first set of parameters I tried reflected the realistic behaviour of a pandemic to a certain extent, the rest of the cases are hardly reasonable or useful. For example, to reflect real-life cases, it would be a better idea to use a smaller $\beta$ value if the mortality rate $\mu$ is as high as 0.2, because strong quarantine measures would usually be taken swiftly by governments and healthcare services when dealing with such a dangerous disease, and the disease would be forced to become less contagious due to human interference.

\end{document}
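The report describes but does not include its integration script. A minimal sketch of the SIRD system it analyzes, with assumed variable names and populations normalized to 1, using scipy.integrate.odeint as the report mentions:

```python
# Minimal SIRD sketch (names and initial conditions are assumptions).
import numpy as np
from scipy.integrate import odeint

def sird(y, t, beta, gamma, mu):
    S, I, R, D = y
    dS = -beta * S * I                     # new infections
    dI = beta * S * I - (gamma + mu) * I   # infections minus recoveries/deaths
    dR = gamma * I                         # recoveries
    dD = mu * I                            # deaths
    return [dS, dI, dR, dD]

t = np.linspace(0, 200, 1000)
sol = odeint(sird, [0.99, 0.01, 0.0, 0.0], t, args=(0.2, 0.15, 0.01))
print(sol[-1])   # final S, I, R, D fractions
```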
{"hexsha": "3bcddfeff114cfab3ba47740b121186e2777b4d0", "size": 4708, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "assignment_2/Question_3.tex", "max_stars_repo_name": "ethan-sun-010/CTA200", "max_stars_repo_head_hexsha": "c7f4948501977ce3a7580e0809adbe3011cc086b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignment_2/Question_3.tex", "max_issues_repo_name": "ethan-sun-010/CTA200", "max_issues_repo_head_hexsha": "c7f4948501977ce3a7580e0809adbe3011cc086b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignment_2/Question_3.tex", "max_forks_repo_name": "ethan-sun-010/CTA200", "max_forks_repo_head_hexsha": "c7f4948501977ce3a7580e0809adbe3011cc086b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 77.1803278689, "max_line_length": 546, "alphanum_fraction": 0.7672047579, "num_tokens": 1173}
function [model, L] = ppcaVb(X, q, prior)
% Perform variational Bayesian inference for probabilistic PCA model.
% Input:
%   X: d x n data matrix
%   q: dimension of target space
% Output:
%   model: trained model structure
%   L: variational lower bound
% Reference:
%   Pattern Recognition and Machine Learning by Christopher M. Bishop
% Written by Mo Chen (sth4nth@gmail.com).
[m,n] = size(X);
if nargin < 3
    a0 = 1e-4;
    b0 = 1e-4;
    c0 = 1e-4;
    d0 = 1e-4;
else
    a0 = prior.a;
    b0 = prior.b;
    c0 = prior.c;
    d0 = prior.d;
end
if nargin < 2
    q = m-1;
end
tol = 1e-6;
maxIter = 500;
L = -inf(1,maxIter);

mu = mean(X,2);
Xo = bsxfun(@minus, X, mu);
s = dot(Xo(:),Xo(:));
I = eye(q);

% init parameters
a = a0+m/2;
c = c0+m*n/2;
Ealpha = 1e-4;
Ebeta = 1e-4;
EW = rand(q,m);
EWo = bsxfun(@minus,EW,mean(EW,2));
EWW = EWo*EWo'/m+EW*EW';
for iter = 2:maxIter
    % q(z)
    LZ = I+Ebeta*EWW;
    V = inv(chol(LZ));                 % inv(LZ) = V*V';
    EZ = LZ\EW*Xo*Ebeta;
    EZZ = n*(V*V')+EZ*EZ';
    KLZ = n*sum(log(diag(V)));         % KLZ = 0.5*n*log(det(inv(LZ)));
    % q(w)
    LW = diag(Ealpha)+Ebeta*EZZ;
    V = inv(chol(LW));                 % inv(LW) = V*V';
    EW = LW\EZ*Xo'*Ebeta;
    EWW = m*(V*V')+EW*EW';
    KLW = m*sum(log(diag(V)));         % KLW = 0.5*n*log(det(inv(LW)));
    % q(alpha)
    b = b0+diag(EWW)/2;
    Ealpha = a./b;
    KLalpha = -sum(a*log(b));
    % q(beta)
    WZ = EW'*EZ;
    d = d0+(s-2*dot(Xo(:),WZ(:))+dot(EWW(:),EZZ(:)))/2;
    Ebeta = c/d;
    KLbeta = -c*log(d);
    % q(mu)
    % Emu = Ebeta/(lambda+n*Ebeta)*sum(X-WZ,2);

    % lower bound
    L(iter) = KLalpha+KLbeta+KLW+KLZ;
    if L(iter)-L(iter-1) < tol*abs(L(iter-1)); break; end
end
L = L(2:iter);

model.Z = EZ;
model.W = EW;
model.alpha = Ealpha;
model.beta = Ebeta;
model.a = a;
model.b = b;
model.c = c;
model.d = d;
model.mu = mu;
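To make the q(Z) step concrete: the Gaussian posterior over latent codes has precision LZ = I + E[beta] E[WW'] and mean E[Z] = LZ^{-1} E[W] Xo E[beta]. A rough numpy mirror of that single update, an illustrative sketch only (the MATLAB function above is the reference implementation; sizes and initial values are arbitrary):

```python
# Illustrative numpy mirror of the q(Z) update in the VB loop above.
import numpy as np

q, m, n = 2, 5, 100
rng = np.random.default_rng(0)
X = rng.standard_normal((m, n))
Xo = X - X.mean(axis=1, keepdims=True)      # centered data, as in the MATLAB code
EW = rng.random((q, m))                     # E[W], random init
EWo = EW - EW.mean(axis=1, keepdims=True)
EWW = EWo @ EWo.T / m + EW @ EW.T           # stand-in for E[WW']
Ebeta = 1e-4                                # E[beta] init

LZ = np.eye(q) + Ebeta * EWW                # precision of q(Z)
EZ = np.linalg.solve(LZ, EW @ Xo) * Ebeta   # E[Z], a q x n matrix
print(EZ.shape)
```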
{"author": "PRML", "repo": "PRMLT", "sha": "baac49f643db6b39e75307d3b21307b32b29a7a9", "save_path": "github-repos/MATLAB/PRML-PRMLT", "path": "github-repos/MATLAB/PRML-PRMLT/PRMLT-baac49f643db6b39e75307d3b21307b32b29a7a9/chapter12/ppcaVb.m"}
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np

torch.set_default_tensor_type("torch.cuda.FloatTensor")


class Generator(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv_1 = nn.ConvTranspose2d(100, 512, kernel_size=4, stride=1)
        self.batc_1 = nn.BatchNorm2d(512)
        self.conv_2 = nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1)
        self.batc_2 = nn.BatchNorm2d(256)
        self.conv_3 = nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1)
        self.batc_3 = nn.BatchNorm2d(128)
        self.conv_4 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
        self.batc_4 = nn.BatchNorm2d(64)
        self.conv_5 = nn.ConvTranspose2d(64, 3, kernel_size=4, stride=2, padding=1)

    def forward(self, x):
        x = F.relu(self.batc_1(self.conv_1(x)))
        x = F.relu(self.batc_2(self.conv_2(x)))
        x = F.relu(self.batc_3(self.conv_3(x)))
        x = F.relu(self.batc_4(self.conv_4(x)))
        x = torch.tanh(self.conv_5(x))
        x = torch.reshape(x, (-1, 3, 64, 64))
        return x.to(device='cpu')


class Discriminator(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv_1 = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1)
        self.conv_2 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1)
        self.batc_2 = nn.BatchNorm2d(128)
        self.conv_3 = nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1)
        self.batc_3 = nn.BatchNorm2d(256)
        self.conv_4 = nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1)
        self.batc_4 = nn.BatchNorm2d(512)
        self.conv_5 = nn.Conv2d(512, 1, kernel_size=4, stride=1)

    def forward(self, x):
        x = F.leaky_relu(self.conv_1(x), negative_slope=0.2)
        x = F.leaky_relu(self.batc_2(self.conv_2(x)), negative_slope=0.2)
        x = F.leaky_relu(self.batc_3(self.conv_3(x)), negative_slope=0.2)
        x = F.leaky_relu(self.batc_4(self.conv_4(x)), negative_slope=0.2)
        x = torch.sigmoid(self.conv_5(x))
        x = torch.reshape(x, (-1, 1))
        return x.to(device='cpu')
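A hypothetical smoke test for the two networks, assuming the classes above are in scope (which also sets the CUDA default tensor type) and a GPU is available:

```python
# Hypothetical smoke test: one generator/discriminator forward pass.
import torch

G, D = Generator(), Discriminator()
z = torch.randn(4, 100, 1, 1)     # latent batch, created on the GPU by default
fake = G(z)                       # (4, 3, 64, 64), returned on the CPU
scores = D(fake.cuda())           # (4, 1) probabilities, returned on the CPU
print(fake.shape, scores.shape)
```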
{"hexsha": "b2f7ddbcf49d32beb9ea18e125cad3ae981ece9a", "size": 1969, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/dcgan.py", "max_stars_repo_name": "hexhowells/Neural-Network-Implementations", "max_stars_repo_head_hexsha": "3b163e721c62e53aa54a4e1d6ce971bd68b1461b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-02T03:25:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-02T03:25:02.000Z", "max_issues_repo_path": "models/dcgan.py", "max_issues_repo_name": "hexhowells/Neural-Network-Implementations", "max_issues_repo_head_hexsha": "3b163e721c62e53aa54a4e1d6ce971bd68b1461b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/dcgan.py", "max_forks_repo_name": "hexhowells/Neural-Network-Implementations", "max_forks_repo_head_hexsha": "3b163e721c62e53aa54a4e1d6ce971bd68b1461b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8333333333, "max_line_length": 80, "alphanum_fraction": 0.6988318944, "include": true, "reason": "import numpy", "num_tokens": 694}
% !TEX root = Main.tex
\section{Non-Negative Matrix Factorization}
$\mathbf{X} \in \mathbb{Z}^{N \times M}_{\geq 0}$, NMF: $\mathbf{X} \approx \mathbf{U}^\top \mathbf{V}$, $x_{ij}=\sum_z u_{zi}v_{zj}=\langle\mathbf{u}_i, \mathbf{v}_j\rangle$\\
Decompose object into features: topics, face parts, etc. $\mathbf{u}$ weights on parts, $\mathbf{v}$ parts (bases). More interpretable (PCA: holistic repre.).
\subsection*{EM for MLE for pLSA (NO global opt guarantee)}
\textbf{Context Model:} $p(w | d) = \sum_{z=1}^K p(w | z) p(z | d)$\\
\textbf{Conditional independence assumption ($*$):}\\
$p(w|d) = \sum_z p(w,z|d) = \sum_z p(w|d,z)p(z|d) \stackrel{*}{=} \sum_z p(w|z)p(z|d)$\\
\textbf{Symmetric parameterization:}\\
$p(w, d) = \sum_z p(z)p(w | z) p(d | z)$ \\
Log-Likelihood: $L(\mathbf{U}, \mathbf{V}) = \sum_{i,j} x_{ij}\log p(w_j|d_i) = \sum_{(i,j) \in X} \log \sum_{z=1}^K p(w_j|z)p(z|d_i)$ \\
$p(w_j|z) = v_{zj}$, $p(z|d_i) = u_{zi}$, $\sum_j^N v_{zj} = \sum_z^K u_{zi} = 1$\\
E-Step (optimal q: posterior of z over $(d_i, w_j)$):\\
$q_{zij} = \frac{p(w_j|z)p(z|d_i)}{\sum_{k=1}^K p(w_j|k)p(k|d_i)} := \frac{v_{zj}u_{zi}}{\sum_{k=1}^K v_{kj}u_{ki}}$, $\sum_z q_{zij}=1$\\
M-Steps:\\
$p(z|d_i) = \frac{\sum_j x_{ij}q_{zij}}{\sum_j x_{ij}}, \quad p(w_j|z) = \frac{\sum_i x_{ij}q_{zij}}{\sum_{i,l}x_{il}q_{zil}}$
\subsection*{Latent Dirichlet Allocation}
To sample a new document, we need to extend $X$ and $U^\top$ with a new row, s.t. $X=U^\top V$ (while pLSA fixes both dimensions).\\
For each $d_i$ sample topic weights $\mathbf{u}_i\sim$ Dirichlet($\alpha$): $p(\mathbf{u}_i|\alpha) \propto \prod_{z=1}^K u_{zi}^{\alpha_z-1}$, then topic $z^t\sim$ Multi($\mathbf{u}_i$), word $w^t\sim$ Multi($\mathbf{v}_{z^t}$)\\
Multinom. obsv. model on wc vec: $p(\mathbf{x}|V,u) = \frac{l!}{\prod_j x_j!}\prod_j \pi_j^{x_j}$ where $\pi_j=\sum_z v_{zj} u_z$, $l=\sum_j x_j$ \\
Bayesian averaging over $\mathbf{u}$: $p(\mathbf{x}|\mathbf{V},\alpha)=\int p(\mathbf{x}|\mathbf{V},\mathbf{u})p(\mathbf{u}|\alpha)d\mathbf{u}$
\subsection*{NMF Algorithm for quadratic cost function}
$\min_{\mathbf{U}, \mathbf{V}} J(\mathbf{U}, \mathbf{V}) = \frac{1}{2} \|\mathbf{X} - \mathbf{U}^\top\mathbf{V}\|_F^2$ s.t. (non-negativity) $\forall i,j,z:u_{zi},v_{zj} \geq 0$ \\
Comparison with pLSA:\\
1. sampling model: Gaussian vs multinomial 2. objective: quadratic vs KL divergence 3. constraints: not normalized \\
Alternating least squares:\\
1. init: $\mathbf{U}, \mathbf{V} = rand()$\\
2. repeat 3\textasciitilde4 for $\mathit{maxIters}$:\\
3. upd. $(\mathbf{VV}^\top)\mathbf{U} = \mathbf{VX}^\top$, proj. $u_{zi} = \max \{ 0, u_{zi} \}$\\
4. update $(\mathbf{UU}^\top)\mathbf{V} = \mathbf{UX}$, proj. $v_{zj} = \max \{ 0, v_{zj} \}$
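A minimal numpy sketch of the projected alternating least squares updates from the last subsection (illustrative only; the rank K, iteration count, and random initialization are arbitrary choices):

```python
# Projected ALS for NMF: solve the normal equations, then clip at zero.
import numpy as np

rng = np.random.default_rng(0)
X = rng.random((30, 20))      # nonnegative data, N x M
K = 4
U = rng.random((K, 30))
V = rng.random((K, 20))

for _ in range(100):
    U = np.maximum(0, np.linalg.solve(V @ V.T, V @ X.T))   # (VV^T)U = VX^T, project
    V = np.maximum(0, np.linalg.solve(U @ U.T, U @ X))     # (UU^T)V = UX,  project

print(np.linalg.norm(X - U.T @ V, 'fro'))   # reconstruction error
```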
{"hexsha": "083833fa0e797ee3270f8390a9dc09da5cb627ef", "size": 2722, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "NMF.tex", "max_stars_repo_name": "vscherer/eth-cil-exam-cheatsheet", "max_stars_repo_head_hexsha": "9ae156bcf5e2797e65b5495ff520649b43860cdd", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-09-24T20:33:35.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-21T15:52:46.000Z", "max_issues_repo_path": "NMF.tex", "max_issues_repo_name": "vscherer/eth-cil-exam-cheatsheet", "max_issues_repo_head_hexsha": "9ae156bcf5e2797e65b5495ff520649b43860cdd", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NMF.tex", "max_forks_repo_name": "vscherer/eth-cil-exam-cheatsheet", "max_forks_repo_head_hexsha": "9ae156bcf5e2797e65b5495ff520649b43860cdd", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-01-14T16:21:22.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-18T17:17:10.000Z", "avg_line_length": 69.7948717949, "max_line_length": 230, "alphanum_fraction": 0.6223365173, "num_tokens": 1189}
Products and Services

 * Special event shoots (Formals, Philanthropies, Bid Days, etc.)
 * Indoor and outdoor photo shoots
 * Headshots and Portraits
 * Greek Composites
 * Senior Portraits
 * Bands

Background

Devon Latzen has been photographing people and events for almost ten years. He began shooting for the school newspaper in 2000. In 2004, Devon Latzen began photographing portraits and interior design. Since then, he has photographed many people in studio settings as well as at indoor and outdoor events.

In 2005, Devon Latzen studied briefly at the Brooks Institute of Photography in Santa Barbara, before returning to American River College and transferring to UC Davis as a Design student. In 2007, Devon Latzen taught photography as a Teaching Assistant for the UC Davis Beginning Photography course.

Some of his other notable experiences include photographing headshots for a local Radio Disney personality, weddings, and fraternity and sorority events. Devon Latzen currently shoots events for Chi Omega and Alpha Gamma Omega. Devon Latzen also offers Greek composite photography.

Davis has a wide variety of resources for Photography
{"hexsha": "27a3ae4da2baab20db9151c1af14766022bfb5a0", "size": 1155, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Devon_Latzen_Photography.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Devon_Latzen_Photography.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Devon_Latzen_Photography.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 72.1875, "max_line_length": 890, "alphanum_fraction": 0.8121212121, "num_tokens": 237}
from corvus.structures import Handler, Exchange, Loop, Update import corvutils.pyparsing as pp import os, sys, subprocess, shutil #, resource import re # Debug: FDV import pprint import numpy as np pp_debug = pprint.PrettyPrinter(indent=4) # Define dictionary of implemented calculations implemented = {} strlistkey = lambda L:','.join(sorted(L)) implemented['OceanXANES'] = {'type':'Exchange','out':['OceanXANES'],'cost':3, 'req':['ocean.edges', 'ocean.ecut', 'ocean.pp_list', 'ocean.diemac', 'ocean.xred', 'ocean.typat', 'ocean.natom', 'ocean.znucl', 'ocean.ntypat', 'ocean.rprim', 'ocean.acell', 'ocean.photon.operator', 'ocean.photon.polarization'],'desc':'Calculate XANES using ocean.'} class Ocean(Handler): def __str__(self): return 'Ocean Handler' @staticmethod def canProduce(output): if isinstance(output, list) and output and isinstance(output[0], str): return strlistkey(output) in implemented elif isinstance(output, str): return output in implemented else: raise TypeError('Output should be token or list of tokens') @staticmethod def requiredInputFor(output): if isinstance(output, list) and output and isinstance(output[0], str): unresolved = {o for o in output if not Ocean.canProduce(o)} canProduce = (o for o in output if Ocean.canProduce(o)) additionalInput = (set(implemented[o]['req']) for o in canProduce) return list(set.union(unresolved,*additionalInput)) elif isinstance(output, str): if output in implemented: return implemented[output]['req'] else: return [output] else: raise TypeError('Output should be token or list of tokens') @staticmethod def cost(output): if isinstance(output, list) and output and isinstance(output[0], str): key = strlistkey(output) elif isinstance(output, str): key = output else: raise TypeError('Output should be token or list of tokens') if key not in implemented: raise LookupError('Corvus cannot currently produce ' + key + ' using FEFF') return implemented[key]['cost'] @staticmethod def sequenceFor(output,inp=None): if isinstance(output, list) and output and isinstance(output[0], str): key = strlistkey(output) elif isinstance(output, str): key = output else: raise TypeError('Output should be token of list of tokens') if key not in implemented: raise LookupError('Corvus cannot currently produce ' + key + ' using FEFF') f = lambda subkey : implemented[key][subkey] if f('type') is 'Exchange': return Exchange(Ocean, f('req'), f('out'), cost=f('cost'), desc=f('desc')) @staticmethod def prep(config): subdir = config['pathprefix'] + str(config['xcIndex']) + '_OCEAN' xcDir = os.path.join(config['cwd'], subdir) # Make new output directory if if doesn't exist if not os.path.exists(xcDir): os.mkdir(xcDir) # Store current Exchange directory in configuration config['xcDir'] = xcDir #@staticmethod #def setDefaults(input,target): # JJ Kas - run now performs all 3 methods, i.e., generateInput, run, translateOutput # Maybe we should also include prep here. Is there a reason that we want to limit the directory names # to automated Corvus_FEFFNN? Also if we have prep included here, we can decide on making a new directory # or not. @staticmethod def run(config, input, output): # set atoms and potentials # Set directory to ocean executables. # Debug: FDV # pp_debug.pprint(config) # Debug: FDV # sys.exit() dir = config['xcDir'] # Copy ocean related input to oceanInput here. Later we will be overriding some settings, # so we want to keep the original input intact. oceanInput = {key:input[key] for key in input if (key.startswith('ocean.') and 'photon.' 
        photonInput = {key: input[key] for key in input
                       if (key.startswith('ocean.') and 'photon.' in key)}

        photonfile = os.path.join(dir, 'photon1')

        # Write photon1 input file
        lines = []
        lines.append(str(photonInput['ocean.photon.operator'][0][0]))
        lines.append('cartesian ' + ' '.join([str(value) for value in photonInput['ocean.photon.polarization'][0]]))
        lines.append('end')
        if 'ocean.photon.qhat' in photonInput:
            lines.append('cartesian ' + ' '.join([str(value) for value in photonInput['ocean.photon.qhat'][0]]))
            lines.append('end')
        # Default must have the same nested-list shape as regular tokens so the
        # [0][0] indexing also works when the key is absent.
        lines.append(str(photonInput.get('ocean.photon.energy', [[0.0]])[0][0]))

        writeList(lines, photonfile)
        #writePhotonInput(photonInput,dir)

        # Generate any data that is needed from generic input and populate oceanInput with
        # global data (needed for all OCEAN runs).

        # Set directory for this exchange
        oceandir = config['ocean']

        # Set input file
        inpf = os.path.join(dir, 'ocean.in')

        # Loop over targets in output. Not sure if there will ever be more than one output target here.
        for target in output:
            if (target == 'OceanXANES'):
                # Set output and error files
                with open(os.path.join(dir, 'corvus.OCEAN.stdout'), 'w') as out, open(os.path.join(dir, 'corvus.OCEAN.stderr'), 'w') as err:
                    # Get pseudopotentials
                    # Write input file for Ocean.
                    writeXANESInput(oceanInput, inpf)

                    # Copy necessary files to dir
                    # If program is set to hamann, or not set, we are using
                    # John Vinson's version of oncvpsp with q-e
                    # Don't need fhi files, need to append UPF to
                    # files listed in pp_list, and also copy oncvpsp
                    # input file for absorber. In this case we don't
                    # need .opts or .fill files.
                    if oceanInput.get('ocean.opf.program', [['hamann']])[0][0] == 'hamann':
                        # By default, use q-e? However, this should depend on q-e being available?
                        for file in oceanInput['ocean.pp_list']:
                            fileUPF = file[0] + '.UPF'
                            fileONCV = file[0] + '.in'  # Should this always be named .in?
                            # UPF file should exist, .in file should only exist for absorber. For now, check that .in
                            # file exists for at least one of pp_list, and error otherwise.
                            shutil.copy(fileUPF, dir)
                            if os.path.exists(fileONCV):
                                shutil.copy(fileONCV, dir)
                    else:
                        # This should work for abinit or q-e.
                        for file in os.listdir("."):
                            if file.endswith(".UPF"):
                                shutil.copy(file, dir)
                        for file in oceanInput['ocean.pp_list']:
                            shutil.copy(file[0], dir)
                        if 'ocean.opf.fill' in oceanInput:
                            for fill in oceanInput['ocean.opf.fill']:
                                shutil.copy(fill[1], dir)
                        if 'ocean.opf.opts' in oceanInput:
                            for opt in oceanInput['ocean.opf.opts']:
                                shutil.copy(opt[1], dir)

                    # Loop over executables: This is specific to feff. Other codes
                    # will more likely have only one executable.
                    executables = ['ocean.pl']
                    args = ['ocean.in']
                    iExec = 0
                    for executable in executables:
                        runExecutable(oceandir, dir, executable, args, out, err)

                # For now, I am only passing the directory.
                print('Setting output')
                # Loop over output files? This is not possible in the output
                # right now.
                outdir = os.path.join(dir, 'CNBSE')
                output[target] = [fl for fl in os.listdir(outdir) if fl.startswith('absspct')]

    @staticmethod
    def cleanup(config):
        pass


##### Generic Helper Methods ##########

def check(input, token, default=None):
    if token in input:
        return input[token]
    else:
        return default

def setInput(input, token, default, Force=False):
    # If token is already defined, leave it unless force, otherwise define with default.
    if token not in input or Force:
        input[token] = default

def writeInput(input, inpfile):
    lines = []
    for key in input:
        lines = lines + getInpLines(input, key)
    # Print ocean input file
    writeList(lines, inpfile)

def getInpLines(input, token):
    lines = []
    block = False
    endblock = ' '
    key = token[len('ocean.'):]

    if token in input:
        for element in input[token]:  # Takes care of single and multi-line input.
            lines.append(' '.join([str(value) for value in element]))

        if len(input[token]) > 1:
            lines.insert(0, key.upper() + '{')
            endblock = '}'
        else:  # Most have arguments on the same line as keyword.
            lines[0] = key.upper() + '{ ' + lines[0] + ' }'

    # Add a blank line after each line
    lines.append(endblock)
    lines.append('')

    return lines

def writeList(lines, filename):
    with open(filename, 'w') as f:
        f.write('\n'.join(lines))

def runExecutable(execDir, workDir, executable, args, out, err):
    # Runs executable located in execDir from working directory workDir.
    # Tees stdout to file out in real-time, and stderr to file err.
    print('Running executable: ' + executable)
    # Modified by FDV:
    # Adding the / to make the config more generic
    # Modified by JJK to use os.path.join (even safer than above).
    execList = [os.path.join(execDir, executable)] + args
    p = subprocess.Popen(execList, cwd=workDir,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         encoding='utf8')
    while True:
        output = p.stdout.readline()
        error = p.stderr.readline()
        if output == '' and p.poll() is not None:
            break
        if output:
            print(output.strip())
            out.write(output.strip() + os.linesep)
        if error:
            print(error.strip())
            err.write(error.strip() + os.linesep)
    rc = p.poll()

def readColumns(filename, columns=[1,2]):
    # Read file and clear out comments
    with open(filename, 'r') as file:
        cleanStr = file.read()
    comments = pp.ZeroOrMore(pp.pythonStyleComment).setParseAction(pp.replaceWith(''))
    try:
        cleanStr = comments.transformString(cleanStr)
    except pp.ParseException as pe:
        print('Parsing Error using pyparsing: invalid input:', pe)
        sys.exit()
    # Define grammar for ncols of data based on number of entries in first row
    floating = pp.Word(pp.nums + ".+-E").setParseAction(lambda t: float(t[0]))
    EOL = pp.LineEnd().suppress()
    row1entry = floating.copy().setWhitespaceChars(" \t")
    row1 = pp.Group(pp.ZeroOrMore(EOL) + pp.OneOrMore(row1entry) + EOL)
    row = pp.Forward()
    def defineTotalCols(toks):
        ncols = len(toks[0])
        row << pp.Group(floating * ncols)
        return None
    row1.addParseAction(defineTotalCols)
    text = row1 + pp.ZeroOrMore(row)
    try:
        data = text.parseString(cleanStr).asList()
    except pp.ParseException as pe:
        print('Parsing Error using pyparsing: invalid input:', pe)
        sys.exit()
    cols = list(map(list, list(zip(*data))))
    return [cols[i-1] for i in columns]

#### Specific Helper Methods

def writeXANESInput(input, oceaninp='ocean.in'):
    lines = []
    #setInput(input,'feff.print',[[5,0,0,0,0,0]],Force=True)
    writeInput(input, oceaninp)
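# A quick illustration (not part of the module) of the OCEAN input format that
# getInpLines above emits: a token with a single element is written inline as
# KEY{ value }, while a multi-element token becomes a brace-delimited block.
# The token values below are hypothetical.
if __name__ == "__main__":
    sample = {'ocean.ecut': [[70]],
              'ocean.xred': [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]}
    print('\n'.join(getInpLines(sample, 'ocean.ecut')))
    # ECUT{ 70 }
    # (plus the blank spacer lines each call appends)
    print('\n'.join(getInpLines(sample, 'ocean.xred')))
    # XRED{
    # 0.0 0.0 0.0
    # 0.5 0.5 0.5
    # }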
{"hexsha": "52610f8bfa50d48885edcf6c40e146c8185c0506", "size": 12453, "ext": "py", "lang": "Python", "max_stars_repo_path": "corvus/ocean.py", "max_stars_repo_name": "times-software/Corvus", "max_stars_repo_head_hexsha": "d220e2db28743ecb6748e2a245eb3992daa554c1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-09-16T21:07:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T17:08:38.000Z", "max_issues_repo_path": "corvus/ocean.py", "max_issues_repo_name": "times-software/Corvus", "max_issues_repo_head_hexsha": "d220e2db28743ecb6748e2a245eb3992daa554c1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "corvus/ocean.py", "max_forks_repo_name": "times-software/Corvus", "max_forks_repo_head_hexsha": "d220e2db28743ecb6748e2a245eb3992daa554c1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5333333333, "max_line_length": 140, "alphanum_fraction": 0.5739179314, "include": true, "reason": "import numpy", "num_tokens": 2834}
\section{Process' Perspective}
\subsection{Team}
To organize our team, we make a weekly plan each Tuesday. The plan depends on the current hangups of the project and the new tasks of the week.
\newline
We split the team into subgroups, depending on the complexity of the tasks we are taking on. Each subgroup starts its work immediately and coordinates freely until the next week. During the week, we use our Discord server to keep each other updated on progress with the tasks.

\subsection{CI/CD}\label{CI/CD}
%\textcolor{red}{A complete description of stages and tools included in the CI/CD chains. That is, including deployment and release of your systems.}

We have implemented CI/CD with CircleCI. Our CircleCI configuration defines three jobs: static analysis, build, and deploy, which are tied together in a workflow. In our continuous integration chain, we use different tools and metrics to ensure the quality of our extensions to the system.

\begin{itemize}
    \item For static analysis we use the tool \texttt{golangci-lint}, which runs the following linters in parallel:
    \begin{itemize}
        \item \texttt{gosec} inspects the source code for security vulnerabilities by scanning the Go AST.
        \item \texttt{gofmt} formats the Go source code, removing, for example, unnecessary parentheses.
        \item \texttt{staticcheck} uses static analysis to locate bugs and performance issues, while offering simplifications and enforcing style rules.
        \item \texttt{gosimple} suggests simplifications of the source code.
        \item \texttt{unused} ensures that unused variables, functions, constants, and types are removed.
        \item \texttt{typecheck} verifies that our variables are of the correct types.
    \end{itemize}
    \item For quality assessment we have settled on a list of metrics that serves as the standard we want to set for our system.
    \begin{itemize}
        \item \texttt{Reliability} - Static analysis tools (described above) are used to ensure reliability within our system.
        \item \texttt{Maintainability} - Our code should be easy to read. For example, instead of fancy one-liners, code should be written out in longer form for better readability. All variable names should be easy to understand. This makes the code less messy and easier to maintain.
        \item \texttt{Testability} - We want unit tests for our code, such that whenever we push new code to our repository, we know that it does not break anything.
        \item \texttt{Portability} - Our code should be as portable as possible. This means that we should develop with the API in mind, such that we avoid platform-specific solutions.
        \item \texttt{Reusability} - Code should be reusable, to avoid duplicated or redundant code.
        \item \texttt{Technical debt} should be minimized as much as possible. It is unrealistic to avoid it completely, but steps should always be taken to combat it.
    \end{itemize}
\end{itemize}

The final step of our CI/CD chain is a Discord bot that publishes a message to our Discord server whenever there is a new commit to the repository. This way, all developers are encouraged to inspect commits and stay up to date with the code base.

\subsection{Repositories}
%\textcolor{red}{Organization of your repositor(ies). That is, either the structure of the mono-repository or organization of artifacts across repositories.
In essence, it has to be clear what is stored where and why.}

We chose to use a mono-repository structure hosted in a single GitHub repository. The repository includes all of our code for both the API and the MiniTwit web application. We chose to keep the API and the application in the same repository since they are both quite small systems, and because they share several dependencies and functions.

\subsection{Branching strategy}
Our branching strategy is trunk-based development. We have a main branch, which contains a working state of our application. Whenever we want to add a new feature or fix a bug, we create a new branch from the main branch, where we work on the feature. When the work is done, we make a pull request from the work branch into our main branch, which then needs to be approved by another team member before it gets merged into main.

\subsection{Development process}
%\textcolor{red}{Applied development process and tools supporting it. For example, how did you use issues, Kanban boards, etc. to organize open tasks}

We add issues to our GitHub repository, following the course's weekly additions to the project. All individual issues are added to a To Do list that tracks their completion. This creates a quick overview of our progress with the project and maintains a backlog. For weekly tasks, we post a message on our Discord server that tracks who is responsible for what.

\subsection{Monitoring}\label{Monitoring}
%\textcolor{red}{How do you monitor your systems and what precisely do you monitor?}

To monitor our application, we use the tool Prometheus, which collects the metrics we want to track. We use the package PromAuto to set up these metrics. In contrast to the plain Prometheus package, the constructors in PromAuto return \textit{Collectors} that are already registered with a registry. PromAuto provides functions on two levels: top-level functions that return \textit{Collectors} registered with the global registry, and Factory methods that return \textit{Collectors} registered with the registry the Factory was created with. A minimal sketch of such a metric setup is shown at the end of this section.
\newline
We have chosen to track the following three metrics for our app and API:
\begin{itemize}
    \item The CPU load
    \item The total number of processed HTTP requests
    \item The average duration of requests
\end{itemize}

To visualize our data, we use the tool Grafana, which reads the Prometheus data and visualizes it through a web endpoint. The Grafana dashboard can be seen in appendix \ref{appendix:dashboard}.

\subsection{Logging}
%\textcolor{red}{What do you log in your systems and how do you aggregate logs?}

We use the ELK stack for logging. The information we log includes:
\begin{itemize}
    \item Caddy access/error logs
    \item App and API error messages
    \item Log messages from the ELK components themselves
\end{itemize}

The log data is read directly from the directory that contains the logs of the Docker containers, which is mounted as a Docker volume in the Filebeat container. Filebeat then parses the data and sends it to Elasticsearch over an internal network connection. The web front-end we use to inspect our logs is Kibana.

\subsection{Security}
%\textcolor{red}{Brief results of the security assessment.}

To estimate the security standing of our system, we completed a security assessment as a team. We started by doing a risk identification. We concluded that our assets include the user data stored on our servers as well as a web server with almost constant uptime.
The related threat sources would thus be unauthorized access to sensitive data, and infrastructure-provider downtime that would make our service unavailable. We visualized these risk factors in a risk matrix (see figure \ref{fig:Risk_matrix}) based on their impact and how likely they are to happen.

\begin{figure}[H]
    \centering
    \includegraphics[scale=0.85]{images/risk_matrix.png}
    \caption{The risk matrix visualizes threats based on their severity and likelihood.}
    \label{fig:Risk_matrix}
\end{figure}

The most crucial downtime risk concerns DigitalOcean, since we host our application with them. We therefore assess downtime on their end as much more severe than downtime on GitHub or CircleCI.
\newline
In relation to keeping sensitive user data safe, we identified multiple possible risk scenarios. The first risk is a physical attack on DigitalOcean's servers. Since we cannot improve security on their end, the best course of action is to have a setup ready at another provider. Our personal risk factors are leaked login credentials or a leaked SSH key. To improve security here, we have implemented two-factor authentication.

After pentesting our system with Zaproxy, we found a number of vulnerabilities, although none of them posed a particularly high risk. One of the medium-risk issues that Zaproxy found concerned missing HTTP headers. We chose to fix this since it was a larger risk than the others. Figure \ref{fig:Zaproxy_Updated} shows the results from running Zaproxy on the newest version of our application. We unfortunately did not have time to fix the remaining issues; addressing the risks reported by Zaproxy would be an obvious next step for improving security.
\newline
\begin{figure}[H]
    \centering
    \includegraphics[scale=0.50]{images/security_risks_updated.png}
    \caption{Results from Zaproxy based on the newest version of our application.}
    \label{fig:Zaproxy_Updated}
\end{figure}

\subsection{Scaling \& Load balancing}
We have attempted to set up horizontal scaling via Docker Swarm; however, we did not succeed in integrating it completely. Our Swarm setup, which currently lives on the \texttt{feature/swarm} git branch, can only run the MiniTwit application itself, with neither monitoring nor logging. We believe this is due to issues with persistent storage, which is complicated to implement in a cluster setup.
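For illustration, the listing below sketches the request metrics described in section \ref{Monitoring}. Our application registers these metrics through Go's PromAuto package; the sketch uses the Python Prometheus client instead, purely as a compact stand-in, and the metric names are hypothetical.
\begin{verbatim}
from prometheus_client import Counter, Histogram, start_http_server

REQUESTS = Counter('minitwit_http_requests_total',
                   'Total processed HTTP requests')
DURATION = Histogram('minitwit_request_duration_seconds',
                     'Duration of HTTP requests')

@DURATION.time()           # observes each call's duration
def handle_request():
    REQUESTS.inc()         # counts every processed request

start_http_server(8000)    # exposes /metrics for Prometheus to scrape
\end{verbatim}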
{"hexsha": "a4c527b726b10da30058729843f906f539b02474", "size": 9544, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "report/sections/process_perspective.tex", "max_stars_repo_name": "salsitu/minitwit_thesvindler", "max_stars_repo_head_hexsha": "1b8ee619c763bafbc6cc0b19424e834741bd1809", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "report/sections/process_perspective.tex", "max_issues_repo_name": "salsitu/minitwit_thesvindler", "max_issues_repo_head_hexsha": "1b8ee619c763bafbc6cc0b19424e834741bd1809", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2022-03-27T21:54:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:38:23.000Z", "max_forks_repo_path": "report/sections/process_perspective.tex", "max_forks_repo_name": "nissemand243/Gruppe_A", "max_forks_repo_head_hexsha": "1b8ee619c763bafbc6cc0b19424e834741bd1809", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 86.7636363636, "max_line_length": 576, "alphanum_fraction": 0.7830050293, "num_tokens": 2095}
import numpy as np from model.rng.RNG import RNG class GaussianRNG(RNG): def __init__(self, density): super().__init__(density) self.mu, self.sigma = 0, self.get_mapped_sigma(density) def get_next(self): return np.random.normal(self.mu, self.sigma) @staticmethod def get_mapped_sigma(density): # determined empirically return (10 / density) * 1.25
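# A minimal usage sketch (illustrative, not part of the module). It assumes the
# RNG base class accepts the density in its constructor, as the
# super().__init__(density) call above implies.
if __name__ == "__main__":
    rng = GaussianRNG(density=10)  # mapped sigma: (10 / 10) * 1.25 = 1.25
    samples = [rng.get_next() for _ in range(5)]
    print(samples)  # five zero-mean Gaussian offsets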
{"hexsha": "dbdb41d91506852e4fb5e5b1e926b9312de27ffe", "size": 409, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/rng/GaussianRNG.py", "max_stars_repo_name": "rieder91/som-cluster-generator", "max_stars_repo_head_hexsha": "a828e168e0fee4d5d9ac4e7206de89a2fc736b85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model/rng/GaussianRNG.py", "max_issues_repo_name": "rieder91/som-cluster-generator", "max_issues_repo_head_hexsha": "a828e168e0fee4d5d9ac4e7206de89a2fc736b85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/rng/GaussianRNG.py", "max_forks_repo_name": "rieder91/som-cluster-generator", "max_forks_repo_head_hexsha": "a828e168e0fee4d5d9ac4e7206de89a2fc736b85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.7222222222, "max_line_length": 63, "alphanum_fraction": 0.6601466993, "include": true, "reason": "import numpy", "num_tokens": 102}
// extending_return_type_traits.cpp -- The Boost Lambda Library -------- // // Copyright (C) 2000-2003 Jaakko Jarvi (jaakko.jarvi@cs.utu.fi) // Copyright (C) 2000-2003 Gary Powell (powellg@amazon.com) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // // For more information, see www.boost.org // ----------------------------------------------------------------------- #include <boost/core/lightweight_test.hpp> #define BOOST_CHECK BOOST_TEST #include "boost/lambda/bind.hpp" #include "boost/lambda/lambda.hpp" #include "boost/lambda/detail/suppress_unused.hpp" #include <iostream> #include <functional> #include <algorithm> using boost::lambda::detail::suppress_unused_variable_warnings; class A {}; class B {}; using namespace boost::lambda; B operator--(const A&, int) { return B(); } B operator--(A&) { return B(); } B operator++(const A&, int) { return B(); } B operator++(A&) { return B(); } B operator-(const A&) { return B(); } B operator+(const A&) { return B(); } B operator!(const A&) { return B(); } B operator&(const A&) { return B(); } B operator*(const A&) { return B(); } namespace boost { namespace lambda { // unary + and - template<class Act> struct plain_return_type_1<unary_arithmetic_action<Act>, A > { typedef B type; }; // post incr/decr template<class Act> struct plain_return_type_1<post_increment_decrement_action<Act>, A > { typedef B type; }; // pre incr/decr template<class Act> struct plain_return_type_1<pre_increment_decrement_action<Act>, A > { typedef B type; }; // ! template<> struct plain_return_type_1<logical_action<not_action>, A> { typedef B type; }; // & template<> struct plain_return_type_1<other_action<addressof_action>, A> { typedef B type; }; // * template<> struct plain_return_type_1<other_action<contentsof_action>, A> { typedef B type; }; } // lambda } // boost void ok(B /*b*/) {} void test_unary_operators() { A a; int i = 1; ok((++_1)(a)); ok((--_1)(a)); ok((_1++)(a)); ok((_1--)(a)); ok((+_1)(a)); ok((-_1)(a)); ok((!_1)(a)); ok((&_1)(a)); ok((*_1)(a)); BOOST_CHECK((*_1)(make_const(&i)) == 1); } class X {}; class Y {}; class Z {}; Z operator+(const X&, const Y&) { return Z(); } Z operator-(const X&, const Y&) { return Z(); } X operator*(const X&, const Y&) { return X(); } Z operator/(const X&, const Y&) { return Z(); } Z operator%(const X&, const Y&) { return Z(); } class XX {}; class YY {}; class ZZ {}; class VV {}; // it is possible to support differently cv-qualified versions YY operator*(XX&, YY&) { return YY(); } ZZ operator*(const XX&, const YY&) { return ZZ(); } XX operator*(volatile XX&, volatile YY&) { return XX(); } VV operator*(const volatile XX&, const volatile YY&) { return VV(); } // the traits can be more complex: template <class T> class my_vector {}; template<class A, class B> my_vector<typename return_type_2<arithmetic_action<plus_action>, A&, B&>::type> operator+(const my_vector<A>& /*a*/, const my_vector<B>& /*b*/) { typedef typename return_type_2<arithmetic_action<plus_action>, A&, B&>::type res_type; return my_vector<res_type>(); } // bitwise ops: X operator<<(const X&, const Y&) { return X(); } Z operator>>(const X&, const Y&) { return Z(); } Z operator&(const X&, const Y&) { return Z(); } Z operator|(const X&, const Y&) { return Z(); } Z operator^(const X&, const Y&) { return Z(); } // comparison ops: X operator<(const X&, const Y&) { return X(); } Z operator>(const X&, const Y&) { return Z(); } Z operator<=(const X&, const Y&) { 
return Z(); } Z operator>=(const X&, const Y&) { return Z(); } Z operator==(const X&, const Y&) { return Z(); } Z operator!=(const X&, const Y&) { return Z(); } // logical X operator&&(const X&, const Y&) { return X(); } Z operator||(const X&, const Y&) { return Z(); } // arithh assignment Z operator+=( X&, const Y&) { return Z(); } Z operator-=( X&, const Y&) { return Z(); } Y operator*=( X&, const Y&) { return Y(); } Z operator/=( X&, const Y&) { return Z(); } Z operator%=( X&, const Y&) { return Z(); } // bitwise assignment Z operator<<=( X&, const Y&) { return Z(); } Z operator>>=( X&, const Y&) { return Z(); } Y operator&=( X&, const Y&) { return Y(); } Z operator|=( X&, const Y&) { return Z(); } Z operator^=( X&, const Y&) { return Z(); } // assignment class Assign { public: void operator=(const Assign& /*a*/) {} X operator[](const int& /*i*/) { return X(); } }; namespace boost { namespace lambda { // you can do action groups template<class Act> struct plain_return_type_2<arithmetic_action<Act>, X, Y> { typedef Z type; }; // or specialize the exact action template<> struct plain_return_type_2<arithmetic_action<multiply_action>, X, Y> { typedef X type; }; // if you want to make a distinction between differently cv-qualified // types, you need to specialize on a different level: template<> struct return_type_2<arithmetic_action<multiply_action>, XX, YY> { typedef YY type; }; template<> struct return_type_2<arithmetic_action<multiply_action>, const XX, const YY> { typedef ZZ type; }; template<> struct return_type_2<arithmetic_action<multiply_action>, volatile XX, volatile YY> { typedef XX type; }; template<> struct return_type_2<arithmetic_action<multiply_action>, volatile const XX, const volatile YY> { typedef VV type; }; // the mapping can be more complex: template<class A, class B> struct plain_return_type_2<arithmetic_action<plus_action>, my_vector<A>, my_vector<B> > { typedef typename return_type_2<arithmetic_action<plus_action>, A&, B&>::type res_type; typedef my_vector<res_type> type; }; // bitwise binary: // you can do action groups template<class Act> struct plain_return_type_2<bitwise_action<Act>, X, Y> { typedef Z type; }; // or specialize the exact action template<> struct plain_return_type_2<bitwise_action<leftshift_action>, X, Y> { typedef X type; }; // comparison binary: // you can do action groups template<class Act> struct plain_return_type_2<relational_action<Act>, X, Y> { typedef Z type; }; // or specialize the exact action template<> struct plain_return_type_2<relational_action<less_action>, X, Y> { typedef X type; }; // logical binary: // you can do action groups template<class Act> struct plain_return_type_2<logical_action<Act>, X, Y> { typedef Z type; }; // or specialize the exact action template<> struct plain_return_type_2<logical_action<and_action>, X, Y> { typedef X type; }; // arithmetic assignment : // you can do action groups template<class Act> struct plain_return_type_2<arithmetic_assignment_action<Act>, X, Y> { typedef Z type; }; // or specialize the exact action template<> struct plain_return_type_2<arithmetic_assignment_action<multiply_action>, X, Y> { typedef Y type; }; // arithmetic assignment : // you can do action groups template<class Act> struct plain_return_type_2<bitwise_assignment_action<Act>, X, Y> { typedef Z type; }; // or specialize the exact action template<> struct plain_return_type_2<bitwise_assignment_action<and_action>, X, Y> { typedef Y type; }; // assignment template<> struct plain_return_type_2<other_action<assignment_action>, 
Assign, Assign> { typedef void type; }; // subscript template<> struct plain_return_type_2<other_action<subscript_action>, Assign, int> { typedef X type; }; } // end lambda } // end boost void test_binary_operators() { X x; Y y; (_1 + _2)(x, y); (_1 - _2)(x, y); (_1 * _2)(x, y); (_1 / _2)(x, y); (_1 % _2)(x, y); // make a distinction between differently cv-qualified operators XX xx; YY yy; const XX& cxx = xx; const YY& cyy = yy; volatile XX& vxx = xx; volatile YY& vyy = yy; const volatile XX& cvxx = xx; const volatile YY& cvyy = yy; ZZ dummy1 = (_1 * _2)(cxx, cyy); YY dummy2 = (_1 * _2)(xx, yy); XX dummy3 = (_1 * _2)(vxx, vyy); VV dummy4 = (_1 * _2)(cvxx, cvyy); suppress_unused_variable_warnings(dummy1); suppress_unused_variable_warnings(dummy2); suppress_unused_variable_warnings(dummy3); suppress_unused_variable_warnings(dummy4); my_vector<int> v1; my_vector<double> v2; my_vector<double> d = (_1 + _2)(v1, v2); suppress_unused_variable_warnings(d); // bitwise (_1 << _2)(x, y); (_1 >> _2)(x, y); (_1 | _2)(x, y); (_1 & _2)(x, y); (_1 ^ _2)(x, y); // comparison (_1 < _2)(x, y); (_1 > _2)(x, y); (_1 <= _2)(x, y); (_1 >= _2)(x, y); (_1 == _2)(x, y); (_1 != _2)(x, y); // logical (_1 || _2)(x, y); (_1 && _2)(x, y); // arithmetic assignment (_1 += _2)(x, y); (_1 -= _2)(x, y); (_1 *= _2)(x, y); (_1 /= _2)(x, y); (_1 %= _2)(x, y); // bitwise assignment (_1 <<= _2)(x, y); (_1 >>= _2)(x, y); (_1 |= _2)(x, y); (_1 &= _2)(x, y); (_1 ^= _2)(x, y); } int main() { test_unary_operators(); test_binary_operators(); return boost::report_errors(); }
{"hexsha": "731379a9c125f178a00be79517047d33d40d7a70", "size": 9031, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "console/src/boost_1_78_0/libs/lambda/test/extending_rt_traits.cpp", "max_stars_repo_name": "vany152/FilesHash", "max_stars_repo_head_hexsha": "39f282807b7f1abc56dac389e8259ee3bb557a8d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 106.0, "max_stars_repo_stars_event_min_datetime": "2015-08-07T04:23:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-27T18:25:15.000Z", "max_issues_repo_path": "console/src/boost_1_78_0/libs/lambda/test/extending_rt_traits.cpp", "max_issues_repo_name": "vany152/FilesHash", "max_issues_repo_head_hexsha": "39f282807b7f1abc56dac389e8259ee3bb557a8d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 130.0, "max_issues_repo_issues_event_min_datetime": "2016-06-22T22:11:25.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-29T20:24:09.000Z", "max_forks_repo_path": "console/src/boost_1_78_0/libs/lambda/test/extending_rt_traits.cpp", "max_forks_repo_name": "vany152/FilesHash", "max_forks_repo_head_hexsha": "39f282807b7f1abc56dac389e8259ee3bb557a8d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 41.0, "max_forks_repo_forks_event_min_datetime": "2015-07-08T19:18:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-14T16:39:56.000Z", "avg_line_length": 23.1564102564, "max_line_length": 96, "alphanum_fraction": 0.6447790942, "num_tokens": 2625}
SUBROUTINE GDPUVP ( gvect, u, v, y, npts, wind, winpos, filtfc, + windxn, windyn, refvec, iret ) C************************************************************************ C* GDPUVP * C* * C* This subroutine draws a wind profile in GDPROF. * C* * C* GDPUVP ( GVECT, U, V, Y, NPTS, WIND, WINPOS, FILTFC, WINDXN, WINDYN, * C* REFVEC, IRET ) * C* * C* * C* Input parameters: * C* GVECT CHAR* Wind function * C* U(*) REAL U components * C* V(*) REAL V components * C* Y(*) REAL Vertical position * C* NPTS INTEGER Number of levels * C* WIND CHAR* Wind display specification * C* WINPOS CHAR* Wind plotting position * C* FILTFC REAL Factor to filter wind * C* WINDXN REAL Wind barb size in x * C* WINDYN REAL Wind barb size in y * C* REFVEC CHAR* Reference arrow * C* * C* Output parameters: * C* IRET INTEGER Return code * C* 0 = normal return * C** * C* Log: * C* K. Brill/GSC 12/89 * C* K. Brill/GSC 1/90 Fixed wind position problem * C* J. Nielsen/TAMU 11/91 Added filter factor * C* K. Brill/NMC 01/93 Added reference arrow * C* L. Sager/NMC 7/93 Added REFVEC parameter * C* S. Jacobs/EAI 10/93 Changed call to IN_RVEC and * C* plotting of reference arrow * C* S. Jacobs/NMC 8/94 Changed IN_RVEC to GG_RVEC * C************************************************************************ INCLUDE 'GEMPRM.PRM' C* CHARACTER*(*) gvect, wind, winpos, refvec REAL u (*), v (*), y (*), filtfc C* LOGICAL knots C* CHARACTER gv*128, wintyp*1, winuni*1 CHARACTER defstr*12 REAL xwind (LLMXLV), ywind ( LLMXLV ), + spd (LLMXLV), dir (LLMXLV) C* INCLUDE 'ERMISS.FNC' C------------------------------------------------------------------------ iret = 0 C C* Get input wind position. C CALL ST_NUMB ( winpos, iwposn, ier ) IF ( ( iwposn .lt. 1 ) .or. ( iwposn .gt. 3 ) ) iwposn = 1 C C* Get wind input for display type and color number. C knots = .false. CALL ST_LCUC ( gvect, gv, ier ) iuck = index ( gv, 'KNTV') CALL IN_WIND ( wind, wintyp, winuni, iwncol, iret ) IF ( winuni .eq. 'K' .and. iuck .eq. 0 ) knots = .true. C C* Plot winds. C IF ( ( iwncol .gt. 0 ) .and. ( npts .gt. 0 ) ) THEN CALL GSCOLR ( iwncol, ier ) C C* Transform y values to N coordinates. C CALL GQGRAF ( ii, jj, yx, x1, y1, x2, y2, ier ) DO i = 1, npts xwind (i) = x2 END DO CALL GTRANS ( 'M', 'N', npts, xwind, y, xwind, + ywind, ier ) C C* Get coordinate at edge of plotting area. C CALL GQBND ( 'P', x1, y1, x2, y2, ier ) C C* Calculate coordinate for wind in N coordinates. C xwind (1) = x2 + 1.5 * windxn * FLOAT ( iwposn ) DO i = 2, npts xwind (i) = xwind (1) END DO C C* Convert components to speed and direction. C kb = 1 ke = npts indx = 0 DO k = kb, ke IF ( ERMISS ( u ( k ) ) .or. + ERMISS ( v ( k ) ) ) THEN ELSE dd = ATAN2 ( -u ( k ), -v ( k ) ) * RTD sp = sqrt ( u ( k ) * u ( k ) + + v ( k ) * v ( k ) ) IF ( knots ) sp = sp * 1.94 indx = indx + 1 spd ( indx ) = sp dir ( indx ) = dd xwind ( indx ) = xwind ( k ) ywind ( indx ) = ywind ( k ) END IF END DO nout = indx C C* Draw wind barbs or arrows. C IF ( wintyp .eq. 'B' ) THEN IF ( filtfc .ne. 0. ) THEN brbftr = windyn * 0.6 * filtfc nout = 1 yold = ywind (1) DO i = 2, indx IF ( ( ywind (i) - yold ) .ge. brbftr ) THEN nout = nout + 1 ywind ( nout ) = ywind (i) spd ( nout ) = spd ( i ) dir ( nout ) = dir ( i ) yold = ywind (i) END IF END DO CALL GBARB ( 'V', nout, xwind, ywind, spd, + dir, ier ) ELSE CALL GBARB ( 'V', nout, xwind, ywind, spd, + dir, ier ) END IF ELSE CALL GARRW ( 'V', nout, xwind, ywind, spd, + dir, ier ) C C* Plot reference arrow if arrows were requested. C* Parse the parameter REFVEC and draw the arrow. 
C IF ( winuni .ne. 'N' ) THEN IF ( winuni .eq. 'K' ) defstr = 'kts' IF ( winuni .eq. 'M' ) defstr = 'm/s' CALL GG_RVEC ( refvec, defstr, ier ) END IF C END IF END IF C* RETURN END
{"hexsha": "73dc5344b36a9c2f177d08a4867f25ec68b0e5e7", "size": 4601, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gempak/source/programs/gd/gdprof/gdpuvp.f", "max_stars_repo_name": "oxelson/gempak", "max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z", "max_issues_repo_path": "gempak/source/programs/gd/gdprof/gdpuvp.f", "max_issues_repo_name": "oxelson/gempak", "max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z", "max_forks_repo_path": "gempak/source/programs/gd/gdprof/gdpuvp.f", "max_forks_repo_name": "oxelson/gempak", "max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z", "avg_line_length": 29.4935897436, "max_line_length": 73, "alphanum_fraction": 0.4807650511, "num_tokens": 1687}
# Copyright 1999-2021 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, ABCMeta, abstractmethod from datetime import datetime, timedelta, tzinfo from enum import Enum from typing import Tuple, Type import numpy as np import pandas as pd from ...utils import lazy_import cupy = lazy_import("cupy", globals=globals()) cudf = lazy_import("cudf", globals=globals()) class PrimitiveType(Enum): bool = 1 int8 = 2 int16 = 3 int32 = 4 int64 = 5 uint8 = 6 uint16 = 7 uint32 = 8 uint64 = 9 float16 = 10 float32 = 11 float64 = 12 bytes = 13 string = 14 complex64 = 24 complex128 = 25 _primitive_type_to_valid_types = { PrimitiveType.bool: (bool, np.bool_), PrimitiveType.int8: (int, np.int8), PrimitiveType.int16: (int, np.int16), PrimitiveType.int32: (int, np.int32), PrimitiveType.int64: (int, np.int64), PrimitiveType.uint8: (int, np.uint8), PrimitiveType.uint16: (int, np.uint16), PrimitiveType.uint32: (int, np.uint32), PrimitiveType.uint64: (int, np.uint64), PrimitiveType.float16: (float, np.float16), PrimitiveType.float32: (float, np.float32), PrimitiveType.float64: (float, np.float64), PrimitiveType.bytes: (bytes, np.bytes_), PrimitiveType.string: (str, np.unicode_), PrimitiveType.complex64: (complex, np.complex64), PrimitiveType.complex128: (complex, np.complex128), } class AbstractFieldType(ABC): __slots__ = () @property @abstractmethod def type_name(self) -> str: """ Type name. Returns ------- type_name : str """ @property def name(self) -> str: """ Name of field type instance. Returns ------- name : str """ return self.type_name.capitalize() @property @abstractmethod def valid_types(self) -> Tuple[Type, ...]: """ Valid types. Returns ------- valid_types: tuple Valid types. 
""" def validate(self, value): if value is not None and not isinstance(value, self.valid_types): raise TypeError( f"value needs to be instance " f"of {self.valid_types}, got {type(value)}" ) def __call__(self, *args, **kwargs): return type(self)(*args, **kwargs) class SingletonFieldType(AbstractFieldType, metaclass=ABCMeta): __slots__ = () _instance = None def __new__(cls, *args, **kw): if cls._instance is None: inst = super().__new__(cls, *args, **kw) cls._instance = inst return cls._instance class PrimitiveFieldType(AbstractFieldType): __slots__ = ("type",) _type_to_instances = dict() def __new__(cls, *args, **kwargs): primitive_type = args[0] try: return cls._type_to_instances[primitive_type] except KeyError: inst = cls._type_to_instances[primitive_type] = super().__new__(cls) return inst def __init__(self, primitive_type: PrimitiveType): self.type = primitive_type @property def type_name(self) -> str: return self.type.name @property def valid_types(self) -> Tuple[Type, ...]: return _primitive_type_to_valid_types[self.type] class SliceType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "slice" @property def valid_types(self) -> Tuple[Type, ...]: return (slice,) class NDArrayType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "ndarray" @property def valid_types(self) -> Tuple[Type, ...]: if cupy is None: return (np.ndarray,) else: return np.ndarray, cupy.ndarray class DtypeType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "dtype" @property def valid_types(self) -> Tuple[Type, ...]: return np.dtype, pd.api.extensions.ExtensionDtype class KeyType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "dtype" @property def valid_types(self) -> Tuple[Type, ...]: from ...core.entity import ENTITY_TYPE return ENTITY_TYPE class DatetimeType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "datetime" @property def valid_types(self) -> Tuple[Type, ...]: return datetime, pd.Timestamp class TimedeltaType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "timedelta" @property def valid_types(self) -> Tuple[Type, ...]: return timedelta, pd.Timedelta class IndexType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "index" @property def valid_types(self) -> Tuple[Type, ...]: if cudf is None: return (pd.Index,) else: return pd.Index, cudf.Index class SeriesType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "series" @property def valid_types(self) -> Tuple[Type, ...]: if cudf is None: return (pd.Series,) else: return pd.Series, cudf.Series class DataFrameType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "dataframe" @property def valid_types(self) -> Tuple[Type, ...]: if cudf is None: return (pd.DataFrame,) else: return pd.DataFrame, cudf.DataFrame class FunctionType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "function" @property def valid_types(self) -> Tuple[Type, ...]: # pragma: no cover return () def validate(self, value): if value is not None and not callable(value): raise TypeError(f"value should be a function, got {type(value)}") class NamedtupleType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "namedtuple" @property def valid_types(self) -> Tuple[Type, ...]: return (tuple,) def validate(self, value): if not (isinstance(value, 
self.valid_types) and hasattr(value, "_fields")): raise TypeError( f"value should be instance of namedtuple, got {type(value)}" ) class TZInfoType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "tzinfo" @property def valid_types(self) -> Tuple[Type, ...]: return (tzinfo,) class IntervalArrayType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "interval_array" @property def valid_types(self) -> Tuple[Type, ...]: return (pd.arrays.IntervalArray,) class AnyType(SingletonFieldType): __slots__ = () @property def type_name(self) -> str: return "any" @property def valid_types(self) -> Tuple[Type, ...]: # pragma: no cover return () def validate(self, value): # any type is valid return class _CollectionType(AbstractFieldType, metaclass=ABCMeta): __slots__ = ("_field_types",) def __init__(self, *field_types): self._field_types = field_types if len(field_types) == 0: self._field_types = (AnyType(), Ellipsis) @property def name(self) -> str: base_name = super().name if self.is_homogeneous(): if isinstance(self._field_types[0], AnyType): return base_name else: return f"{base_name}[{self._field_types[0].name}, ...]" else: field_type_names = ", ".join([ft.name for ft in self._field_types]) return f"{base_name}[{field_type_names}]" def is_homogeneous(self): return len(self._field_types) == 1 or ( len(self._field_types) == 2 and self._field_types[1] is Ellipsis ) def validate(self, value): if value is None: return if not isinstance(value, self.valid_types): raise TypeError( f"value should be instance of {self.valid_types}, got {type(value)}" ) if self.is_homogeneous(): field_type: AbstractFieldType = self._field_types[0] if not isinstance(field_type, AnyType): for item in value: try: field_type.validate(item) except TypeError: raise TypeError( f"item should be instance of " f"{field_type.valid_types}, " f"got {type(item)}" ) else: if len(value) != len(self._field_types): raise ValueError( f"value should own {len(self._field_types)} items, " f"got {len(value)} items" ) for expect_field_type, item in zip(self._field_types, value): try: expect_field_type.validate(item) except TypeError: raise TypeError( f"item should be instance of " f"{expect_field_type.valid_types}, " f"got {type(item)}" ) class ListType(_CollectionType): __slots__ = () @property def type_name(self) -> str: return "list" @property def valid_types(self) -> Tuple[Type, ...]: return (list,) class TupleType(_CollectionType): __slots__ = () @property def type_name(self) -> str: return "tuple" @property def valid_types(self) -> Tuple[Type, ...]: return (tuple,) class DictType(AbstractFieldType): __slots__ = "key_type", "value_type" key_type: AbstractFieldType value_type: AbstractFieldType def __init__( self, key_type: AbstractFieldType = None, value_type: AbstractFieldType = None ): if key_type is None: key_type = AnyType() if value_type is None: value_type = AnyType() self.key_type = key_type self.value_type = value_type @property def type_name(self) -> str: return "dict" @property def name(self) -> str: if isinstance(self.key_type, AnyType) and isinstance(self.value_type, AnyType): return "Dict" else: return f"Dict[{self.key_type.name}, {self.value_type.name}]" @property def valid_types(self) -> Tuple[Type, ...]: return (dict,) def validate(self, value): super().validate(value) if value is None: return for k, v in value.items(): try: self.key_type.validate(k) except TypeError: raise TypeError( f"key should be instance of " f"{self.key_type.valid_types}, got {type(k)}" ) try: 
                self.value_type.validate(v)
            except TypeError:
                raise TypeError(
                    f"value should be instance of "
                    f"{self.value_type.valid_types}, got {type(v)}"
                )


class ReferenceType(AbstractFieldType):
    __slots__ = ("reference_type",)

    reference_type: Type

    def __init__(self, reference_type: Type = None):
        if reference_type is None:
            reference_type = object
        self.reference_type = reference_type

    @property
    def type_name(self) -> str:
        return "reference"

    @property
    def valid_types(self) -> Tuple[Type, ...]:
        return (self.reference_type,)


class FieldTypes:
    # primitive type
    bool = PrimitiveFieldType(PrimitiveType.bool)
    int8 = PrimitiveFieldType(PrimitiveType.int8)
    int16 = PrimitiveFieldType(PrimitiveType.int16)
    int32 = PrimitiveFieldType(PrimitiveType.int32)
    int64 = PrimitiveFieldType(PrimitiveType.int64)
    uint8 = PrimitiveFieldType(PrimitiveType.uint8)
    uint16 = PrimitiveFieldType(PrimitiveType.uint16)
    uint32 = PrimitiveFieldType(PrimitiveType.uint32)
    uint64 = PrimitiveFieldType(PrimitiveType.uint64)
    float16 = PrimitiveFieldType(PrimitiveType.float16)
    float32 = PrimitiveFieldType(PrimitiveType.float32)
    float64 = PrimitiveFieldType(PrimitiveType.float64)
    complex64 = PrimitiveFieldType(PrimitiveType.complex64)
    complex128 = PrimitiveFieldType(PrimitiveType.complex128)
    bytes = PrimitiveFieldType(PrimitiveType.bytes)
    string = PrimitiveFieldType(PrimitiveType.string)
    key = KeyType()

    # Python types
    slice = SliceType()
    datetime = DatetimeType()
    # alias of datetime
    datetime64 = DatetimeType()
    timedelta = TimedeltaType()
    # alias of timedelta
    timedelta64 = TimedeltaType()
    tzinfo = TZInfoType()
    function = FunctionType()
    namedtuple = NamedtupleType()
    reference = ReferenceType()
    any = AnyType()
    # equivalent to any
    pickled = AnyType()

    # collection
    list = ListType()
    tuple = TupleType()
    dict = DictType()

    # numpy
    ndarray = NDArrayType()
    # alias of ndarray
    arr = NDArrayType()
    dtype = DtypeType()

    # pandas
    index = IndexType()
    series = SeriesType()
    dataframe = DataFrameType()
    interval_array = IntervalArrayType()
    # alias of interval_array
    interval_arr = IntervalArrayType()
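# A short usage sketch (illustrative, not part of the module) of how the
# collection field types compose and validate. Calling a field type instance
# builds a new, parameterized instance via AbstractFieldType.__call__.
if __name__ == "__main__":
    int_list = FieldTypes.list(FieldTypes.int64)
    print(int_list.name)  # List[Int64, ...] (homogeneous list)
    int_list.validate([1, 2, 3])  # OK: ints are valid int64 values
    try:
        int_list.validate([1, "a"])  # item of the wrong type
    except TypeError as e:
        print(e)

    pair = FieldTypes.tuple(FieldTypes.string, FieldTypes.float64)
    pair.validate(("x", 1.0))  # fixed arity, per-position checks

    mapping = FieldTypes.dict(FieldTypes.string, FieldTypes.int32)
    mapping.validate({"a": 1})  # keys and values validated separately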
{"hexsha": "c48f1c56a18dda7f0f423f1a34b3b14aeb52ef2f", "size": 14356, "ext": "py", "lang": "Python", "max_stars_repo_path": "mars/serialization/serializables/field_type.py", "max_stars_repo_name": "yuyiming/mars", "max_stars_repo_head_hexsha": "5e6990d1ea022444dd646c56697e596ef5d7e747", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-24T08:39:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T08:39:26.000Z", "max_issues_repo_path": "mars/serialization/serializables/field_type.py", "max_issues_repo_name": "yuyiming/mars", "max_issues_repo_head_hexsha": "5e6990d1ea022444dd646c56697e596ef5d7e747", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mars/serialization/serializables/field_type.py", "max_forks_repo_name": "yuyiming/mars", "max_forks_repo_head_hexsha": "5e6990d1ea022444dd646c56697e596ef5d7e747", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6357142857, "max_line_length": 87, "alphanum_fraction": 0.5980774589, "include": true, "reason": "import numpy", "num_tokens": 3321}
#include "MergeTreeDataMergerMutator.h" #include <Storages/MergeTree/MergeTreeSequentialSource.h> #include <Storages/MergeTree/MergedBlockOutputStream.h> #include <Storages/MergeTree/MergedColumnOnlyOutputStream.h> #include <Storages/MergeTree/SimpleMergeSelector.h> #include <Storages/MergeTree/AllMergeSelector.h> #include <Storages/MergeTree/TTLMergeSelector.h> #include <Storages/MergeTree/MergeList.h> #include <Storages/MergeTree/MergeTreeDataWriter.h> #include <Storages/MergeTree/StorageFromMergeTreeDataPart.h> #include <Storages/MergeTree/FutureMergedMutatedPart.h> #include <Storages/MergeTree/IMergeTreeDataPart.h> #include <Storages/MergeTree/MergeTreeData.h> #include <Storages/MergeTree/MergeProgress.h> #include <Storages/MergeTree/MergeTask.h> #include <Storages/MergeTree/ActiveDataPartSet.h> #include <Processors/Transforms/TTLTransform.h> #include <Processors/Transforms/TTLCalcTransform.h> #include <Processors/Transforms/DistinctSortedTransform.h> #include <Processors/Merges/MergingSortedTransform.h> #include <Processors/Merges/CollapsingSortedTransform.h> #include <Processors/Merges/SummingSortedTransform.h> #include <Processors/Merges/ReplacingSortedTransform.h> #include <Processors/Merges/GraphiteRollupSortedTransform.h> #include <Processors/Merges/AggregatingSortedTransform.h> #include <Processors/Merges/VersionedCollapsingTransform.h> #include <Processors/Sources/SourceFromSingleChunk.h> #include <Processors/Transforms/ExpressionTransform.h> #include <Processors/Transforms/MaterializingTransform.h> #include <Interpreters/MutationsInterpreter.h> #include <Interpreters/MergeTreeTransaction.h> #include <Interpreters/Context.h> #include <Common/interpolate.h> #include <Common/typeid_cast.h> #include <Common/escapeForFileName.h> #include <Parsers/queryToString.h> #include <cmath> #include <ctime> #include <numeric> #include <boost/algorithm/string/replace.hpp> namespace CurrentMetrics { extern const Metric BackgroundMergesAndMutationsPoolTask; } namespace DB { namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int ABORTED; } /// Do not start to merge parts, if free space is less than sum size of parts times specified coefficient. /// This value is chosen to not allow big merges to eat all free space. Thus allowing small merges to proceed. static const double DISK_USAGE_COEFFICIENT_TO_SELECT = 2; /// To do merge, reserve amount of space equals to sum size of parts times specified coefficient. /// Must be strictly less than DISK_USAGE_COEFFICIENT_TO_SELECT, /// because between selecting parts to merge and doing merge, amount of free space could have decreased. 
static const double DISK_USAGE_COEFFICIENT_TO_RESERVE = 1.1; MergeTreeDataMergerMutator::MergeTreeDataMergerMutator(MergeTreeData & data_, size_t max_tasks_count_) : data(data_), max_tasks_count(max_tasks_count_), log(&Poco::Logger::get(data.getLogName() + " (MergerMutator)")) { } UInt64 MergeTreeDataMergerMutator::getMaxSourcePartsSizeForMerge() const { size_t scheduled_tasks_count = CurrentMetrics::values[CurrentMetrics::BackgroundMergesAndMutationsPoolTask].load(std::memory_order_relaxed); return getMaxSourcePartsSizeForMerge(max_tasks_count, scheduled_tasks_count); } UInt64 MergeTreeDataMergerMutator::getMaxSourcePartsSizeForMerge(size_t max_count, size_t scheduled_tasks_count) const { if (scheduled_tasks_count > max_count) { throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: invalid argument passed to getMaxSourcePartsSize: scheduled_tasks_count = {} > max_count = {}", scheduled_tasks_count, max_count); } size_t free_entries = max_count - scheduled_tasks_count; const auto data_settings = data.getSettings(); /// Always allow maximum size if one or less pool entries is busy. /// One entry is probably the entry where this function is executed. /// This will protect from bad settings. UInt64 max_size = 0; if (scheduled_tasks_count <= 1 || free_entries >= data_settings->number_of_free_entries_in_pool_to_lower_max_size_of_merge) max_size = data_settings->max_bytes_to_merge_at_max_space_in_pool; else max_size = interpolateExponential( data_settings->max_bytes_to_merge_at_min_space_in_pool, data_settings->max_bytes_to_merge_at_max_space_in_pool, static_cast<double>(free_entries) / data_settings->number_of_free_entries_in_pool_to_lower_max_size_of_merge); return std::min(max_size, static_cast<UInt64>(data.getStoragePolicy()->getMaxUnreservedFreeSpace() / DISK_USAGE_COEFFICIENT_TO_SELECT)); } UInt64 MergeTreeDataMergerMutator::getMaxSourcePartSizeForMutation() const { const auto data_settings = data.getSettings(); size_t occupied = CurrentMetrics::values[CurrentMetrics::BackgroundMergesAndMutationsPoolTask].load(std::memory_order_relaxed); /// DataPart can be store only at one disk. Get maximum reservable free space at all disks. UInt64 disk_space = data.getStoragePolicy()->getMaxUnreservedFreeSpace(); /// Allow mutations only if there are enough threads, leave free threads for merges else if (occupied <= 1 || max_tasks_count - occupied >= data_settings->number_of_free_entries_in_pool_to_execute_mutation) return static_cast<UInt64>(disk_space / DISK_USAGE_COEFFICIENT_TO_RESERVE); return 0; } SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge( FutureMergedMutatedPartPtr future_part, bool aggressive, size_t max_total_size_to_merge, const AllowedMergingPredicate & can_merge_callback, bool merge_with_ttl_allowed, const MergeTreeTransactionPtr & txn, String * out_disable_reason) { MergeTreeData::DataPartsVector data_parts; if (txn) { /// Merge predicate (for simple MergeTree) allows to merge two parts only if both parts are visible for merge transaction. /// So at the first glance we could just get all active parts. /// Active parts include uncommitted parts, but it's ok and merge predicate handles it. /// However, it's possible that some transaction is trying to remove a part in the middle, for example, all_2_2_0. /// If parts all_1_1_0 and all_3_3_0 are active and visible for merge transaction, then we would try to merge them. /// But it's wrong, because all_2_2_0 may become active again if transaction will roll back. 
/// That's why we must include some outdated parts into `data_part`, more precisely, such parts that removal is not committed. MergeTreeData::DataPartsVector active_parts; MergeTreeData::DataPartsVector outdated_parts; { auto lock = data.lockParts(); active_parts = data.getDataPartsVectorForInternalUsage({MergeTreeData::DataPartState::Active}, lock); outdated_parts = data.getDataPartsVectorForInternalUsage({MergeTreeData::DataPartState::Outdated}, lock); } ActiveDataPartSet active_parts_set{data.format_version}; for (const auto & part : active_parts) active_parts_set.add(part->name); for (const auto & part : outdated_parts) { /// We don't need rolled back parts. /// NOTE When rolling back a transaction we set creation_csn to RolledBackCSN at first /// and then remove part from working set, so there's no race condition if (part->version.creation_csn == Tx::RolledBackCSN) continue; /// We don't need parts that are finally removed. /// NOTE There's a minor race condition: we may get UnknownCSN if a transaction has been just committed concurrently. /// But it's not a problem if we will add such part to `data_parts`. if (part->version.removal_csn != Tx::UnknownCSN) continue; active_parts_set.add(part->name); } /// Restore "active" parts set from selected active and outdated parts auto remove_pred = [&](const MergeTreeData::DataPartPtr & part) -> bool { return active_parts_set.getContainingPart(part->info) != part->name; }; std::erase_if(active_parts, remove_pred); std::erase_if(outdated_parts, remove_pred); std::merge(active_parts.begin(), active_parts.end(), outdated_parts.begin(), outdated_parts.end(), std::back_inserter(data_parts), MergeTreeData::LessDataPart()); } else { /// Simply get all active parts data_parts = data.getDataPartsVectorForInternalUsage(); } const auto data_settings = data.getSettings(); auto metadata_snapshot = data.getInMemoryMetadataPtr(); if (data_parts.empty()) { if (out_disable_reason) *out_disable_reason = "There are no parts in the table"; return SelectPartsDecision::CANNOT_SELECT; } time_t current_time = std::time(nullptr); IMergeSelector::PartsRanges parts_ranges; StoragePolicyPtr storage_policy = data.getStoragePolicy(); /// Volumes with stopped merges are extremely rare situation. /// Check it once and don't check each part (this is bad for performance). bool has_volumes_with_disabled_merges = storage_policy->hasAnyVolumeWithDisabledMerges(); const String * prev_partition_id = nullptr; /// Previous part only in boundaries of partition frame const MergeTreeData::DataPartPtr * prev_part = nullptr; size_t parts_selected_precondition = 0; for (const MergeTreeData::DataPartPtr & part : data_parts) { const String & partition_id = part->info.partition_id; if (!prev_partition_id || partition_id != *prev_partition_id) { if (parts_ranges.empty() || !parts_ranges.back().empty()) parts_ranges.emplace_back(); /// New partition frame. prev_partition_id = &partition_id; prev_part = nullptr; } /// Check predicate only for the first part in each range. if (!prev_part) { /* Parts can be merged with themselves for TTL needs for example. * So we have to check if this part is currently being inserted with quorum and so on and so forth. * Obviously we have to check it manually only for the first part * of each partition because it will be automatically checked for a pair of parts. */ if (!can_merge_callback(nullptr, part, txn.get(), nullptr)) continue; /// This part can be merged only with next parts (no prev part exists), so start /// new interval if previous was not empty. 
    const auto data_settings = data.getSettings();
    auto metadata_snapshot = data.getInMemoryMetadataPtr();

    if (data_parts.empty())
    {
        if (out_disable_reason)
            *out_disable_reason = "There are no parts in the table";
        return SelectPartsDecision::CANNOT_SELECT;
    }

    time_t current_time = std::time(nullptr);

    IMergeSelector::PartsRanges parts_ranges;

    StoragePolicyPtr storage_policy = data.getStoragePolicy();
    /// Volumes with stopped merges are an extremely rare situation.
    /// Check it once instead of for each part (checking per part would be bad for performance).
    bool has_volumes_with_disabled_merges = storage_policy->hasAnyVolumeWithDisabledMerges();

    const String * prev_partition_id = nullptr;
    /// Previous part, only within the boundaries of one partition frame
    const MergeTreeData::DataPartPtr * prev_part = nullptr;

    size_t parts_selected_precondition = 0;
    for (const MergeTreeData::DataPartPtr & part : data_parts)
    {
        const String & partition_id = part->info.partition_id;

        if (!prev_partition_id || partition_id != *prev_partition_id)
        {
            if (parts_ranges.empty() || !parts_ranges.back().empty())
                parts_ranges.emplace_back();

            /// New partition frame.
            prev_partition_id = &partition_id;
            prev_part = nullptr;
        }

        /// Check the predicate only for the first part in each range.
        if (!prev_part)
        {
            /* Parts can be merged with themselves, e.g. for TTL needs.
             * So we have to check whether this part is currently being inserted with quorum and so on and so forth.
             * Obviously we have to check it manually only for the first part
             * of each partition, because for a pair of parts it is checked automatically.
             */
            if (!can_merge_callback(nullptr, part, txn.get(), nullptr))
                continue;

            /// This part can be merged only with next parts (no prev part exists), so start
            /// a new interval if the previous one was not empty.
            if (!parts_ranges.back().empty())
                parts_ranges.emplace_back();
        }
        else
        {
            /// If we cannot merge with the previous part, we have to start a new parts
            /// interval (in the same partition)
            if (!can_merge_callback(*prev_part, part, txn.get(), nullptr))
            {
                /// Now we have no previous part
                prev_part = nullptr;

                /// Mustn't be empty
                assert(!parts_ranges.back().empty());

                /// Some parts cannot be merged with previous parts and also cannot be merged with themselves,
                /// for example, a merge is already assigned for such parts, or they participate in quorum inserts
                /// and so on.
                /// Also we don't start a new interval here (maybe all next parts cannot be merged and we don't want an empty interval)
                if (!can_merge_callback(nullptr, part, txn.get(), nullptr))
                    continue;

                /// Starting a new interval in the same partition
                parts_ranges.emplace_back();
            }
        }

        IMergeSelector::Part part_info;
        part_info.size = part->getBytesOnDisk();
        part_info.age = current_time - part->modification_time;
        part_info.level = part->info.level;
        part_info.data = &part;
        part_info.ttl_infos = &part->ttl_infos;
        part_info.compression_codec_desc = part->default_codec->getFullCodecDesc();
        part_info.shall_participate_in_merges = has_volumes_with_disabled_merges
            ? part->shallParticipateInMerges(storage_policy)
            : true;

        ++parts_selected_precondition;

        parts_ranges.back().emplace_back(part_info);

        /// Check the consistency of data parts. If this assertion fails, it requires immediate investigation.
        if (prev_part && part->info.partition_id == (*prev_part)->info.partition_id
            && part->info.min_block <= (*prev_part)->info.max_block)
        {
            throw Exception(ErrorCodes::LOGICAL_ERROR, "Part {} intersects previous part {}", part->name, (*prev_part)->name);
        }

        prev_part = &part;
    }

    if (parts_selected_precondition == 0)
    {
        if (out_disable_reason)
            *out_disable_reason = "No parts satisfy preconditions for merge";
        return SelectPartsDecision::CANNOT_SELECT;
    }
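    /// Worked example of the interval construction above (hypothetical parts P1..P4 in one partition):
    ///   can_merge(P1, P2) = true, can_merge(P2, P3) = false, can_merge(nullptr, P3) = true
    ///     -> parts_ranges = [[P1, P2], [P3, P4]]: a selector may merge within a range, never across ranges.
    ///   If can_merge(nullptr, P3) were also false (e.g. P3 is already assigned to another merge),
    ///   P3 would be skipped entirely and parts_ranges would become [[P1, P2], [P4]].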
*out_disable_reason = "There is no need to merge parts according to merge selector algorithm"; return SelectPartsDecision::CANNOT_SELECT; } } MergeTreeData::DataPartsVector parts; parts.reserve(parts_to_merge.size()); for (IMergeSelector::Part & part_info : parts_to_merge) { const MergeTreeData::DataPartPtr & part = *static_cast<const MergeTreeData::DataPartPtr *>(part_info.data); parts.push_back(part); } LOG_DEBUG(log, "Selected {} parts from {} to {}", parts.size(), parts.front()->name, parts.back()->name); future_part->assign(std::move(parts)); return SelectPartsDecision::SELECTED; } SelectPartsDecision MergeTreeDataMergerMutator::selectAllPartsToMergeWithinPartition( FutureMergedMutatedPartPtr future_part, const AllowedMergingPredicate & can_merge, const String & partition_id, bool final, const StorageMetadataPtr & metadata_snapshot, const MergeTreeTransactionPtr & txn, String * out_disable_reason, bool optimize_skip_merged_partitions) { MergeTreeData::DataPartsVector parts = selectAllPartsFromPartition(partition_id); if (parts.empty()) { if (out_disable_reason) *out_disable_reason = "There are no parts inside partition"; return SelectPartsDecision::CANNOT_SELECT; } if (!final && parts.size() == 1) { if (out_disable_reason) *out_disable_reason = "There is only one part inside partition"; return SelectPartsDecision::CANNOT_SELECT; } /// If final, optimize_skip_merged_partitions is true and we have only one part in partition with level > 0 /// than we don't select it to merge. But if there are some expired TTL then merge is needed if (final && optimize_skip_merged_partitions && parts.size() == 1 && parts[0]->info.level > 0 && (!metadata_snapshot->hasAnyTTL() || parts[0]->checkAllTTLCalculated(metadata_snapshot))) { if (out_disable_reason) *out_disable_reason = "Partition skipped due to optimize_skip_merged_partitions"; return SelectPartsDecision::NOTHING_TO_MERGE; } auto it = parts.begin(); auto prev_it = it; UInt64 sum_bytes = 0; while (it != parts.end()) { /// For the case of one part, we check that it can be merged "with itself". if ((it != parts.begin() || parts.size() == 1) && !can_merge(*prev_it, *it, txn.get(), out_disable_reason)) { return SelectPartsDecision::CANNOT_SELECT; } sum_bytes += (*it)->getBytesOnDisk(); prev_it = it; ++it; } auto available_disk_space = data.getStoragePolicy()->getMaxUnreservedFreeSpace(); /// Enough disk space to cover the new merge with a margin. 
SelectPartsDecision MergeTreeDataMergerMutator::selectAllPartsToMergeWithinPartition(
    FutureMergedMutatedPartPtr future_part,
    const AllowedMergingPredicate & can_merge,
    const String & partition_id,
    bool final,
    const StorageMetadataPtr & metadata_snapshot,
    const MergeTreeTransactionPtr & txn,
    String * out_disable_reason,
    bool optimize_skip_merged_partitions)
{
    MergeTreeData::DataPartsVector parts = selectAllPartsFromPartition(partition_id);

    if (parts.empty())
    {
        if (out_disable_reason)
            *out_disable_reason = "There are no parts inside partition";
        return SelectPartsDecision::CANNOT_SELECT;
    }

    if (!final && parts.size() == 1)
    {
        if (out_disable_reason)
            *out_disable_reason = "There is only one part inside partition";
        return SelectPartsDecision::CANNOT_SELECT;
    }

    /// If final, optimize_skip_merged_partitions is true and we have only one part in the partition with level > 0,
    /// then we don't select it for merge. But if there is some expired TTL, then a merge is needed.
    if (final && optimize_skip_merged_partitions && parts.size() == 1 && parts[0]->info.level > 0
        && (!metadata_snapshot->hasAnyTTL() || parts[0]->checkAllTTLCalculated(metadata_snapshot)))
    {
        if (out_disable_reason)
            *out_disable_reason = "Partition skipped due to optimize_skip_merged_partitions";
        return SelectPartsDecision::NOTHING_TO_MERGE;
    }

    auto it = parts.begin();
    auto prev_it = it;

    UInt64 sum_bytes = 0;
    while (it != parts.end())
    {
        /// For the case of one part, we check that it can be merged "with itself".
        if ((it != parts.begin() || parts.size() == 1) && !can_merge(*prev_it, *it, txn.get(), out_disable_reason))
        {
            return SelectPartsDecision::CANNOT_SELECT;
        }

        sum_bytes += (*it)->getBytesOnDisk();

        prev_it = it;
        ++it;
    }

    auto available_disk_space = data.getStoragePolicy()->getMaxUnreservedFreeSpace();
    /// Require enough disk space to cover the new merge, with a margin.
    auto required_disk_space = sum_bytes * DISK_USAGE_COEFFICIENT_TO_SELECT;
    if (available_disk_space <= required_disk_space)
    {
        time_t now = time(nullptr);
        if (now - disk_space_warning_time > 3600)
        {
            disk_space_warning_time = now;
            LOG_WARNING(log,
                "Won't merge parts from {} to {} because not enough free space: {} free and unreserved"
                ", {} required now (+{}% on overhead); suppressing similar warnings for the next hour",
                parts.front()->name,
                (*prev_it)->name,
                ReadableSize(available_disk_space),
                ReadableSize(sum_bytes),
                static_cast<int>((DISK_USAGE_COEFFICIENT_TO_SELECT - 1.0) * 100));
        }

        if (out_disable_reason)
            *out_disable_reason = fmt::format("Insufficient available disk space, required {}", ReadableSize(required_disk_space));

        return SelectPartsDecision::CANNOT_SELECT;
    }

    LOG_DEBUG(log, "Selected {} parts from {} to {}", parts.size(), parts.front()->name, parts.back()->name);
    future_part->assign(std::move(parts));
    return SelectPartsDecision::SELECTED;
}
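/// Numeric illustration of the disk-space check above (hypothetical numbers):
/// DISK_USAGE_COEFFICIENT_TO_SELECT is defined earlier in this file; assuming a value of 2,
/// merging parts that sum to 100 GiB requires more than 200 GiB of unreserved free space.
/// With only 150 GiB available, the merge is refused, out_disable_reason reports the
/// required amount, and the warning is throttled to at most once per hour.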
MergeTreeData::DataPartsVector MergeTreeDataMergerMutator::selectAllPartsFromPartition(const String & partition_id)
{
    MergeTreeData::DataPartsVector parts_from_partition;

    MergeTreeData::DataParts data_parts = data.getDataPartsForInternalUsage();

    for (const auto & current_part : data_parts)
    {
        if (current_part->info.partition_id != partition_id)
            continue;

        parts_from_partition.push_back(current_part);
    }

    return parts_from_partition;
}

/// parts should be sorted.
MergeTaskPtr MergeTreeDataMergerMutator::mergePartsToTemporaryPart(
    FutureMergedMutatedPartPtr future_part,
    const StorageMetadataPtr & metadata_snapshot,
    MergeList::Entry * merge_entry,
    std::unique_ptr<MergeListElement> projection_merge_list_element,
    TableLockHolder,
    time_t time_of_merge,
    ContextPtr context,
    ReservationSharedPtr space_reservation,
    bool deduplicate,
    const Names & deduplicate_by_columns,
    const MergeTreeData::MergingParams & merging_params,
    const MergeTreeTransactionPtr & txn,
    const IMergeTreeDataPart * parent_part,
    const String & suffix)
{
    return std::make_shared<MergeTask>(
        future_part,
        const_cast<StorageMetadataPtr &>(metadata_snapshot),
        merge_entry,
        std::move(projection_merge_list_element),
        time_of_merge,
        context,
        space_reservation,
        deduplicate,
        deduplicate_by_columns,
        merging_params,
        parent_part,
        suffix,
        txn,
        &data,
        this,
        &merges_blocker,
        &ttl_merges_blocker);
}

MutateTaskPtr MergeTreeDataMergerMutator::mutatePartToTemporaryPart(
    FutureMergedMutatedPartPtr future_part,
    StorageMetadataPtr metadata_snapshot,
    MutationCommandsConstPtr commands,
    MergeListEntry * merge_entry,
    time_t time_of_mutation,
    ContextPtr context,
    const MergeTreeTransactionPtr & txn,
    ReservationSharedPtr space_reservation,
    TableLockHolder & holder)
{
    return std::make_shared<MutateTask>(
        future_part,
        metadata_snapshot,
        commands,
        merge_entry,
        time_of_mutation,
        context,
        space_reservation,
        holder,
        txn,
        data,
        *this,
        merges_blocker);
}

MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart(
    MergeTreeData::MutableDataPartPtr & new_data_part,
    const MergeTreeData::DataPartsVector & parts,
    const MergeTreeTransactionPtr & txn,
    MergeTreeData::Transaction * out_transaction)
{
    /// Some of the source parts were possibly created in a transaction, so a non-transactional merge may break isolation.
    if (data.transactions_enabled.load(std::memory_order_relaxed) && !txn)
        throw Exception(ErrorCodes::ABORTED,
            "Cancelling merge, because it was done without starting transaction, "
            "but transactions were enabled for this table");

    /// Rename the new part, add it to the set and remove the original parts.
    auto replaced_parts = data.renameTempPartAndReplace(new_data_part, txn.get(), nullptr, out_transaction);

    /// Let's check that all original parts have been deleted, and nothing else.
    if (replaced_parts.size() != parts.size())
    {
        /** This is normal, although it happens rarely.
         *
         * The situation when 0 parts are replaced instead of N can occur, for example, in the following case:
         * - we had the A part, but the B and C parts were missing;
         * - the merge A, B -> AB was in the queue, but it was not done, because the B part was missing;
         * - the merge AB, C -> ABC was in the queue, but it was not done, because the AB and C parts were missing;
         * - we have completed the task of downloading the B part;
         * - we started the A, B -> AB merge, since all source parts appeared;
         * - we decided to download the ABC part from another replica, since it was impossible to do the AB, C -> ABC merge;
         * - the ABC part appeared. When it was added, the old A, B, C parts were deleted;
         * - the AB merge finished. The AB part was added. But this is an obsolete part. The log will contain the message `Obsolete part added`,
         *   and then we get here.
         *
         * When could M > N parts be replaced?
         * - a new block was added in ReplicatedMergeTreeSink;
         * - it was added to the working dataset in memory and renamed on the filesystem;
         * - but the ZooKeeper transaction that adds it to the reference dataset in ZK failed;
         * - and it failed due to connection loss, so we don't roll back the working dataset in memory,
         *   because we don't know if the part was added to ZK or not
         *   (see ReplicatedMergeTreeSink)
         * - then the method selectPartsToMerge selects a range and sees that the EphemeralLock for the block in this part is unlocked,
         *   and so it is possible to merge a range skipping this part.
         *   (NOTE: Merging with a part that is not in ZK is not possible, see the checks in 'createLogEntryToMergeParts'.)
         * - and after the merge, this part will be removed in addition to the parts that were merged.
         */
        LOG_WARNING(log, "Unexpected number of parts removed when adding {}: {} instead of {}",
            new_data_part->name, replaced_parts.size(), parts.size());
    }
    else
    {
        for (size_t i = 0; i < parts.size(); ++i)
            if (parts[i]->name != replaced_parts[i]->name)
                throw Exception("Unexpected part removed when adding " + new_data_part->name + ": " + replaced_parts[i]->name
                    + " instead of " + parts[i]->name, ErrorCodes::LOGICAL_ERROR);
    }

    LOG_TRACE(log, "Merged {} parts: from {} to {}", parts.size(), parts.front()->name, parts.back()->name);
    return new_data_part;
}

size_t MergeTreeDataMergerMutator::estimateNeededDiskSpace(const MergeTreeData::DataPartsVector & source_parts)
{
    size_t res = 0;
    for (const MergeTreeData::DataPartPtr & part : source_parts)
        res += part->getBytesOnDisk();

    return static_cast<size_t>(res * DISK_USAGE_COEFFICIENT_TO_RESERVE);
}

}
{"hexsha": "f596828ed059c3a85946b2103b201edaa784d43f", "size": 25350, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp", "max_stars_repo_name": "zzachimed/ClickHouse", "max_stars_repo_head_hexsha": "a403f1cd1b2655a60ca196d209ef443ef6d91b39", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp", "max_issues_repo_name": "zzachimed/ClickHouse", "max_issues_repo_head_hexsha": "a403f1cd1b2655a60ca196d209ef443ef6d91b39", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp", "max_forks_repo_name": "zzachimed/ClickHouse", "max_forks_repo_head_hexsha": "a403f1cd1b2655a60ca196d209ef443ef6d91b39", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9008264463, "max_line_length": 154, "alphanum_fraction": 0.6865088757, "num_tokens": 5594}
import discord from discord.ui import Button, View from .pytari2600.pytari2600 import new_atari from PIL import Image from io import BytesIO import pygame import numpy K_A = 0 K_UP = 1 K_DOWN = 2 K_LEFT = 3 K_RIGHT = 4 async def get_acceptable_url(file, channel): message = await channel.send(file=file) return message.attachments[0].url def arr_to_discord_file(arr): rg, b = divmod(arr, 256) r, g = divmod(rg, 256) arr2 = numpy.stack([r, g, b], axis=-1) img = Image.fromarray(arr2.astype("uint8")) x, y = img.size fp = BytesIO() img.crop((0, 0, x, 260)).resize((4*x, 520), resample=Image.NEAREST).save(fp, format="PNG") fp.seek(0) return discord.File(fp, filename="frame.png") class ToggleButton(Button): def __init__(self, id, *args, **kwargs): super().__init__(*args, **kwargs, style=discord.ButtonStyle.primary) self.toggled = False self.callback = self.toggle self.id = id async def toggle(self, itc): if itc.user != self.view.session.user: return self.toggled = not self.toggled if self.toggled: self.style = discord.ButtonStyle.success else: self.style = discord.ButtonStyle.primary self.view.state[self.id] = self.toggled await self.view.update() class ControllerView(View): def __init__(self, session, *args, **kwargs): super().__init__(*args, **kwargs) self.session = session self.state = [False for _ in range(5)] buttons = [Button(style=discord.ButtonStyle.secondary, label=" ", disabled=True) for _ in range(25)] buttons[7] = ToggleButton(emoji="⬆️", id=K_UP) buttons[11] = ToggleButton(emoji="⬅️", id=K_LEFT) buttons[13] = ToggleButton(emoji="➡️", id=K_RIGHT) buttons[17] = ToggleButton(emoji="⬇️", id=K_DOWN) buttons[20] = ToggleButton(emoji="🔴", id=K_A) buttons[0] = Button(style=discord.ButtonStyle.primary, emoji="📸") buttons[0].callback = self.frame_callback for button in buttons: self.add_item(button) def get_state(self): swcha = 0xFF if self.state[K_RIGHT]: swcha ^= 0x80 if self.state[K_LEFT]: swcha ^= 0x40 if self.state[K_DOWN]: swcha ^= 0x20 if self.state[K_UP]: swcha ^= 0x10 input7 = 0xFF if self.state[K_A]: input7 = 0x7F return swcha, input7 async def update(self): await self.session.update() async def frame_callback(self, itc): if itc.user != self.session.user: return await self.session.do_frame() class AtariGame: def __init__(self, bot, channel, user, name): self.bot = bot self.channel = channel self.frame = 1 self.user = user self.name = name self.message = None self.emulator = None self.embed = discord.Embed(title="Frame 1") self.embed.set_image(url="https://cdn.discordapp.com/attachments/851697058057814068/851701176072011806/frame.jpg") self.controller = ControllerView(self) async def start(self): self.message = await self.channel.send(embed=self.embed, view=self.controller) self.emulator = new_atari("roms\\" + self.name + ".a26", headless=True) for _ in range(6): self.emulator.frame() self.frame += 1 async def update(self): await self.message.edit(embed=self.embed, view=self.controller) async def do_frame(self): self.emulator.inputs.swcha, self.emulator.inputs.input7 = self.controller.get_state() self.emulator.frame() self.frame += 1 file = arr_to_discord_file(self.emulator.stella.display_cache) url = await get_acceptable_url(file, self.bot.dump_channel) self.embed.set_image(url=url) self.embed.title = f"Frame {self.frame}" await self.update()
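# Illustration only (not part of the original module): how arr_to_discord_file above unpacks
# the emulator's packed 0xRRGGBB framebuffer integers with two divmods, and how
# ControllerView.get_state encodes the active-low SWCHA joystick register.
import numpy

packed = numpy.array([0x123456, 0xFF0000])
rg, b = divmod(packed, 256)      # b = [0x56, 0x00], rg keeps the red/green bytes
r, g = divmod(rg, 256)           # r = [0x12, 0xFF], g = [0x34, 0x00]
assert list(r) == [0x12, 0xFF] and list(g) == [0x34, 0x00] and list(b) == [0x56, 0x00]

# SWCHA bits are 1 when a direction is released; holding LEFT clears bit 0x40:
assert 0xFF ^ 0x40 == 0xBF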
{"hexsha": "47ba6aeeea52dfd399273fff3cef54a1424f04bf", "size": 3933, "ext": "py", "lang": "Python", "max_stars_repo_path": "modules/emulator/atari.py", "max_stars_repo_name": "5space/nesbot", "max_stars_repo_head_hexsha": "38a9e8cadf0cbe41ee25e0850c244e2834a6e12c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modules/emulator/atari.py", "max_issues_repo_name": "5space/nesbot", "max_issues_repo_head_hexsha": "38a9e8cadf0cbe41ee25e0850c244e2834a6e12c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/emulator/atari.py", "max_forks_repo_name": "5space/nesbot", "max_forks_repo_head_hexsha": "38a9e8cadf0cbe41ee25e0850c244e2834a6e12c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.968503937, "max_line_length": 122, "alphanum_fraction": 0.6270022883, "include": true, "reason": "import numpy", "num_tokens": 1037}
# MINLP written by GAMS Convert at 04/21/18 13:55:19 # # Equation counts # Total E G L N X C B # 202 96 36 70 0 0 0 0 # # Variable counts # x b i s1s s2s sc si # Total cont binary integer sos1 sos2 scont sint # 322 98 28 0 196 0 0 0 # FX 0 0 0 0 0 0 0 0 # # Nonzero counts # Total const NL DLL # 1247 1187 60 0 # # Reformulation has removed 1 variable and 1 equation from pyomo.environ import * model = m = ConcreteModel() m.x1 = Var(within=Reals,bounds=(0,None),initialize=0) m.x2 = Var(within=Reals,bounds=(0,None),initialize=0) m.x3 = Var(within=Reals,bounds=(0,None),initialize=0) m.x4 = Var(within=Reals,bounds=(0,None),initialize=0) m.x5 = Var(within=Reals,bounds=(0,None),initialize=0) m.x6 = Var(within=Reals,bounds=(0,None),initialize=0) m.x7 = Var(within=Reals,bounds=(0,None),initialize=0) m.x8 = Var(within=Reals,bounds=(0,None),initialize=0) m.x9 = Var(within=Reals,bounds=(0,None),initialize=0) m.x10 = Var(within=Reals,bounds=(0,None),initialize=0) m.x11 = Var(within=Reals,bounds=(0,None),initialize=0) m.x12 = Var(within=Reals,bounds=(0,None),initialize=0) m.x13 = Var(within=Reals,bounds=(0,None),initialize=0) m.x14 = Var(within=Reals,bounds=(0,None),initialize=0) m.x15 = Var(within=Reals,bounds=(0,None),initialize=0) m.x16 = Var(within=Reals,bounds=(0,None),initialize=0) m.x17 = Var(within=Reals,bounds=(0,None),initialize=0) m.x18 = Var(within=Reals,bounds=(0,None),initialize=0) m.x19 = Var(within=Reals,bounds=(0,None),initialize=0) m.x20 = Var(within=Reals,bounds=(0,None),initialize=0) m.x21 = Var(within=Reals,bounds=(0,None),initialize=0) m.x22 = Var(within=Reals,bounds=(0,None),initialize=0) m.x23 = Var(within=Reals,bounds=(0,None),initialize=0) m.x24 = Var(within=Reals,bounds=(0,None),initialize=0) m.x25 = Var(within=Reals,bounds=(0,None),initialize=0) m.x26 = Var(within=Reals,bounds=(0,None),initialize=0) m.x27 = Var(within=Reals,bounds=(0,None),initialize=0) m.x28 = Var(within=Reals,bounds=(0,None),initialize=0) m.x29 = Var(within=Reals,bounds=(None,None),initialize=0) m.x30 = Var(within=Reals,bounds=(None,None),initialize=0) m.x31 = Var(within=Reals,bounds=(None,None),initialize=0) m.x32 = Var(within=Reals,bounds=(None,None),initialize=0) m.x33 = Var(within=Reals,bounds=(None,None),initialize=0) m.x34 = Var(within=Reals,bounds=(None,None),initialize=0) m.x35 = Var(within=Reals,bounds=(None,None),initialize=0) m.x36 = Var(within=Reals,bounds=(None,None),initialize=0) m.x37 = Var(within=Reals,bounds=(None,None),initialize=0) m.x38 = Var(within=Reals,bounds=(None,None),initialize=0) m.x39 = Var(within=Reals,bounds=(None,None),initialize=0) m.x40 = Var(within=Reals,bounds=(None,None),initialize=0) m.x41 = Var(within=Reals,bounds=(None,None),initialize=0) m.x42 = Var(within=Reals,bounds=(None,None),initialize=0) m.x43 = Var(within=Reals,bounds=(None,None),initialize=0) m.x44 = Var(within=Reals,bounds=(None,None),initialize=0) m.x45 = Var(within=Reals,bounds=(None,None),initialize=0) m.x46 = Var(within=Reals,bounds=(None,None),initialize=0) m.x47 = Var(within=Reals,bounds=(None,None),initialize=0) m.x48 = Var(within=Reals,bounds=(None,None),initialize=0) m.x49 = Var(within=Reals,bounds=(None,None),initialize=0) m.x50 = Var(within=Reals,bounds=(None,None),initialize=0) m.x51 = Var(within=Reals,bounds=(None,None),initialize=0) m.x52 = Var(within=Reals,bounds=(None,None),initialize=0) m.x53 = Var(within=Reals,bounds=(None,None),initialize=0) m.x54 = Var(within=Reals,bounds=(None,None),initialize=0) m.x55 = Var(within=Reals,bounds=(None,None),initialize=0) m.x56 = 
Var(within=Reals,bounds=(None,None),initialize=0) m.x57 = Var(within=Reals,bounds=(6.5,None),initialize=11.5) m.x58 = Var(within=Reals,bounds=(3.25,None),initialize=8.25) m.x59 = Var(within=Reals,bounds=(16.58,None),initialize=21.58) m.x60 = Var(within=Reals,bounds=(14.92,None),initialize=19.92) m.x61 = Var(within=Reals,bounds=(12.925,None),initialize=17.925) m.x62 = Var(within=Reals,bounds=(12.26,None),initialize=17.26) m.x63 = Var(within=Reals,bounds=(8.76,None),initialize=13.76) m.x64 = Var(within=Reals,bounds=(16.08,None),initialize=21.08) m.x65 = Var(within=Reals,bounds=(None,None),initialize=0) m.x66 = Var(within=Reals,bounds=(None,None),initialize=0) m.x67 = Var(within=Reals,bounds=(None,None),initialize=0) m.x68 = Var(within=Reals,bounds=(None,None),initialize=0) m.x69 = Var(within=Reals,bounds=(None,None),initialize=0) m.x70 = Var(within=Reals,bounds=(None,None),initialize=0) m.x71 = Var(within=Reals,bounds=(None,None),initialize=0) m.x72 = Var(within=Reals,bounds=(None,None),initialize=0) m.x73 = Var(within=Reals,bounds=(None,None),initialize=0) m.x74 = Var(within=Reals,bounds=(None,None),initialize=0) m.x75 = Var(within=Reals,bounds=(None,None),initialize=0) m.x76 = Var(within=Reals,bounds=(None,None),initialize=0) m.x77 = Var(within=Reals,bounds=(None,None),initialize=0) m.x78 = Var(within=Reals,bounds=(None,None),initialize=0) m.x79 = Var(within=Reals,bounds=(None,None),initialize=0) m.x80 = Var(within=Reals,bounds=(None,None),initialize=0) m.x81 = Var(within=Reals,bounds=(None,None),initialize=0) m.x82 = Var(within=Reals,bounds=(None,None),initialize=0) m.x83 = Var(within=Reals,bounds=(None,None),initialize=0) m.x84 = Var(within=Reals,bounds=(None,None),initialize=0) m.x85 = Var(within=Reals,bounds=(None,None),initialize=0) m.x86 = Var(within=Reals,bounds=(None,None),initialize=0) m.x87 = Var(within=Reals,bounds=(None,None),initialize=0) m.x88 = Var(within=Reals,bounds=(None,None),initialize=0) m.x89 = Var(within=Reals,bounds=(None,None),initialize=0) m.x90 = Var(within=Reals,bounds=(None,None),initialize=0) m.x91 = Var(within=Reals,bounds=(None,None),initialize=0) m.x92 = Var(within=Reals,bounds=(None,None),initialize=0) m.x93 = Var(within=Reals,bounds=(0,2.5),initialize=0.961470588235294) m.x94 = Var(within=Reals,bounds=(0,6),initialize=2.30752941176471) m.x95 = Var(within=Reals,bounds=(None,None),initialize=0) m.x96 = Var(within=Reals,bounds=(None,None),initialize=0) m.x97 = Var(within=Reals,bounds=(None,None),initialize=0) m.b99 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b100 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b101 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b102 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b103 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b104 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b105 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b106 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b107 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b108 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b109 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b110 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b111 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b112 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b113 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b114 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b115 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b116 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b117 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b118 
= Var(within=Binary,bounds=(0,1),initialize=0.5) m.b119 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b120 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b121 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b122 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b123 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b124 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b125 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.b126 = Var(within=Binary,bounds=(0,1),initialize=0.5) m.s1s127 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s128 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s129 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s130 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s131 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s132 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s133 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s134 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s135 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s136 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s137 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s138 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s139 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s140 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s141 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s142 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s143 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s144 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s145 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s146 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s147 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s148 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s149 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s150 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s151 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s152 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s153 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s154 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s155 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s156 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s157 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s158 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s159 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s160 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s161 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s162 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s163 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s164 = 
Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s165 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s166 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s167 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s168 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s169 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s170 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s171 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s172 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s173 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s174 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s175 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s176 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s177 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s178 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s179 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s180 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s181 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s182 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s183 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s184 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s185 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s186 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s187 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s188 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s189 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s190 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s191 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s192 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s193 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s194 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s195 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s196 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s197 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s198 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s199 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s200 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s201 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s202 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s203 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s204 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s205 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s206 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s207 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s208 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s209 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s210 = 
Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s211 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s212 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s213 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s214 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s215 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s216 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s217 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s218 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s219 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s220 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s221 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s222 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s223 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s224 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s225 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s226 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s227 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s228 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s229 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s230 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s231 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s232 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s233 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s234 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s235 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s236 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s237 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s238 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s239 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s240 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s241 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s242 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s243 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s244 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s245 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s246 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s247 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s248 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s249 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s250 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s251 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s252 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s253 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s254 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s255 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s256 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s257 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s258 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s259 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s260 = 
Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s261 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s262 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s263 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s264 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s265 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s266 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s267 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s268 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s269 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s270 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s271 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s272 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s273 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s274 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s275 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s276 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s277 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s278 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s279 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s280 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s281 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s282 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s283 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s284 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s285 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s286 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s287 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714) m.s1s288 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s289 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s290 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s291 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s292 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s293 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s294 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s295 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s296 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s297 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s298 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s299 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s300 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s301 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s302 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s303 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s304 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s305 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s306 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s307 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s308 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s309 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s310 = Var(within=CannotHandle,bounds=(0,None),initialize=0) m.s1s311 = 
Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s312 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s313 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s314 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s315 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s316 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s317 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s318 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s319 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s320 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s321 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s322 = Var(within=CannotHandle,bounds=(0,None),initialize=0)

# SOS1 marking: 28 SOS1 sets (sosno 1..28) of 7 consecutive members each
# (s1s127..s1s322), with in-set references 1..7, exported to the solver as suffixes.
m.sosno = Suffix(direction=Suffix.EXPORT, datatype=Suffix.INT)
m.ref = Suffix(direction=Suffix.EXPORT, datatype=Suffix.INT)
for i in range(196):
    v = getattr(m, 's1s%d' % (127 + i))
    m.sosno[v] = i // 7 + 1
    m.ref[v] = i % 7 + 1
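# Illustration only (not from the converter's output): the loop above reproduces the
# original per-variable suffix table, e.g. the source assigned s1s140.sosno = 2 and
# s1s140.ref = 7, and for i = 140 - 127 = 13 the loop yields the same pair:
assert ((140 - 127) // 7 + 1, (140 - 127) % 7 + 1) == (2, 7)
# Note: `within=CannotHandle` appears to be the placeholder GAMS Convert emits for SOS
# member variables it cannot express directly in Pyomo; given the (0, None) bounds,
# substituting NonNegativeReals would be the natural choice to load the model as-is.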
m.obj = Objective(expr= 10*m.x95 + m.x96 + 10*m.x97, sense=minimize)

m.c1 = Constraint(expr= - m.x1 - m.x2 - m.x3 + m.x8 + m.x14 + m.x26 + m.x93 == 0)
m.c2 = Constraint(expr= - m.x4 - m.x5 - m.x6 - m.x7 + m.x9 + m.x20 + m.x24 + m.x27 + m.x94 == 0)
m.c3 = Constraint(expr= m.x1 + m.x4 - m.x8 - m.x9 -
m.x10 - m.x11 - m.x12 - m.x13 + m.x15 + m.x17 + m.x21 + m.x28 == 1.212) m.c4 = Constraint(expr= m.x2 + m.x10 - m.x14 - m.x15 - m.x16 + m.x18 == 0.452) m.c5 = Constraint(expr= m.x11 + m.x16 - m.x17 - m.x18 - m.x19 + m.x22 == 0.245) m.c6 = Constraint(expr= m.x5 + m.x12 + m.x19 - m.x20 - m.x21 - m.x22 - m.x23 + m.x25 == 0.652) m.c7 = Constraint(expr= m.x6 + m.x23 - m.x24 - m.x25 == 0.252) m.c8 = Constraint(expr= m.x3 + m.x7 + m.x13 - m.x26 - m.x27 - m.x28 == 0.456) m.c9 = Constraint(expr= m.x29 - 38721.1970117411*m.s1s127 - 2543.8701482414*m.s1s128 - 207.747320703761*m.s1s129 - 23.9314504121258*m.s1s130 - 1.5722267648148*m.s1s131 - 0.181112645550961*m.s1s132 - 0.0390863672545667*m.s1s133 == 0) m.c10 = Constraint(expr= m.x30 - 32510.4890865135*m.s1s134 - 2135.84468132099*m.s1s135 - 174.425573683688*m.s1s136 - 20.0929521164322*m.s1s137 - 1.32004857865156*m.s1s138 - 0.152062982061963*m.s1s139 - 0.0328170876451919*m.s1s140 == 0) m.c11 = Constraint(expr= m.x31 - 63468.4628982673*m.s1s141 - 4169.69361956223*m.s1s142 - 340.521578201805*m.s1s143 - 39.2263796008983*m.s1s144 - 2.57705917665854*m.s1s145 - 0.296864304610023*m.s1s146 - 0.0640670186196026*m.s1s147 == 0) m.c12 = Constraint(expr= m.x32 - 50797.5773435889*m.s1s148 - 3337.25325093014*m.s1s149 - 272.539627020641*m.s1s150 - 31.3951994533022*m.s1s151 - 2.06257339263358*m.s1s152 - 0.237598120158509*m.s1s153 - 0.0512766370081929*m.s1s154 == 0) m.c13 = Constraint(expr= m.x33 - 59165.7349698592*m.s1s155 - 3887.01689524085*m.s1s156 - 317.436542928413*m.s1s157 - 36.5670992066393*m.s1s158 - 2.40235218067626*m.s1s159 - 0.27673893405488*m.s1s160 - 0.0597237127048799*m.s1s161 == 0) m.c14 = Constraint(expr= m.x34 - 32977.2294678044*m.s1s162 - 2166.50816836621*m.s1s163 - 176.929733450444*m.s1s164 - 20.3814187742893*m.s1s165 - 1.339*m.s1s166 - 0.154246090843839*m.s1s167 - 0.0332882297421199*m.s1s168 == 0) m.c15 = Constraint(expr= m.x35 - 33843.9321019273*m.s1s169 - 2223.4480134252*m.s1s170 - 181.579774357788*m.s1s171 - 20.9170801874496*m.s1s172 - 1.37419139860501*m.s1s173 - 0.158299963634093*m.s1s174 - 0.0341631060391402*m.s1s175 == 0) m.c16 = Constraint(expr= m.x36 - 38721.1970117411*m.s1s176 - 2543.8701482414*m.s1s177 - 207.747320703761*m.s1s178 - 23.9314504121258*m.s1s179 - 1.5722267648148*m.s1s180 - 0.181112645550961*m.s1s181 - 0.0390863672545667*m.s1s182 == 0) m.c17 = Constraint(expr= m.x37 - 50797.5773435889*m.s1s183 - 3337.25325093014*m.s1s184 - 272.539627020641*m.s1s185 - 31.3951994533022*m.s1s186 - 2.06257339263358*m.s1s187 - 0.237598120158509*m.s1s188 - 0.0512766370081929*m.s1s189 == 0) m.c18 = Constraint(expr= m.x38 - 31810.181054648*m.s1s190 - 2089.8364782095*m.s1s191 - 170.668274619734*m.s1s192 - 19.660130090483*m.s1s193 - 1.2916134290104*m.s1s194 - 0.148787395299671*m.s1s195 - 0.0321101751776739*m.s1s196 == 0) m.c19 = Constraint(expr= m.x39 - 39461.9459070343*m.s1s197 - 2592.53519858857*m.s1s198 - 211.721593458417*m.s1s199 - 24.3892667200816*m.s1s200 - 1.60230396616872*m.s1s201 - 0.184577388442944*m.s1s202 - 0.0398341019735132*m.s1s203 == 0) m.c20 = Constraint(expr= m.x40 - 32977.2294678044*m.s1s204 - 2166.50816836621*m.s1s205 - 176.929733450444*m.s1s206 - 20.3814187742893*m.s1s207 - 1.339*m.s1s208 - 0.154246090843839*m.s1s209 - 0.0332882297421199*m.s1s210 == 0) m.c21 = Constraint(expr= m.x41 - 52785.5148814787*m.s1s211 - 3467.85497167945*m.s1s212 - 283.205327698691*m.s1s213 - 32.6238347301504*m.s1s214 - 2.14329116080854*m.s1s215 - 0.246896402610059*m.s1s216 - 0.0532833223041444*m.s1s217 == 0) m.c22 = Constraint(expr= m.x42 - 
32510.4890865135*m.s1s218 - 2135.84468132099*m.s1s219 - 174.425573683688*m.s1s220 - 20.0929521164322*m.s1s221 - 1.32004857865156*m.s1s222 - 0.152062982061963*m.s1s223 - 0.0328170876451919*m.s1s224 == 0)
m.c23 = Constraint(expr= m.x43 - 31810.181054648*m.s1s225 - 2089.8364782095*m.s1s226 - 170.668274619734*m.s1s227 - 19.660130090483*m.s1s228 - 1.2916134290104*m.s1s229 - 0.148787395299671*m.s1s230 - 0.0321101751776739*m.s1s231 == 0)
m.c24 = Constraint(expr= m.x44 - 30677.4142839491*m.s1s232 - 2015.41699236491*m.s1s233 - 164.590743970989*m.s1s234 - 18.9600290116536*m.s1s235 - 1.24561882211213*m.s1s236 - 0.143489047044288*m.s1s237 - 0.0309667255575633*m.s1s238 == 0)
m.c25 = Constraint(expr= m.x45 - 39461.9459070343*m.s1s239 - 2592.53519858857*m.s1s240 - 211.721593458417*m.s1s241 - 24.3892667200816*m.s1s242 - 1.60230396616872*m.s1s243 - 0.184577388442944*m.s1s244 - 0.0398341019735132*m.s1s245 == 0)
m.c26 = Constraint(expr= m.x46 - 30677.4142839491*m.s1s246 - 2015.41699236491*m.s1s247 - 164.590743970989*m.s1s248 - 18.9600290116536*m.s1s249 - 1.24561882211213*m.s1s250 - 0.143489047044288*m.s1s251 - 0.0309667255575633*m.s1s252 == 0)
m.c27 = Constraint(expr= m.x47 - 28361.2795383154*m.s1s253 - 1863.25366856746*m.s1s254 - 152.164196629274*m.s1s255 - 17.5285530220005*m.s1s256 - 1.15157500841239*m.s1s257 - 0.132655670919396*m.s1s258 - 0.0286287479053886*m.s1s259 == 0)
m.c28 = Constraint(expr= m.x48 - 59165.7349698592*m.s1s260 - 3887.01689524085*m.s1s261 - 317.436542928413*m.s1s262 - 36.5670992066393*m.s1s263 - 2.40235218067626*m.s1s264 - 0.27673893405488*m.s1s265 - 0.0597237127048799*m.s1s266 == 0)
m.c29 = Constraint(expr= m.x49 - 32977.2294678044*m.s1s267 - 2166.50816836621*m.s1s268 - 176.929733450444*m.s1s269 - 20.3814187742893*m.s1s270 - 1.339*m.s1s271 - 0.154246090843839*m.s1s272 - 0.0332882297421199*m.s1s273 == 0)
m.c30 = Constraint(expr= m.x50 - 28361.2795383154*m.s1s274 - 1863.25366856746*m.s1s275 - 152.164196629274*m.s1s276 - 17.5285530220005*m.s1s277 - 1.15157500841239*m.s1s278 - 0.132655670919396*m.s1s279 - 0.0286287479053886*m.s1s280 == 0)
m.c31 = Constraint(expr= m.x51 - 50797.5773435889*m.s1s281 - 3337.25325093014*m.s1s282 - 272.539627020641*m.s1s283 - 31.3951994533022*m.s1s284 - 2.06257339263358*m.s1s285 - 0.237598120158509*m.s1s286 - 0.0512766370081929*m.s1s287 == 0)
m.c32 = Constraint(expr= m.x52 - 32977.2294678044*m.s1s288 - 2166.50816836621*m.s1s289 - 176.929733450444*m.s1s290 - 20.3814187742893*m.s1s291 - 1.339*m.s1s292 - 0.154246090843839*m.s1s293 - 0.0332882297421199*m.s1s294 == 0)
m.c33 = Constraint(expr= m.x53 - 50797.5773435889*m.s1s295 - 3337.25325093014*m.s1s296 - 272.539627020641*m.s1s297 - 31.3951994533022*m.s1s298 - 2.06257339263358*m.s1s299 - 0.237598120158509*m.s1s300 - 0.0512766370081929*m.s1s301 == 0)
m.c34 = Constraint(expr= m.x54 - 63468.4628982673*m.s1s302 - 4169.69361956223*m.s1s303 - 340.521578201805*m.s1s304 - 39.2263796008983*m.s1s305 - 2.57705917665854*m.s1s306 - 0.296864304610023*m.s1s307 - 0.0640670186196026*m.s1s308 == 0)
m.c35 = Constraint(expr= m.x55 - 33843.9321019273*m.s1s309 - 2223.4480134252*m.s1s310 - 181.579774357788*m.s1s311 - 20.9170801874496*m.s1s312 - 1.37419139860501*m.s1s313 - 0.158299963634093*m.s1s314 - 0.0341631060391402*m.s1s315 == 0)
m.c36 = Constraint(expr= m.x56 - 52785.5148814787*m.s1s316 - 3467.85497167945*m.s1s317 - 283.205327698691*m.s1s318 - 32.6238347301504*m.s1s319 - 2.14329116080854*m.s1s320 - 0.246896402610059*m.s1s321 - 0.0532833223041444*m.s1s322 == 0)
m.c37 = Constraint(expr=-m.x1**2*m.x29 + m.x57 - m.x59 - m.x65 == 0)
m.c38 = Constraint(expr=-m.x2**2*m.x30 + m.x57 - m.x60 - m.x66 == 0)
m.c39 = Constraint(expr=-m.x3**2*m.x31 + m.x57 - m.x64 - m.x67 == 0)
m.c40 = Constraint(expr=-m.x4**2*m.x32 + m.x58 - m.x59 - m.x68 == 0)
m.c41 = Constraint(expr=-m.x5**2*m.x33 + m.x58 - m.x62 - m.x69 == 0)
m.c42 = Constraint(expr=-m.x6**2*m.x34 + m.x58 - m.x63 - m.x70 == 0)
m.c43 = Constraint(expr=-m.x7**2*m.x35 + m.x58 - m.x64 - m.x71 == 0)
m.c44 = Constraint(expr=-m.x8**2*m.x36 - m.x57 + m.x59 - m.x72 == 0)
m.c45 = Constraint(expr=-m.x9**2*m.x37 - m.x58 + m.x59 - m.x73 == 0)
m.c46 = Constraint(expr=-m.x10**2*m.x38 + m.x59 - m.x60 - m.x74 == 0)
m.c47 = Constraint(expr=-m.x11**2*m.x39 + m.x59 - m.x61 - m.x75 == 0)
m.c48 = Constraint(expr=-m.x12**2*m.x40 + m.x59 - m.x62 - m.x76 == 0)
m.c49 = Constraint(expr=-m.x13**2*m.x41 + m.x59 - m.x64 - m.x77 == 0)
m.c50 = Constraint(expr=-m.x14**2*m.x42 - m.x57 + m.x60 - m.x78 == 0)
m.c51 = Constraint(expr=-m.x15**2*m.x43 - m.x59 + m.x60 - m.x79 == 0)
m.c52 = Constraint(expr=-m.x16**2*m.x44 + m.x60 - m.x61 - m.x80 == 0)
m.c53 = Constraint(expr=-m.x17**2*m.x45 - m.x59 + m.x61 - m.x81 == 0)
m.c54 = Constraint(expr=-m.x18**2*m.x46 - m.x60 + m.x61 - m.x82 == 0)
m.c55 = Constraint(expr=-m.x19**2*m.x47 + m.x61 - m.x62 - m.x83 == 0)
m.c56 = Constraint(expr=-m.x20**2*m.x48 - m.x58 + m.x62 - m.x84 == 0)
m.c57 = Constraint(expr=-m.x21**2*m.x49 - m.x59 + m.x62 - m.x85 == 0)
m.c58 = Constraint(expr=-m.x22**2*m.x50 - m.x61 + m.x62 - m.x86 == 0)
m.c59 = Constraint(expr=-m.x23**2*m.x51 + m.x62 - m.x63 - m.x87 == 0)
m.c60 = Constraint(expr=-m.x24**2*m.x52 - m.x58 + m.x63 - m.x88 == 0)
m.c61 = Constraint(expr=-m.x25**2*m.x53 - m.x62 + m.x63 - m.x89 == 0)
m.c62 = Constraint(expr=-m.x26**2*m.x54 - m.x57 + m.x64 - m.x90 == 0)
m.c63 = Constraint(expr=-m.x27**2*m.x55 - m.x58 + m.x64 - m.x91 == 0)
m.c64 = Constraint(expr=-m.x28**2*m.x56 - m.x59 + m.x64 - m.x92 == 0)
m.c65 = Constraint(expr= m.x65 + 12*m.b99 <= 12)
m.c66 = Constraint(expr= m.x66 + 12*m.b100 <= 12)
m.c67 = Constraint(expr= m.x67 + 12*m.b101 <= 12)
m.c68 = Constraint(expr= m.x68 + 12*m.b102 <= 12)
m.c69 = Constraint(expr= m.x69 + 12*m.b103 <= 12)
m.c70 = Constraint(expr= m.x70 + 12*m.b104 <= 12)
m.c71 = Constraint(expr= m.x71 + 12*m.b105 <= 12)
m.c72 = Constraint(expr= m.x72 + 12*m.b106 <= 12)
m.c73 = Constraint(expr= m.x73 + 12*m.b107 <= 12)
m.c74 = Constraint(expr= m.x74 + 12*m.b108 <= 12)
m.c75 = Constraint(expr= m.x75 + 12*m.b109 <= 12)
m.c76 = Constraint(expr= m.x76 + 12*m.b110 <= 12)
m.c77 = Constraint(expr= m.x77 + 12*m.b111 <= 12)
m.c78 = Constraint(expr= m.x78 + 12*m.b112 <= 12)
m.c79 = Constraint(expr= m.x79 + 12*m.b113 <= 12)
m.c80 = Constraint(expr= m.x80 + 12*m.b114 <= 12)
m.c81 = Constraint(expr= m.x81 + 12*m.b115 <= 12)
m.c82 = Constraint(expr= m.x82 + 12*m.b116 <= 12)
m.c83 = Constraint(expr= m.x83 + 12*m.b117 <= 12)
m.c84 = Constraint(expr= m.x84 + 12*m.b118 <= 12)
m.c85 = Constraint(expr= m.x85 + 12*m.b119 <= 12)
m.c86 = Constraint(expr= m.x86 + 12*m.b120 <= 12)
m.c87 = Constraint(expr= m.x87 + 12*m.b121 <= 12)
m.c88 = Constraint(expr= m.x88 + 12*m.b122 <= 12)
m.c89 = Constraint(expr= m.x89 + 12*m.b123 <= 12)
m.c90 = Constraint(expr= m.x90 + 12*m.b124 <= 12)
m.c91 = Constraint(expr= m.x91 + 12*m.b125 <= 12)
m.c92 = Constraint(expr= m.x92 + 12*m.b126 <= 12)
m.c93 = Constraint(expr= m.x65 - 12*m.b99 >= -12)
m.c94 = Constraint(expr= m.x66 - 12*m.b100 >= -12)
m.c95 = Constraint(expr= m.x67 - 12*m.b101 >= -12)
m.c96 = Constraint(expr= m.x68 - 12*m.b102 >= -12)
m.c97 = Constraint(expr= m.x69 - 12*m.b103 >= -12)
m.c98 = Constraint(expr= m.x70 - 12*m.b104 >= -12)
m.c99 = Constraint(expr= m.x71 - 12*m.b105 >= -12)
m.c100 = Constraint(expr= m.x72 - 12*m.b106 >= -12)
m.c101 = Constraint(expr= m.x73 - 12*m.b107 >= -12)
m.c102 = Constraint(expr= m.x74 - 12*m.b108 >= -12)
m.c103 = Constraint(expr= m.x75 - 12*m.b109 >= -12)
m.c104 = Constraint(expr= m.x76 - 12*m.b110 >= -12)
m.c105 = Constraint(expr= m.x77 - 12*m.b111 >= -12)
m.c106 = Constraint(expr= m.x78 - 12*m.b112 >= -12)
m.c107 = Constraint(expr= m.x79 - 12*m.b113 >= -12)
m.c108 = Constraint(expr= m.x80 - 12*m.b114 >= -12)
m.c109 = Constraint(expr= m.x81 - 12*m.b115 >= -12)
m.c110 = Constraint(expr= m.x82 - 12*m.b116 >= -12)
m.c111 = Constraint(expr= m.x83 - 12*m.b117 >= -12)
m.c112 = Constraint(expr= m.x84 - 12*m.b118 >= -12)
m.c113 = Constraint(expr= m.x85 - 12*m.b119 >= -12)
m.c114 = Constraint(expr= m.x86 - 12*m.b120 >= -12)
m.c115 = Constraint(expr= m.x87 - 12*m.b121 >= -12)
m.c116 = Constraint(expr= m.x88 - 12*m.b122 >= -12)
m.c117 = Constraint(expr= m.x89 - 12*m.b123 >= -12)
m.c118 = Constraint(expr= m.x90 - 12*m.b124 >= -12)
m.c119 = Constraint(expr= m.x91 - 12*m.b125 >= -12)
m.c120 = Constraint(expr= m.x92 - 12*m.b126 >= -12)
m.c121 = Constraint(expr=-(1.02*m.x93*(-6.5 + m.x57) + 1.02*m.x94*(-3.25 + m.x58)) + m.x95 == 0)
m.c122 = Constraint(expr= m.x96 - 9.11349113439539*m.s1s127 - 17.6144733325531*m.s1s128 - 32.2986551864818*m.s1s129 - 54.4931814987685*m.s1s130 - 105.323928905069*m.s1s131 - 177.698914733437*m.s1s132 - 257.546555368226*m.s1s133 - 7.65172765642961*m.s1s134 - 14.7891900880288*m.s1s135 - 27.118094428506*m.s1s136 - 45.7527173518919*m.s1s137 - 88.4304387640365*m.s1s138 - 149.196798497086*m.s1s139 - 216.237232413786*m.s1s140 - 14.9380525029139*m.s1s141 - 28.8721329260735*m.s1s142 - 52.941183552398*m.s1s143 - 89.3205462402005*m.s1s144 - 172.637944844116*m.s1s145 - 291.268810037089*m.s1s146 - 422.148209648796*m.s1s147 - 11.9558099050809*m.s1s148 - 23.1080813747994*m.s1s149 - 42.3719709499612*m.s1s150 - 71.4885338137291*m.s1s151 - 138.172392322055*m.s1s152 - 233.119713791557*m.s1s153 - 337.870264236031*m.s1s154 - 13.9253546563734*m.s1s155 - 26.9147996770731*m.s1s156 - 49.3521332015331*m.s1s157 - 83.2652237802191*m.s1s158 - 160.93427229773*m.s1s159 - 271.522775764452*m.s1s160 - 393.529446744536*m.s1s161 - 7.76158051882097*m.s1s162 - 15.0015127080393*m.s1s163 - 27.5074183079396*m.s1s164 - 46.4095712271164*m.s1s165 - 89.7*m.s1s166 - 151.338758602103*m.s1s167 - 219.341665817957*m.s1s168 - 7.96556922221359*m.s1s169 - 15.3957802311063*m.s1s170 - 28.2303641796868*m.s1s171 - 47.6293006671023*m.s1s172 - 92.0574820424717*m.s1s173 - 155.316221319321*m.s1s174 - 225.10637081608*m.s1s175 - 9.11349113439539*m.s1s176 - 17.6144733325531*m.s1s177 - 32.2986551864818*m.s1s178 - 54.4931814987685*m.s1s179 - 105.323928905069*m.s1s180 - 177.698914733437*m.s1s181 - 257.546555368226*m.s1s182 - 11.9558099050809*m.s1s183 - 23.1080813747994*m.s1s184 - 42.3719709499612*m.s1s185 - 71.4885338137291*m.s1s186 - 138.172392322055*m.s1s187 - 233.119713791557*m.s1s188 - 337.870264236031*m.s1s189 - 7.48690188831565*m.s1s190 - 14.4706163324673*m.s1s191 - 26.5339439013751*m.s1s192 - 44.7671586494086*m.s1s193 - 86.5255598074927*m.s1s194 - 145.982952158506*m.s1s195 - 211.579268940989*m.s1s196 - 9.28783513744935*m.s1s197 - 17.9514438466182*m.s1s198 - 32.916538800503*m.s1s199 - 55.5356535066454*m.s1s200 - 107.338809384118*m.s1s201 - 181.098351861986*m.s1s202 - 262.473503425068*m.s1s203 - 7.76158051882097*m.s1s204 - 15.0015127080393*m.s1s205 - 27.5074183079396*m.s1s206 - 46.4095712271164*m.s1s207 - 89.7*m.s1s208 - 151.338758602103*m.s1s209 - 219.341665817957*m.s1s210 - 12.4236944883441*m.s1s211 - 24.0124044704238*m.s1s212 - 44.0301766363479*m.s1s213 - 74.2862014846846*m.s1s214 - 143.579699122125*m.s1s215 - 242.242736071415*m.s1s216 - 351.092646411238*m.s1s217 - 7.65172765642961*m.s1s218 - 14.7891900880288*m.s1s219 - 27.118094428506*m.s1s220 - 45.7527173518919*m.s1s221 - 88.4304387640365*m.s1s222 - 149.196798497086*m.s1s223 - 216.237232413786*m.s1s224 - 7.48690188831565*m.s1s225 - 14.4706163324673*m.s1s226 - 26.5339439013751*m.s1s227 - 44.7671586494086*m.s1s228 - 86.5255598074927*m.s1s229 - 145.982952158506*m.s1s230 - 211.579268940989*m.s1s231 - 7.22029184733547*m.s1s232 - 13.9553148538372*m.s1s233 - 25.5890649679471*m.s1s234 - 43.1729913716576*m.s1s235 - 83.44436769489*m.s1s236 - 140.784470672041*m.s1s237 - 204.044889780639*m.s1s238 - 9.28783513744935*m.s1s239 - 17.9514438466182*m.s1s240 - 32.916538800503*m.s1s241 - 55.5356535066454*m.s1s242 - 107.338809384118*m.s1s243 - 181.098351861986*m.s1s244 - 262.473503425068*m.s1s245 - 7.22029184733547*m.s1s246 - 13.9553148538372*m.s1s247 - 25.5890649679471*m.s1s248 - 43.1729913716576*m.s1s249 - 83.44436769489*m.s1s250 - 140.784470672041*m.s1s251 - 204.044889780639*m.s1s252 - 6.67516217420068*m.s1s253 - 12.9016931463472*m.s1s254 - 23.6570989315674*m.s1s255 - 39.913444642481*m.s1s256 - 77.1443452237428*m.s1s257 - 130.155289178744*m.s1s258 - 188.639567333459*m.s1s259 - 13.9253546563734*m.s1s260 - 26.9147996770731*m.s1s261 - 49.3521332015331*m.s1s262 - 83.2652237802191*m.s1s263 - 160.93427229773*m.s1s264 - 271.522775764452*m.s1s265 - 393.529446744536*m.s1s266 - 7.76158051882097*m.s1s267 - 15.0015127080393*m.s1s268 - 27.5074183079396*m.s1s269 - 46.4095712271164*m.s1s270 - 89.7*m.s1s271 - 151.338758602103*m.s1s272 - 219.341665817957*m.s1s273 - 6.67516217420068*m.s1s274 - 12.9016931463472*m.s1s275 - 23.6570989315674*m.s1s276 - 39.913444642481*m.s1s277 - 77.1443452237428*m.s1s278 - 130.155289178744*m.s1s279 - 188.639567333459*m.s1s280 - 11.9558099050809*m.s1s281 - 23.1080813747994*m.s1s282 - 42.3719709499612*m.s1s283 - 71.4885338137291*m.s1s284 - 138.172392322055*m.s1s285 - 233.119713791557*m.s1s286 - 337.870264236031*m.s1s287 - 7.76158051882097*m.s1s288 - 15.0015127080393*m.s1s289 - 27.5074183079396*m.s1s290 - 46.4095712271164*m.s1s291 - 89.7*m.s1s292 - 151.338758602103*m.s1s293 - 219.341665817957*m.s1s294 - 11.9558099050809*m.s1s295 - 23.1080813747994*m.s1s296 - 42.3719709499612*m.s1s297 - 71.4885338137291*m.s1s298 - 138.172392322055*m.s1s299 - 233.119713791557*m.s1s300 - 337.870264236031*m.s1s301 - 14.9380525029139*m.s1s302 - 28.8721329260735*m.s1s303 - 52.941183552398*m.s1s304 - 89.3205462402005*m.s1s305 - 172.637944844116*m.s1s306 - 291.268810037089*m.s1s307 - 422.148209648796*m.s1s308 - 7.96556922221359*m.s1s309 - 15.3957802311063*m.s1s310 - 28.2303641796868*m.s1s311 - 47.6293006671023*m.s1s312 - 92.0574820424717*m.s1s313 - 155.316221319321*m.s1s314 - 225.10637081608*m.s1s315 - 12.4236944883441*m.s1s316 - 24.0124044704238*m.s1s317 - 44.0301766363479*m.s1s318 - 74.2862014846846*m.s1s319 - 143.579699122125*m.s1s320 - 242.242736071415*m.s1s321 - 351.092646411238*m.s1s322 == 0)
m.c123 = Constraint(expr= - 0.2*m.x93 - 0.17*m.x94 + m.x97 == 0)
m.c125 = Constraint(expr= - m.b99 + m.s1s127 + m.s1s128 + m.s1s129 + m.s1s130 + m.s1s131 + m.s1s132 + m.s1s133 == 0)
m.c126 = Constraint(expr= - m.b100 + m.s1s134 + m.s1s135 + m.s1s136 + m.s1s137 + m.s1s138 + m.s1s139 + m.s1s140 == 0)
m.c127 = Constraint(expr= - m.b101 + m.s1s141 + m.s1s142 + m.s1s143 + m.s1s144 + m.s1s145 + m.s1s146 + m.s1s147 == 0)
m.c128 = Constraint(expr= - m.b102 + m.s1s148 + m.s1s149 + m.s1s150 + m.s1s151 + m.s1s152 + m.s1s153 + m.s1s154 == 0)
m.c129 = Constraint(expr= - m.b103 + m.s1s155 + m.s1s156 + m.s1s157 + m.s1s158 + m.s1s159 + m.s1s160 + m.s1s161 == 0)
m.c130 = Constraint(expr= - m.b104 + m.s1s162 + m.s1s163 + m.s1s164 + m.s1s165 + m.s1s166 + m.s1s167 + m.s1s168 == 0)
m.c131 = Constraint(expr= - m.b105 + m.s1s169 + m.s1s170 + m.s1s171 + m.s1s172 + m.s1s173 + m.s1s174 + m.s1s175 == 0)
m.c132 = Constraint(expr= - m.b106 + m.s1s176 + m.s1s177 + m.s1s178 + m.s1s179 + m.s1s180 + m.s1s181 + m.s1s182 == 0)
m.c133 = Constraint(expr= - m.b107 + m.s1s183 + m.s1s184 + m.s1s185 + m.s1s186 + m.s1s187 + m.s1s188 + m.s1s189 == 0)
m.c134 = Constraint(expr= - m.b108 + m.s1s190 + m.s1s191 + m.s1s192 + m.s1s193 + m.s1s194 + m.s1s195 + m.s1s196 == 0)
m.c135 = Constraint(expr= - m.b109 + m.s1s197 + m.s1s198 + m.s1s199 + m.s1s200 + m.s1s201 + m.s1s202 + m.s1s203 == 0)
m.c136 = Constraint(expr= - m.b110 + m.s1s204 + m.s1s205 + m.s1s206 + m.s1s207 + m.s1s208 + m.s1s209 + m.s1s210 == 0)
m.c137 = Constraint(expr= - m.b111 + m.s1s211 + m.s1s212 + m.s1s213 + m.s1s214 + m.s1s215 + m.s1s216 + m.s1s217 == 0)
m.c138 = Constraint(expr= - m.b112 + m.s1s218 + m.s1s219 + m.s1s220 + m.s1s221 + m.s1s222 + m.s1s223 + m.s1s224 == 0)
m.c139 = Constraint(expr= - m.b113 + m.s1s225 + m.s1s226 + m.s1s227 + m.s1s228 + m.s1s229 + m.s1s230 + m.s1s231 == 0)
m.c140 = Constraint(expr= - m.b114 + m.s1s232 + m.s1s233 + m.s1s234 + m.s1s235 + m.s1s236 + m.s1s237 + m.s1s238 == 0)
m.c141 = Constraint(expr= - m.b115 + m.s1s239 + m.s1s240 + m.s1s241 + m.s1s242 + m.s1s243 + m.s1s244 + m.s1s245 == 0)
m.c142 = Constraint(expr= - m.b116 + m.s1s246 + m.s1s247 + m.s1s248 + m.s1s249 + m.s1s250 + m.s1s251 + m.s1s252 == 0)
m.c143 = Constraint(expr= - m.b117 + m.s1s253 + m.s1s254 + m.s1s255 + m.s1s256 + m.s1s257 + m.s1s258 + m.s1s259 == 0)
m.c144 = Constraint(expr= - m.b118 + m.s1s260 + m.s1s261 + m.s1s262 + m.s1s263 + m.s1s264 + m.s1s265 + m.s1s266 == 0)
m.c145 = Constraint(expr= - m.b119 + m.s1s267 + m.s1s268 + m.s1s269 + m.s1s270 + m.s1s271 + m.s1s272 + m.s1s273 == 0)
m.c146 = Constraint(expr= - m.b120 + m.s1s274 + m.s1s275 + m.s1s276 + m.s1s277 + m.s1s278 + m.s1s279 + m.s1s280 == 0)
m.c147 = Constraint(expr= - m.b121 + m.s1s281 + m.s1s282 + m.s1s283 + m.s1s284 + m.s1s285 + m.s1s286 + m.s1s287 == 0)
m.c148 = Constraint(expr= - m.b122 + m.s1s288 + m.s1s289 + m.s1s290 + m.s1s291 + m.s1s292 + m.s1s293 + m.s1s294 == 0)
m.c149 = Constraint(expr= - m.b123 + m.s1s295 + m.s1s296 + m.s1s297 + m.s1s298 + m.s1s299 + m.s1s300 + m.s1s301 == 0)
m.c150 = Constraint(expr= - m.b124 + m.s1s302 + m.s1s303 + m.s1s304 + m.s1s305 + m.s1s306 + m.s1s307 + m.s1s308 == 0)
m.c151 = Constraint(expr= - m.b125 + m.s1s309 + m.s1s310 + m.s1s311 + m.s1s312 + m.s1s313 + m.s1s314 + m.s1s315 == 0)
m.c152 = Constraint(expr= - m.b126 + m.s1s316 + m.s1s317 + m.s1s318 + m.s1s319 + m.s1s320 + m.s1s321 + m.s1s322 == 0)
m.c153 = Constraint(expr= m.x1 - 0.0176041976445841*m.s1s127 - 0.0686820348432157*m.s1s128 - 0.240338257044582*m.s1s129 - 0.708118780382974*m.s1s130 - 2*m.s1s131 - 2*m.s1s132 - 2*m.s1s133 <= 0)
m.c154 = Constraint(expr= m.x2 - 0.0192122784105588*m.s1s134 - 0.0749558941482069*m.s1s135 - 0.262292300976835*m.s1s136 - 0.772802909347502*m.s1s137 - 2*m.s1s138 - 2*m.s1s139 - 2*m.s1s140 <= 0)
m.c155 = Constraint(expr= m.x3 - 0.0137502828767635*m.s1s141 - 0.0536461488738445*m.s1s142 - 0.187723353667753*m.s1s143 - 0.553097263345606*m.s1s144 - 2*m.s1s145 - 2*m.s1s146 - 2*m.s1s147 <= 0)
m.c156 = Constraint(expr= m.x4 - 0.0153698320860398*m.s1s148 - 0.0599647518268192*m.s1s149 - 0.209833968534382*m.s1s150 - 0.618242703881818*m.s1s151 - 2*m.s1s152 - 2*m.s1s153 - 2*m.s1s154 <= 0)
m.c157 = Constraint(expr= m.x5 - 0.0142414920290718*m.s1s155 - 0.0555625806701283*m.s1s156 - 0.194429501479406*m.s1s157 - 0.572855870518057*m.s1s158 - 2*m.s1s159 - 2*m.s1s160 - 2*m.s1s161 <= 0)
m.c158 = Constraint(expr= m.x6 - 0.0190758342372385*m.s1s162 - 0.0744235629590588*m.s1s163 - 0.260429520550158*m.s1s164 - 0.767314520523847*m.s1s165 - 2*m.s1s166 - 2*m.s1s167 - 2*m.s1s168 <= 0)
m.c159 = Constraint(expr= m.x7 - 0.0188299954674205*m.s1s169 - 0.0734644333642121*m.s1s170 - 0.257073249355929*m.s1s171 - 0.757425796631457*m.s1s172 - 2*m.s1s173 - 2*m.s1s174 - 2*m.s1s175 <= 0)
m.c160 = Constraint(expr= m.x8 - 0.0176041976445841*m.s1s176 - 0.0686820348432157*m.s1s177 - 0.240338257044582*m.s1s178 - 0.708118780382974*m.s1s179 - 2*m.s1s180 - 2*m.s1s181 - 2*m.s1s182 <= 0)
m.c161 = Constraint(expr= m.x9 - 0.0153698320860398*m.s1s183 - 0.0599647518268192*m.s1s184 - 0.209833968534382*m.s1s185 - 0.618242703881818*m.s1s186 - 2*m.s1s187 - 2*m.s1s188 - 2*m.s1s189 <= 0)
m.c162 = Constraint(expr= m.x10 - 0.0194226083350049*m.s1s190 - 0.0757764874800376*m.s1s191 - 0.265163793814297*m.s1s192 - 0.781263310246409*m.s1s193 - 2*m.s1s194 - 2*m.s1s195 - 2*m.s1s196 <= 0)
m.c163 = Constraint(expr= m.x11 - 0.0174381887671401*m.s1s197 - 0.0680343582075014*m.s1s198 - 0.238071849619242*m.s1s199 - 0.701441168247406*m.s1s200 - 2*m.s1s201 - 2*m.s1s202 - 2*m.s1s203 <= 0)
m.c164 = Constraint(expr= m.x12 - 0.0190758342372385*m.s1s204 - 0.0744235629590588*m.s1s205 - 0.260429520550158*m.s1s206 - 0.767314520523847*m.s1s207 - 2*m.s1s208 - 2*m.s1s209 - 2*m.s1s210 <= 0)
m.c165 = Constraint(expr= m.x13 - 0.0150776355652448*m.s1s211 - 0.0588247594211735*m.s1s212 - 0.205844806180028*m.s1s213 - 0.606489265973719*m.s1s214 - 2*m.s1s215 - 2*m.s1s216 - 2*m.s1s217 <= 0)
m.c166 = Constraint(expr= m.x14 - 0.0192122784105588*m.s1s218 - 0.0749558941482069*m.s1s219 - 0.262292300976835*m.s1s220 - 0.772802909347502*m.s1s221 - 2*m.s1s222 - 2*m.s1s223 - 2*m.s1s224 <= 0)
m.c167 = Constraint(expr= m.x15 - 0.0194226083350049*m.s1s225 - 0.0757764874800376*m.s1s226 - 0.265163793814297*m.s1s227 - 0.781263310246409*m.s1s228 - 2*m.s1s229 - 2*m.s1s230 - 2*m.s1s231 <= 0)
m.c168 = Constraint(expr= m.x16 - 0.0197779487583483*m.s1s232 - 0.0771628331590627*m.s1s233 - 0.270015017353593*m.s1s234 - 0.795556675515238*m.s1s235 - 2*m.s1s236 - 2*m.s1s237 - 2*m.s1s238 <= 0)
m.c169 = Constraint(expr= m.x17 - 0.0174381887671401*m.s1s239 - 0.0680343582075014*m.s1s240 - 0.238071849619242*m.s1s241 - 0.701441168247406*m.s1s242 - 2*m.s1s243 - 2*m.s1s244 - 2*m.s1s245 <= 0)
m.c170 = Constraint(expr= m.x18 - 0.0197779487583483*m.s1s246 - 0.0771628331590627*m.s1s247 - 0.270015017353593*m.s1s248 - 0.795556675515238*m.s1s249 - 2*m.s1s250 - 2*m.s1s251 - 2*m.s1s252 <= 0)
m.c171 = Constraint(expr= m.x19 - 0.02056968839856*m.s1s253 - 0.0802517719822704*m.s1s254 - 0.280824105561038*m.s1s255 - 0.827403949655566*m.s1s256 - 2*m.s1s257 - 2*m.s1s258 - 2*m.s1s259 <= 0)
m.c172 = Constraint(expr= m.x20 - 0.0142414920290718*m.s1s260 - 0.0555625806701283*m.s1s261 - 0.194429501479406*m.s1s262 - 0.572855870518057*m.s1s263 - 2*m.s1s264 - 2*m.s1s265 - 2*m.s1s266 <= 0)
m.c173 = Constraint(expr= m.x21 - 0.0190758342372385*m.s1s267 - 0.0744235629590588*m.s1s268 - 0.260429520550158*m.s1s269 - 0.767314520523847*m.s1s270 - 2*m.s1s271 - 2*m.s1s272 - 2*m.s1s273 <= 0)
m.c174 = Constraint(expr= m.x22 - 0.02056968839856*m.s1s274 - 0.0802517719822704*m.s1s275 - 0.280824105561038*m.s1s276 - 0.827403949655566*m.s1s277 - 2*m.s1s278 - 2*m.s1s279 - 2*m.s1s280 <= 0)
m.c175 = Constraint(expr= m.x23 - 0.0153698320860398*m.s1s281 - 0.0599647518268192*m.s1s282 - 0.209833968534382*m.s1s283 - 0.618242703881818*m.s1s284 - 2*m.s1s285 - 2*m.s1s286 - 2*m.s1s287 <= 0)
m.c176 = Constraint(expr= m.x24 - 0.0190758342372385*m.s1s288 - 0.0744235629590588*m.s1s289 - 0.260429520550158*m.s1s290 - 0.767314520523847*m.s1s291 - 2*m.s1s292 - 2*m.s1s293 - 2*m.s1s294 <= 0)
m.c177 = Constraint(expr= m.x25 - 0.0153698320860398*m.s1s295 - 0.0599647518268192*m.s1s296 - 0.209833968534382*m.s1s297 - 0.618242703881818*m.s1s298 - 2*m.s1s299 - 2*m.s1s300 - 2*m.s1s301 <= 0)
m.c178 = Constraint(expr= m.x26 - 0.0137502828767635*m.s1s302 - 0.0536461488738445*m.s1s303 - 0.187723353667753*m.s1s304 - 0.553097263345606*m.s1s305 - 2*m.s1s306 - 2*m.s1s307 - 2*m.s1s308 <= 0)
m.c179 = Constraint(expr= m.x27 - 0.0188299954674205*m.s1s309 - 0.0734644333642121*m.s1s310 - 0.257073249355929*m.s1s311 - 0.757425796631457*m.s1s312 - 2*m.s1s313 - 2*m.s1s314 - 2*m.s1s315 <= 0)
m.c180 = Constraint(expr= m.x28 - 0.0150776355652448*m.s1s316 - 0.0588247594211735*m.s1s317 - 0.205844806180028*m.s1s318 - 0.606489265973719*m.s1s319 - 2*m.s1s320 - 2*m.s1s321 - 2*m.s1s322 <= 0)
m.c181 = Constraint(expr= m.b99 + m.b106 <= 1)
m.c182 = Constraint(expr= m.b100 + m.b112 <= 1)
m.c183 = Constraint(expr= m.b101 + m.b124 <= 1)
m.c184 = Constraint(expr= m.b102 + m.b107 <= 1)
m.c185 = Constraint(expr= m.b103 + m.b118 <= 1)
m.c186 = Constraint(expr= m.b104 + m.b122 <= 1)
m.c187 = Constraint(expr= m.b105 + m.b125 <= 1)
m.c188 = Constraint(expr= m.b108 + m.b113 <= 1)
m.c189 = Constraint(expr= m.b109 + m.b115 <= 1)
m.c190 = Constraint(expr= m.b110 + m.b119 <= 1)
m.c191 = Constraint(expr= m.b111 + m.b126 <= 1)
m.c192 = Constraint(expr= m.b114 + m.b116 <= 1)
m.c193 = Constraint(expr= m.b117 + m.b120 <= 1)
m.c194 = Constraint(expr= m.b121 + m.b123 <= 1)
m.c195 = Constraint(expr= m.b99 + m.b100 + m.b101 >= 2)
m.c196 = Constraint(expr= m.b102 + m.b103 + m.b104 + m.b105 >= 2)
m.c197 = Constraint(expr= m.b99 + m.b102 + m.b108 + m.b109 + m.b110 + m.b111 >= 2)
m.c198 = Constraint(expr= m.b100 + m.b108 + m.b116 >= 2)
m.c199 = Constraint(expr= m.b109 + m.b116 + m.b120 >= 2)
m.c200 = Constraint(expr= m.b103 + m.b110 + m.b120 + m.b121 >= 2)
m.c201 = Constraint(expr= m.b104 + m.b121 >= 2)
m.c202 = Constraint(expr= m.b101 + m.b105 + m.b111 >= 2)
{"hexsha": "e86483df48cf327292c2324ab1350f1c4fe15786", "size": 64971, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/examples/minlplib/watersym2.py", "max_stars_repo_name": "ouyang-w-19/decogo", "max_stars_repo_head_hexsha": "52546480e49776251d4d27856e18a46f40c824a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-03T13:19:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-06T10:48:13.000Z", "max_issues_repo_path": "tests/examples/minlplib/watersym2.py", "max_issues_repo_name": "ouyang-w-19/decogo", "max_issues_repo_head_hexsha": "52546480e49776251d4d27856e18a46f40c824a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-04T14:52:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-15T10:17:11.000Z", "max_forks_repo_path": "tests/examples/minlplib/watersym2.py", "max_forks_repo_name": "ouyang-w-19/decogo", "max_forks_repo_head_hexsha": "52546480e49776251d4d27856e18a46f40c824a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.1831945496, "max_line_length": 120, "alphanum_fraction": 0.6514290991, "include": true, "reason": "from pyomo", "num_tokens": 26169}
#####################################################################
#                                                                   #
# /functions.py                                                     #
#                                                                   #
# Copyright 2013, Monash University                                 #
#                                                                   #
# This file is part of the program labscript, in the labscript      #
# suite (see http://labscriptsuite.org), and is licensed under the  #
# Simplified BSD License. See the license.txt file in the root of   #
# the project for the full license.                                 #
#                                                                   #
#####################################################################

from pylab import *
import numpy as np


def print_time(t, description):
    """Print time with a descriptive string.

    Useful debug tool to print time at a specific point in the shot, during shot
    compilation. Helpful when the time is calculated.

    Args:
        t (float): Time to print
        description (str): Descriptive label to print with it
    """
    print('t = {0:.9f} s:'.format(t), description)


def ramp(duration, initial, final):
    """Defines a linear ramp.

    f(t) = (final - initial)*t/duration + initial

    Args:
        duration (float): Duration of ramp
        initial (float): Starting value of ramp
        final (float): Ending value of ramp

    Returns:
        func: Function that takes a single parameter `t`.
    """
    m = (final - initial)/duration
    return lambda t: m*t + initial


def sine(duration, amplitude, angfreq, phase, dc_offset):
    """Defines a sine wave.

    f(t) = amplitude*sin(angfreq*t + phase) + dc_offset

    Args:
        duration (float): Not used.
        amplitude (float): Amplitude of sine wave.
        angfreq (float): Angular frequency of sine wave.
        phase (float): Phase of sine wave.
        dc_offset (float): Vertical offset of sine wave.

    Returns:
        func: Function that takes a single parameter `t`.
    """
    return lambda t: amplitude*sin(angfreq*(t) + phase) + dc_offset


def sine_ramp(duration, initial, final):
    """Defines a sine-squared increasing ramp.

    f(t) = (final-initial)*(sin(pi*t/(2*duration)))^2 + initial

    Args:
        duration (float): Length of time for the ramp to complete.
        initial (float): Initial value of ramp.
        final (float): Final value of ramp.

    Returns:
        func: Function that takes a single parameter `t`.
    """
    return lambda t: (final-initial)*(sin(pi*(t)/(2*duration)))**2 + initial


def sine4_ramp(duration, initial, final):
    """Defines a quartic sinusoidally increasing ramp.

    f(t) = (final-initial)*(sin(pi*t/(2*duration)))^4 + initial

    Args:
        duration (float): Length of time for the ramp to complete.
        initial (float): Initial value of ramp.
        final (float): Final value of ramp.

    Returns:
        func: Function that takes a single parameter `t`.
    """
    return lambda t: (final-initial)*(sin(pi*(t)/(2*duration)))**4 + initial


def sine4_reverse_ramp(duration, initial, final):
    """Defines a quartic sinusoidally decreasing ramp.

    f(t) = (final-initial)*(sin(pi/2+pi*t/(2*duration)))^4 + initial

    Args:
        duration (float): Length of time for the ramp to complete.
        initial (float): Initial value of ramp.
        final (float): Final value of ramp.

    Returns:
        func: Function that takes a single parameter `t`.
    """
    return lambda t: (final-initial)*(sin(pi/2+pi*(t)/(2*duration)))**4 + initial


def exp_ramp(duration, initial, final, zero):
    """Defines an exponential ramp via offset value.

    f(t) = (initial-zero)*e^(-rate*t) + zero
    rate = log((initial-zero)/(final-zero))/duration

    Args:
        duration (float): Length of time for the ramp to complete
        initial (float): Initial value of ramp.
        final (float): Final value of ramp.
        zero (float): Zero offset of ramp.

    Returns:
        func: Function that takes a single parameter `t`.
    """
    rate = 1/duration * log((initial-zero)/(final-zero))
    return lambda t: (initial-zero)*exp(-rate*(t)) + zero


def exp_ramp_t(duration, initial, final, time_constant):
    """Defines an exponential ramp via time constant.

    f(t) = (initial-zero)*e^(-t/time_constant) + zero
    zero = (final-initial*e^(-duration/time_constant))/(1-e^(-duration/time_constant))

    Args:
        duration (float): Length of time for the ramp to complete
        initial (float): Initial value of ramp.
        final (float): Final value of ramp.
        time_constant (float): Time constant of the exponential.

    Returns:
        func: Function that takes a single parameter `t`.
    """
    zero = (final-initial*exp(-duration/time_constant)) / (1-exp(-duration/time_constant))
    return lambda t: (initial-zero)*exp(-(t)/time_constant) + zero


def piecewise_accel(duration, initial, final):
    """Defines a piecewise acceleration.

    Args:
        duration (float): Length of time for the acceleration to complete.
        initial (float): Initial value.
        final (float): Final value.
    """
    a = (final-initial)
    return lambda t: initial + a * (
        (9./2 * t**3/duration**3) * (t<duration/3)
        + (-9*t**3/duration**3 + 27./2*t**2/duration**2 - 9./2*t/duration + 1./2) * (t<2*duration/3)*(t>=duration/3)
        + (9./2*t**3/duration**3 - 27./2 * t**2/duration**2 + 27./2*t/duration - 7./2) * (t>= 2*duration/3))


def square_wave(duration, level_0, level_1, frequency, phase, duty_cycle):
    """Defines a square wave that alternates between level_0 and level_1."""
    def square_wave_fixed_parameters(t):
        # Phase goes from 0 to 1 (NOT 2 pi) over one period.
        edge_phase_0_to_1 = duty_cycle
        wrapped_phases = (frequency * t + phase) % 1.0
        # Ensure wrapped_phases is an array.
        wrapped_phases = np.array(wrapped_phases)

        # Round phases to avoid issues with numerics. Rounding the phase only
        # changes the output when the phase is just below a threshold where the
        # output changes values. So if a phase is just below the threshold where
        # the output changes state (within PHASE_TOLERANCE), round it up so that
        # the output does change state there. The value of PHASE_TOLERANCE is
        # based on the fact that labscript internally rounds all times to
        # multiples of 0.1 ns.
        LABSCRIPT_TIME_RESOLUTION = 0.1e-9  # 0.1 ns.
        MIN_PHASE_STEP = frequency * LABSCRIPT_TIME_RESOLUTION
        PHASE_TOLERANCE = MIN_PHASE_STEP / 2.0

        # Round phases near level_0 -> level_1 transition at phase =
        # edge_phase_0_to_1.
        is_near_edge = np.isclose(
            wrapped_phases,
            edge_phase_0_to_1,
            rtol=0,
            atol=PHASE_TOLERANCE,
        )
        wrapped_phases[is_near_edge] = edge_phase_0_to_1
        # Round phases near level_1 -> level_0 transition at phase = 1.
        is_near_edge = np.isclose(
            wrapped_phases,
            1,
            rtol=0,
            atol=PHASE_TOLERANCE,
        )
        wrapped_phases[is_near_edge] = 0

        # Initialize array to store output values.
        outputs = np.full_like(t, level_0)

        # Use boolean indexing to set output to level_1 at the appropriate
        # times. For example level_0 for phases [0, 0.5) and level_1 for phases
        # [0.5, 1.0) when duty_cycle is 0.5.
        level_1_times = (wrapped_phases >= edge_phase_0_to_1)
        outputs[level_1_times] = level_1
        return outputs

    return square_wave_fixed_parameters


def pulse_sequence(pulse_sequence, period):
    """Returns a function that interpolates a pulse sequence.

    Relies on :obj:`numpy.digitize` to perform the interpolation.

    Args:
        pulse_sequence (:obj:`numpy:numpy.ndarray`): 2-D timeseries of change
            times and associated states.
        period (float): How long, in seconds, to hold the final state before
            repeating the sequence.

    Returns:
        func: Interpolating function that takes a single parameter `t`.
        Only well defined if `t` falls within the `pulse_sequence` change times.
    """
    pulse_sequence = np.asarray(sorted(pulse_sequence, key=lambda x: x[0], reverse=True))
    pulse_sequence_times = pulse_sequence[:, 0]
    pulse_sequence_states = pulse_sequence[:, 1]

    def pulse_function(t):
        try:
            len(t)
            is_array = True
        except TypeError:
            t = array([t])
            is_array = False

        times = t % period
        indices = np.digitize(times, pulse_sequence_times, right=False)
        states = pulse_sequence_states[indices]

        if is_array:
            return states
        else:
            return states[0]

    return pulse_function
{"hexsha": "829c3a02bf13610649d01f63417bd8c50959a79a", "size": 8895, "ext": "py", "lang": "Python", "max_stars_repo_path": "labscript/functions.py", "max_stars_repo_name": "restelli/labscript", "max_stars_repo_head_hexsha": "4d263442a4381eec1ff0c628467aa59486681df6", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-02-02T10:29:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T04:22:48.000Z", "max_issues_repo_path": "labscript/functions.py", "max_issues_repo_name": "restelli/labscript", "max_issues_repo_head_hexsha": "4d263442a4381eec1ff0c628467aa59486681df6", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2020-05-01T16:46:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-16T13:54:44.000Z", "max_forks_repo_path": "labscript/functions.py", "max_forks_repo_name": "philipstarkey/labscript", "max_forks_repo_head_hexsha": "763a0f1422322f322dc3575df5bd3236f80dad8a", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2020-04-01T16:57:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-15T14:12:46.000Z", "avg_line_length": 36.7561983471, "max_line_length": 112, "alphanum_fraction": 0.5976391231, "include": true, "reason": "import numpy", "num_tokens": 2119}
import time
import datetime
import urllib
import pandas
from pytz import timezone
import numpy
import pyopencl as cl
import pyopencl.array as cl_array
import json
from kafka import KafkaProducer


def google_finance_data_reader(symbol, interval_seconds, num_days):
    url_string = "http://www.google.com/finance/getprices?q={symbol}".format(symbol=symbol.upper())
    url_string += "&i={interval_seconds}&p={num_days}d&f=d,o,h,l,c,v".format(interval_seconds=interval_seconds, num_days=num_days)
    # print(url_string)
    page = urllib.urlopen(url_string)
    df = pandas.read_csv(page, skiprows=7, sep=',', names=['DATE', 'CLOSE', 'HIGH', 'LOW', 'OPEN', 'VOLUME'])
    b_date_round = df['DATE'].map(lambda dt: dt[0] == 'a')
    date_round = df[b_date_round]['DATE'].map(lambda dt: int(dt[1:]))
    df['DATE2'] = date_round
    df['DATE2'] = df['DATE2'].fillna(method='ffill')
    df['DATE3'] = df[~b_date_round]['DATE'].astype(int) * interval_seconds
    df['DATE3'] = df['DATE3'].fillna(0)
    df['DATE4'] = df['DATE2'] + df['DATE3']
    df['DATE4'] = df['DATE4'].map(lambda s: datetime.datetime.fromtimestamp(int(s), timezone('US/Eastern')))
    del df['DATE']
    del df['DATE2']
    del df['DATE3']
    df = df.set_index('DATE4', verify_integrity=True)
    df.index.name = 'DATE'
    return df


def pyopencl_mean(x_gpu_in):
    return cl_array.sum(x_gpu_in) / float(x_gpu_in.size)


def pyopencl_stddev(x_gpu_in):
    mean = pyopencl_mean(x_gpu_in).get()
    element_wise__mean_diff__sq = (x_gpu_in - mean) ** 2
    return pyopencl_mean(element_wise__mean_diff__sq) ** 0.5


def publish_market_predictions(predictions):
    msg = json.dumps(predictions)
    print msg
    producer.send("test-ks-predictions", value=msg)
    producer.flush()


ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
producer = KafkaProducer(bootstrap_servers='localhost:9092', value_serializer=str.encode)

interval_seconds = 60
num_days = 1
symbols = ["AMZN", "IBM", "MSFT", "CVC", "V", "KO", "BRK-B", "AA", "GS", "SBUX", "QCOM"]
# symbols = ["BAC", "C", "IBM", "AAPL", "GE", "T", "MCD", "NKE", "TWTR", "TSLA"]

while True:
    predictions = {}
    for symbol in symbols:
        data_frame = google_finance_data_reader(symbol.replace("-", "."), interval_seconds, num_days)
        prices_latest = data_frame['CLOSE'].tail(30).as_matrix()
        prices = cl_array.to_device(queue, prices_latest)
        predictions[symbol] = float(pyopencl_stddev(prices).get())
    publish_market_predictions({"Predictions": predictions})
    time.sleep(10)  # Delay for 10 seconds
{"hexsha": "65cfb81d5a02b7ba20142ca876679ff4105213c8", "size": 2647, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/main/python/google_finance_predictions.py", "max_stars_repo_name": "Sapphirine/201605-45", "max_stars_repo_head_hexsha": "4af4d6934f7abbf3b89383a9946f5b76b0fe29b4", "max_stars_repo_licenses": ["Apache-1.1"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/main/python/google_finance_predictions.py", "max_issues_repo_name": "Sapphirine/201605-45", "max_issues_repo_head_hexsha": "4af4d6934f7abbf3b89383a9946f5b76b0fe29b4", "max_issues_repo_licenses": ["Apache-1.1"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main/python/google_finance_predictions.py", "max_forks_repo_name": "Sapphirine/201605-45", "max_forks_repo_head_hexsha": "4af4d6934f7abbf3b89383a9946f5b76b0fe29b4", "max_forks_repo_licenses": ["Apache-1.1"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3766233766, "max_line_length": 111, "alphanum_fraction": 0.6618813751, "include": true, "reason": "import numpy", "num_tokens": 729}
import numpy as np


class UncertaintyModel(object):
    def __init__(self, ratingMatrix):
        self.ratingMatrix = ratingMatrix

    def reset(self, seed=None):
        # Reset the weights as if no training was done
        pass

    def save(self, fileName):
        # Save the model
        return fileName

    def load(self, fileName):
        # Load the model
        return True

    def train(self, legalTrainIndices):
        # Train the weights based on current legalTrainIndices
        pass

    # def sampleForUser(self, userIndex, numSamples):
    def sample_for_user(self, userIndex, numSamples):
        # Return the samples for a given user of size
        # (numSamples, numItems)
        samples = np.ones((numSamples, self.ratingMatrix.shape[1]))
        return samples
{"hexsha": "2e1b306029397c938587bf8483e6af06e1ee0552", "size": 793, "ext": "py", "lang": "Python", "max_stars_repo_path": "sclrecommender/bandit/model/uncertaintyModel.py", "max_stars_repo_name": "wezteoh/Bandit_Recommendation", "max_stars_repo_head_hexsha": "a326e4d1d082e1a2113fe739bc343fb45b0b8a4a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sclrecommender/bandit/model/uncertaintyModel.py", "max_issues_repo_name": "wezteoh/Bandit_Recommendation", "max_issues_repo_head_hexsha": "a326e4d1d082e1a2113fe739bc343fb45b0b8a4a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sclrecommender/bandit/model/uncertaintyModel.py", "max_forks_repo_name": "wezteoh/Bandit_Recommendation", "max_forks_repo_head_hexsha": "a326e4d1d082e1a2113fe739bc343fb45b0b8a4a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4333333333, "max_line_length": 67, "alphanum_fraction": 0.6469104666, "include": true, "reason": "import numpy", "num_tokens": 178}
""" postprocessing.py Postprocessing of CDIP files and QC logs. """ import os import json import numpy as np import tqdm from .constants import QC_EXTREME_WAVE_LOG_THRESHOLD def plot_qc(qcfile, outdir, exclude_flags=tuple('cefg'), plot_extreme=True): """Write plots of QC records from given log file to output folder.""" import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt if not set(exclude_flags) <= set('abcdefg'): raise ValueError('exclude_flags can only contain {a, b, c, d, e, f, g}') with open(qcfile, 'r') as f: qc_records = [json.loads(line) for line in f] os.makedirs(outdir, exist_ok=True) basename = os.path.basename(qcfile).split('.')[0] i = 1 for record in tqdm.tqdm(qc_records): qc_passed = len(record['flags_fired']) == 0 process_record = ( not qc_passed and all(flag not in exclude_flags for flag in record['flags_fired']) ) if plot_extreme: process_record |= ( qc_passed and record['relative_wave_height'] > QC_EXTREME_WAVE_LOG_THRESHOLD ) if not process_record: continue mintime, maxtime = np.min(record['time']), np.max(record['time']) # don't plot records with extreme time jumps if maxtime - mintime > 10000 and 'e' in record['flags_fired']: continue elev_range = np.nanmax(np.abs(record['elevation'])) info_left = [ f'Wave height: {record["relative_wave_height"]:.2f} SWH', f'Record start time: {record["start_date"]}', f'Source file: {record["filename"]}', ] info_right = [ f'QC flags fired: {record["flags_fired"]}' if not qc_passed else 'QC passed', ] fig, ax = plt.subplots(1, 1, figsize=(15, 4)) plt.plot(record['time'], record['elevation'], linewidth=0.5) plt.xlim(mintime, maxtime) plt.ylim(-elev_range, elev_range) ax.set_xlabel('Time (s)') ax.set_ylabel('Elevation (m)') ax.set_xticks(np.arange(mintime, maxtime, 10), minor=True) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.text(0.01, 1, '\n'.join(info_left), va='top', ha='left', transform=ax.transAxes) ax.text(0.99, 1, '\n'.join(info_right), va='top', ha='right', transform=ax.transAxes) fig.tight_layout() fig.savefig(os.path.join(outdir, f'{basename}_qc_{i:0>4}.pdf')) plt.close(fig) i += 1 # all blacklisted CDIP deployments that failed visual inspection CDIP_DEPLOYMENT_BLACKLIST = { '045p1': ['d01', 'd02', 'd03', 'd13', 'd15', 'd17', 'd19', 'd21'], '094p1': ['d01', 'd02', 'd03', 'd04', 'd05'], '096p1': ['d04'], '100p1': ['d11'], '106p1': ['d02'], '109p1': ['d05', 'd06'], '111p1': ['d06'], '132p1': ['d01'], '141p1': ['d03'], '142p1': ['d02', 'd15', 'd18'], '144p1': ['d01'], '146p1': ['d01', 'd02'], '157p1': ['d01'], '158p1': ['d02', 'd04'], '162p1': ['d07'], '163p1': ['d01', 'd05'], '167p1': ['d01'], '172p1': ['d01'], '177p1': '*', '196p1': ['d04'], '201p1': ['d03'], '205p1': '*', '206p1': '*', '261p1': '*', '430p1': ['d06'], '431p1': ['d02'], } def apply_mask(ds, dim, mask): """Apply boolean mask along dimension on xarray Dataset.""" if mask.values.all(): return ds idx = np.where(mask.values)[0] return ds.isel(wave_id_local=idx) def remove_blacklisted_cdip(ds): """Remove all records from blacklisted deployments.""" deployment_files = np.unique(ds['meta_source_file_name']) whitelist = list(deployment_files) for f in deployment_files: for station, deployments in CDIP_DEPLOYMENT_BLACKLIST.items(): if station in f: if deployments == '*' or any(d in f for d in deployments): whitelist.remove(f) return ds['meta_source_file_name'].isin(whitelist) def filter_low_swh(ds): """Remove all records with low significant wave heights.""" return ds['sea_state_30m_significant_wave_height_spectral'] > 1.0 
def filter_undersampled(ds): """Remove all records that are undersampled.""" nyquist_frequency = 0.5 * ds['meta_sampling_rate'] mean_frequency = 1. / (ds['sea_state_30m_mean_period_spectral'] / np.timedelta64(1, 's')) return 3.2 * mean_frequency < nyquist_frequency def filter_drifting(ds): """Remove all records with excessive low-frequency components.""" return ds['sea_state_30m_rel_energy_in_frequency_interval'].sel(meta_frequency_band=1) > 0.1 def run_postprocessing(ds, num_filtered_dict=None, chunk_size=10_000): """Run all filters on given xarray Dataset. This is a generator that applies filters in chunks to avoid loading whole files. """ if num_filtered_dict is None: num_filtered_dict = {} else: num_filtered_dict.clear() num_records = len(ds['wave_id_local']) filters = { 'low_swh': filter_low_swh, 'undersampled': filter_undersampled, 'drifting': filter_drifting, } if 'CDIP' in ds.meta_station_name.values[0]: filters.update({'blacklist': remove_blacklisted_cdip}) num_filtered_dict.update({f: 0 for f in filters}) chunks = [ slice(i, min(i + chunk_size, num_records)) for i in range(0, num_records, chunk_size) if i < num_records ] for chunk_slice in chunks: dsi = ds.isel(meta_station_name=0, wave_id_local=chunk_slice).load() for name, filter_fun in filters.items(): mask = filter_fun(dsi) dsi = apply_mask(dsi, 'wave_id_local', mask) num_filtered_dict[name] += mask.size - mask.sum().values if len(dsi['wave_id_local']) == 0: dsi = None break yield dsi
{"hexsha": "49c8f407ed9044a391939533cea3c885bd2ee55d", "size": 5971, "ext": "py", "lang": "Python", "max_stars_repo_path": "fowd/postprocessing.py", "max_stars_repo_name": "dionhaefner/FOWD", "max_stars_repo_head_hexsha": "29fd9a6c822c7e01d6e44056e05bcc5dc8d8f53a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-12-02T17:20:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T10:00:37.000Z", "max_issues_repo_path": "fowd/postprocessing.py", "max_issues_repo_name": "dionhaefner/FOWD", "max_issues_repo_head_hexsha": "29fd9a6c822c7e01d6e44056e05bcc5dc8d8f53a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fowd/postprocessing.py", "max_forks_repo_name": "dionhaefner/FOWD", "max_forks_repo_head_hexsha": "29fd9a6c822c7e01d6e44056e05bcc5dc8d8f53a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-10T14:57:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-09T08:16:26.000Z", "avg_line_length": 30.1565656566, "max_line_length": 96, "alphanum_fraction": 0.603249037, "include": true, "reason": "import numpy", "num_tokens": 1636}
#!/usr/bin/env python2
#
# Copyright 2018 Obodroid Corporation by Lertlove
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import shutil
import pprint

fileDir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(fileDir, "..", ".."))

from PIL import Image
import cv2
import pymongo
from pymongo import MongoClient
from sklearn.externals import joblib
import argparse
import imagehash
from PIL import Image
import numpy as np
import StringIO
import base64
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.cluster import DBSCAN

import openface

modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')

parser = argparse.ArgumentParser()
parser.add_argument('--dlibFacePredictor', type=str, help="Path to dlib's face predictor.",
                    default=os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
                    default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int, help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--mongoURL', type=str, help="Mongo DB url.", default="192.168.1.243:27017")
parser.add_argument('--sourceFolder', type=str, help="Source Folder", default="./tests/captured_images")
parser.add_argument('--targetFolder', type=str, help="Target Folder", default="./tests/cluster_images")
parser.add_argument('--mode', type=str, help="Function Mode", default="detect")
parser.add_argument('--dth', type=float, help="Representation distance threshold", default=0.5)
args = parser.parse_args()

align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim, cuda=args.cuda)


class ClusteringServer:
    def __init__(self):
        self.sourceFolder = args.sourceFolder
        self.targetFolder = args.targetFolder

    def prepareData(self, path):
        self.X = []
        self.Y = []
        for filename in os.listdir(path):
            if not filename.endswith('.jpg'):
                continue
            filepath = os.path.join(path, filename)
            try:
                img = Image.open(filepath)
            except:
                print("cannot open image file")
                continue
            baseFileName = os.path.splitext(os.path.basename(filename))[0]
            rgbFrame = self.convertImageToRgbFrame(img)
            bbs = align.getAllFaceBoundingBoxes(rgbFrame)
            faceInFile = 0
            for bb in bbs:
                faceInFile += 1
                cropImage = rgbFrame[bb.top():bb.bottom(), bb.left():bb.right()]
                print("crop image : {}".format(len(cropImage)))
                if (len(cropImage) > 0) & (bb.left() > 0) & (bb.right() > 0) & (bb.top() > 0) & (bb.bottom() > 0):
                    cv2.imshow("cropped", cropImage)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        return
                    cropFolder = os.path.join(self.targetFolder, "crop")
                    if not os.path.exists(cropFolder):
                        os.makedirs(cropFolder)
                    cropFile = baseFileName + "-" + str(faceInFile) + ".jpg"
                    cropPath = os.path.join(cropFolder, cropFile)
                    im = Image.fromarray(cropImage)
                    im.save(cropPath)
                    landmarks = align.findLandmarks(rgbFrame, bb)
                    alignedFace = align.align(args.imgDim, rgbFrame, bb, landmarks=landmarks,
                                              landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
                    if alignedFace is None:
                        continue
                    phash = str(imagehash.phash(Image.fromarray(alignedFace)))
                    print("phash = " + phash)
                    rep = net.forward(alignedFace)
                    self.X.append(rep)
                    self.Y.append(cropFile)

    def cluster(self):
        db = DBSCAN(eps=0.5, min_samples=2).fit(self.X)
        labels = db.labels_
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
        print('Estimated number of clusters: %d' % n_clusters_)
        for index, label in enumerate(labels):
            filename = self.Y[index]
            label = "User_" + str(label)
            print("index - {}, label - {}, filename - {}".format(index, label, filename))
            source = os.path.join(self.targetFolder, "crop")
            source = os.path.join(source, filename)
            destination = os.path.join(self.targetFolder, label)
            if not os.path.exists(destination):
                os.makedirs(destination)
            destination = os.path.join(destination, filename)
            shutil.copyfile(source, destination)
        return None

    def detect(self):
        base = os.path.join(self.sourceFolder, "base")
        foundCnt = 0
        for filename in os.listdir(base):
            if not filename.endswith('.jpg'):
                continue
            print("base filename = " + filename)
            filepath = os.path.join(base, filename)
            img = Image.open(filepath)
            rgbFrame = self.convertImageToRgbFrame(img)
            bb = align.getLargestFaceBoundingBox(rgbFrame)
            landmarks = align.findLandmarks(rgbFrame, bb)
            alignedFace = align.align(args.imgDim, rgbFrame, bb, landmarks=landmarks,
                                      landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
            if alignedFace is None:
                continue
            phash = str(imagehash.phash(Image.fromarray(alignedFace)))
            baseRep = net.forward(alignedFace)
            for index, cropFile in enumerate(self.Y):
                cropFolder = os.path.join(self.targetFolder, "crop")
                source = os.path.join(cropFolder, cropFile)
                if os.path.exists(source):
                    print("index = {}, cropFile = {}".format(index, cropFile))
                    rep = self.X[index]
                    d = baseRep - rep
                    drep = np.dot(d, d)
                    print("Squared l2 distance between representations: {:0.3f}".format(drep))
                    if drep < args.dth:
                        print("found user")
                        foundCnt = foundCnt + 1
                        destination = os.path.join(self.targetFolder, "found")
                        if not os.path.exists(destination):
                            os.makedirs(destination)
                        destination = os.path.join(destination, cropFile)
                        shutil.move(source, destination)
        print("foundCnt - {}".format(foundCnt))

    def convertImageToRgbFrame(self, img):
        buf = np.fliplr(np.asarray(img))
        rgbFrame = np.zeros((img.height, img.width, 3), dtype=np.uint8)
        rgbFrame[:, :, 0] = buf[:, :, 2]
        rgbFrame[:, :, 1] = buf[:, :, 1]
        rgbFrame[:, :, 2] = buf[:, :, 0]
        return rgbFrame


def main(reactor):
    clusteringServer = ClusteringServer()
    print("Clustering people in folder : " + clusteringServer.sourceFolder)
    clusteringServer.prepareData(clusteringServer.sourceFolder)
    if args.mode == "cluster":
        clusteringServer.cluster()
    else:
        clusteringServer.detect()


if __name__ == '__main__':
    main(sys.argv)
{"hexsha": "288e48f9c1424820fe202a2ba5e3fecdaa66bf35", "size": 8444, "ext": "py", "lang": "Python", "max_stars_repo_path": "demos/clustering.py", "max_stars_repo_name": "obodroid/openface", "max_stars_repo_head_hexsha": "39866481103fd405fa59bd47488f4a56070b0eac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "demos/clustering.py", "max_issues_repo_name": "obodroid/openface", "max_issues_repo_head_hexsha": "39866481103fd405fa59bd47488f4a56070b0eac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demos/clustering.py", "max_forks_repo_name": "obodroid/openface", "max_forks_repo_head_hexsha": "39866481103fd405fa59bd47488f4a56070b0eac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2744186047, "max_line_length": 115, "alphanum_fraction": 0.5753197537, "include": true, "reason": "import numpy", "num_tokens": 1755}
import tensorflow as tf
import numpy as np
from vizdoom import DoomGame
import random
import time
from skimage import transform
from collections import deque
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def create_environment():
    game = DoomGame()
    game.load_config("basic.cfg")
    game.set_doom_scenario_path("basic.wad")
    game.init()
    left = [1, 0, 0]
    right = [0, 1, 0]
    shoot = [0, 0, 1]
    possible_actions = [left, right, shoot]
    return game, possible_actions


def test_environment():
    game, actions = create_environment()
    episodes = 1
    for _ in range(episodes):
        game.new_episode()
        while not game.is_episode_finished():
            state = game.get_state()
            img = state.screen_buffer    # current game frame, 2D array
            misc = state.game_variables  # [50.]
            action = random.choice(actions)
            reward = game.make_action(action)
            print(action, 'reward:', reward)
            time.sleep(0.02)
        print('[*] Result:', game.get_total_reward())
        time.sleep(2)
    game.close()


def preprocess_frame(state):
    # Crop away the HUD and borders, normalize to [0, 1], resize to 84x84
    cropped_frame = state[30:-10, 30:-30]
    normalized_frame = cropped_frame / 255.0
    preprocessed_frame = transform.resize(normalized_frame, [84, 84])
    return preprocessed_frame


def stack_states(stacked_frames, state):
    # Append the newest frame; the deque drops the oldest one automatically.
    # Note: frames from the previous episode remain in the deque when a new
    # episode starts.
    frame = preprocess_frame(state)
    stacked_frames.append(frame)
    stacked_state = np.stack(stacked_frames, axis=2)
    return stacked_state


class build_DQNetwork:
    def __init__(self, state_size, action_size, learning_rate, name='DQNetwork'):
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate

        with tf.variable_scope(name):
            # 84x84x4
            self.inputs = tf.placeholder(tf.float32, [None, *state_size], name='inputs')
            self.actions = tf.placeholder(tf.float32, [None, action_size], name='actions')
            self.target_Q = tf.placeholder(tf.float32, [None], name='target')

            # 20x20x32
            self.conv1 = tf.layers.conv2d(inputs=self.inputs,
                                          filters=32,
                                          kernel_size=[8, 8],
                                          strides=[4, 4],
                                          padding='VALID',
                                          kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
                                          name='conv1')
            self.conv1_batchnorm = tf.layers.batch_normalization(self.conv1,
                                                                 training=True,
                                                                 epsilon=1e-5,
                                                                 name='batch_norm1')
            self.conv1_out = tf.nn.elu(self.conv1_batchnorm, name='conv1_out')

            # 9x9x64
            self.conv2 = tf.layers.conv2d(inputs=self.conv1_out,
                                          filters=64,
                                          kernel_size=[4, 4],
                                          strides=[2, 2],
                                          padding='VALID',
                                          kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
                                          name='conv2')
            self.conv2_batchnorm = tf.layers.batch_normalization(self.conv2,
                                                                 training=True,
                                                                 epsilon=1e-5,
                                                                 name='batch_norm2')
            self.conv2_out = tf.nn.elu(self.conv2_batchnorm, name='conv2_out')

            # 3x3x128
            self.conv3 = tf.layers.conv2d(inputs=self.conv2_out,
                                          filters=128,
                                          kernel_size=[4, 4],
                                          strides=[2, 2],
                                          padding='VALID',
                                          kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
                                          name='conv3')
            self.conv3_batchnorm = tf.layers.batch_normalization(self.conv3,
                                                                 training=True,
                                                                 epsilon=1e-5,
                                                                 name='batch_norm3')
            self.conv3_out = tf.nn.elu(self.conv3_batchnorm, name='conv3_out')

            # 1152
            self.flatten = tf.layers.flatten(self.conv3_out)

            # 512
            self.fc = tf.layers.dense(inputs=self.flatten,
                                      units=512,
                                      activation=tf.nn.elu,
                                      kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                      name='fc1')

            # 3
            self.output = tf.layers.dense(inputs=self.fc,
                                          units=3,
                                          activation=None,
                                          kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                          name='output')

            # Q is our predicted Q value
            self.Q = tf.reduce_sum(tf.multiply(self.output, self.actions), axis=1)

            # The loss is the difference between our predicted Q and the Q_target
            self.loss = tf.reduce_mean(tf.square(self.target_Q - self.Q))
            self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)


class Memory():
    def __init__(self, max_size):
        self.buffer = deque(maxlen=max_size)

    def add(self, experience):
        self.buffer.append(experience)

    def sample(self, batch_size):
        buffer_size = len(self.buffer)
        index = np.random.choice(np.arange(buffer_size), size=batch_size, replace=False)
        return [self.buffer[i] for i in index]


def train():
    game, possible_actions = create_environment()

    # Set Hyperparameters
    #####################
    state_size = [84, 84, 4]
    action_size = game.get_available_buttons_size()
    learning_rate = 0.0002
    total_episodes = 5000
    max_steps = 100
    batch_size = 64
    explore_max = 1.0
    explore_min = 0.01
    decay_rate = 0.0001
    gamma = 0.99
    pretrain_length = batch_size
    memory_size = 50000
    stack_size = 4
    stacked_frames = deque([np.zeros((84, 84), dtype=int) for i in range(stack_size)],
                           maxlen=stack_size)
    memory = Memory(max_size=memory_size)
    #####################

    # make pretrain samples
    ###########################################
    game.new_episode()
    for i in range(pretrain_length):
        if i == 0:
            state = game.get_state().screen_buffer
            state = stack_states(stacked_frames, state)
        action = random.choice(possible_actions)
        reward = game.make_action(action)
        done = game.is_episode_finished()
        if done:
            next_state = np.zeros(state.shape)
            memory.add((state, action, reward, next_state, done))
            game.new_episode()
        else:
            next_state = game.get_state().screen_buffer
            next_state = stack_states(stacked_frames, next_state)
            memory.add((state, action, reward, next_state, done))
            state = next_state
    ###########################################

    # train deep Q neural network
    ###########################################
    tf.reset_default_graph()
    DQNetwork = build_DQNetwork(state_size, action_size, learning_rate)
    writer = tf.summary.FileWriter('train_log')
    tf.summary.scalar('loss', DQNetwork.loss)
    # Build the merge op once; calling tf.summary.merge_all() inside the loop
    # adds a new op to the graph on every step
    write_op = tf.summary.merge_all()
    saver = tf.train.Saver()
    rewards_list = []
    decay_step = 0
    loss = 0.0  # defined up front so the first episode-end print cannot raise NameError
    game.init()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for episode in range(total_episodes):
            game.new_episode()
            step = 0
            frame = game.get_state().screen_buffer
            state = stack_states(stacked_frames, frame)
            while step < max_steps:
                step += 1
                decay_step += 1

                # epsilon-greedy action selection with exponential decay
                exp_exp_tradeoff = np.random.rand()
                explore_probability = explore_min + \
                    (explore_max - explore_min) * np.exp(-decay_rate * decay_step)
                if explore_probability > exp_exp_tradeoff:
                    action = random.choice(possible_actions)
                else:
                    Qs = sess.run(DQNetwork.output,
                                  feed_dict={DQNetwork.inputs: state.reshape(1, *state.shape)})
                    action = possible_actions[int(np.argmax(Qs))]

                reward = game.make_action(action)
                done = game.is_episode_finished()

                if done:
                    next_state = np.zeros((84, 84), dtype=int)
                    next_state = stack_states(stacked_frames, next_state)
                    total_reward = game.get_total_reward()
                    formatted_str = 'Episode: {}, Total reward: {}, Training loss: {:.4f}, Explore P: {:.4f}'
                    print(formatted_str.format(episode, total_reward, loss, explore_probability))
                    rewards_list.append((episode, total_reward))
                    memory.add((state, action, reward, next_state, done))
                    step = max_steps
                else:
                    next_state = game.get_state().screen_buffer
                    next_state = stack_states(stacked_frames, next_state)
                    memory.add((state, action, reward, next_state, done))
                    state = next_state

                # train DQNetwork == update Qtable
                batch = memory.sample(batch_size)
                states = np.array([each[0] for each in batch], ndmin=3)
                actions = np.array([each[1] for each in batch])
                rewards = np.array([each[2] for each in batch])
                next_states = np.array([each[3] for each in batch])
                dones = np.array([each[4] for each in batch])

                target_Qs_batch = []
                target_Qs = sess.run(DQNetwork.output,
                                     feed_dict={DQNetwork.inputs: next_states})
                for i in range(batch_size):
                    terminal = dones[i]
                    if terminal:
                        # terminal transition: the target is just the reward
                        target_Qs_batch.append(rewards[i])
                    else:
                        # otherwise bootstrap from the best next-state Q value
                        target = rewards[i] + gamma * np.max(target_Qs[i])
                        target_Qs_batch.append(target)
                targets = np.array([each for each in target_Qs_batch])

                loss, _ = sess.run([DQNetwork.loss, DQNetwork.optimizer],
                                   feed_dict={DQNetwork.inputs: states,
                                              DQNetwork.target_Q: targets,
                                              DQNetwork.actions: actions})

                # Write TF Summaries
                summary = sess.run(write_op,
                                   feed_dict={DQNetwork.inputs: states,
                                              DQNetwork.target_Q: targets,
                                              DQNetwork.actions: actions})
                writer.add_summary(summary, episode)
                writer.flush()

            if episode % 5 == 0:
                save_path = saver.save(sess, './model/model.ckpt')
                print('[*] Model Saved:', save_path)
    print('Train done')
    ###########################################


def play():
    with tf.Session() as sess:
        state_size = [84, 84, 4]
        action_size = 3
        learning_rate = 0.0002
        DQNetwork = build_DQNetwork(state_size, action_size, learning_rate)
        saver = tf.train.Saver()
        saver.restore(sess, "./model/model.ckpt")

        game, possible_actions = create_environment()
        totalScore = 0
        episodes = 10
        stack_size = 4
        stacked_frames = deque([np.zeros((84, 84), dtype=int) for i in range(stack_size)],
                               maxlen=stack_size)
        for i in range(episodes):
            game.new_episode()
            while not game.is_episode_finished():
                frame = game.get_state().screen_buffer
                state = stack_states(stacked_frames, frame)
                Qs = sess.run(DQNetwork.output,
                              feed_dict={DQNetwork.inputs: state.reshape((1, *state.shape))})
                action = possible_actions[int(np.argmax(Qs))]
                game.make_action(action)
            score = game.get_total_reward()
            print("Episode {} Score: {}".format(i, score))
            totalScore += score
        print("[*] Average Score: ", totalScore / episodes)
        game.close()


if __name__ == '__main__':
    import sys
    if len(sys.argv) < 2:
        print('usage: DQN_Doom.py --train | --play')
    elif sys.argv[1] == '--train':
        train()
    elif sys.argv[1] == '--play':
        play()
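A minimal NumPy sketch of the TD-target rule the training loop above implements, with made-up rewards, done flags, and next-state Q-values (gamma matches the script's 0.99): terminal transitions keep the raw reward, the rest bootstrap from max Q(s').

import numpy as np

gamma = 0.99
rewards = np.array([1.0, -0.1, 0.0])
dones = np.array([True, False, False])
next_q = np.array([[0.0, 0.0, 0.0],   # terminal row, ignored
                   [0.5, 0.2, 0.1],
                   [0.3, 0.9, 0.4]])

targets = np.where(dones, rewards, rewards + gamma * next_q.max(axis=1))
print(targets)  # [1.    0.395 0.891]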
{"hexsha": "428ceb94fb3f902a9c40eab1da6d8fcde6c7f38b", "size": 13009, "ext": "py", "lang": "Python", "max_stars_repo_path": "DQN/Doom/DQN_Doom.py", "max_stars_repo_name": "Urinx/ReinforcementLearning", "max_stars_repo_head_hexsha": "40c00b8297503e127c6c8134a8becffb81b676e4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 76, "max_stars_repo_stars_event_min_datetime": "2018-06-04T01:05:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-27T08:35:40.000Z", "max_issues_repo_path": "DQN/Doom/DQN_Doom.py", "max_issues_repo_name": "slightlyLLL/ReinforcementLearning", "max_issues_repo_head_hexsha": "40c00b8297503e127c6c8134a8becffb81b676e4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DQN/Doom/DQN_Doom.py", "max_forks_repo_name": "slightlyLLL/ReinforcementLearning", "max_forks_repo_head_hexsha": "40c00b8297503e127c6c8134a8becffb81b676e4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2018-06-04T02:58:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-26T08:44:40.000Z", "avg_line_length": 37.598265896, "max_line_length": 115, "alphanum_fraction": 0.5161042355, "include": true, "reason": "import numpy", "num_tokens": 2623}
module DryRun

using Mocking

export @dryer

include("dryer.jl")

# Create the initial definition of `activated` which defaults DryRun to be disabled
activated() = false

"""
    DryRun.activate()

Enable `@mock` call sites to allow for calling patches instead of the original
function.
"""
function activate()
    # Avoid redefining `activated` when it's already set appropriately
    !activated() && @eval activated() = true
    return nothing
end

"""
    DryRun.deactivate()

Disable `@mock` call sites to only call the original function.
"""
function deactivate()
    # Avoid redefining `activated` when it's already set appropriately
    activated() && @eval activated() = false
    return nothing
end

const NULLIFIED = Ref{Bool}(false)

"""
    DryRun.nullify()

Force any packages loaded after this point to treat the `@mock` macro as a no-op.

Doing so will maximize performance by eliminating any runtime checks taking place
at the `@mock` call sites, but will break any tests that require patches to be
applied.

Note: to ensure that all `@mock` macros are inoperative, be sure to call this
function before loading any packages which depend on DryRun.jl.
"""
function nullify()
    NULLIFIED[] = true
    return nothing
end

end  # module
{"hexsha": "81dca054df0262ad0f0e04201b71d6d6d916e162", "size": 1254, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/DryRun.jl", "max_stars_repo_name": "samuel-massinon/DryRun.jl", "max_stars_repo_head_hexsha": "1d6435dd609f9331ad3a9f34bde1734f007a7958", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/DryRun.jl", "max_issues_repo_name": "samuel-massinon/DryRun.jl", "max_issues_repo_head_hexsha": "1d6435dd609f9331ad3a9f34bde1734f007a7958", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/DryRun.jl", "max_forks_repo_name": "samuel-massinon/DryRun.jl", "max_forks_repo_head_hexsha": "1d6435dd609f9331ad3a9f34bde1734f007a7958", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6603773585, "max_line_length": 92, "alphanum_fraction": 0.7312599681, "num_tokens": 288}
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import time
import datetime

plt.style.use("seaborn-colorblind")

cols = ['Sampling Date', 'City/Town/Village/Area', 'SO2', 'NO2']
data4 = pd.read_csv("C:/Users/VARUN/Desktop/AirPollution/Dataset/cpcb_dly_aq_andhra_pradesh-2004.csv")[cols]
data6 = pd.read_csv("C:/Users/VARUN/Desktop/AirPollution/Dataset/cpcb_dly_aq_andhra_pradesh-2006.csv")[cols]
data7 = pd.read_csv("C:/Users/VARUN/Desktop/AirPollution/Dataset/cpcb_dly_aq_andhra_pradesh-2007.csv")[cols]
data9 = pd.read_csv("C:/Users/VARUN/Desktop/AirPollution/Dataset/cpcb_dly_aq_andhra_pradesh-2009.csv")[cols]
data11 = pd.read_csv("C:/Users/VARUN/Desktop/AirPollution/Dataset/cpcb_dly_aq_andhra_pradesh-2011.csv")[cols]
data14 = pd.read_csv("C:/Users/VARUN/Desktop/AirPollution/Dataset/cpcb_dly_aq_telangana-2014.csv")[cols]

data = pd.concat([data4, data6, data7, data9, data11, data14], axis=0)
data = data.reset_index(drop=True)
# print(data.head())

# Keep only the Hyderabad rows. A boolean mask is used instead of dropping
# rows one at a time, which left holes in the index and broke the loop below.
data = data[data['City/Town/Village/Area'] == 'Hyderabad'].reset_index(drop=True)

# Convert sampling dates to Unix timestamps; rows with unparseable dates are
# left as-is
for i in range(len(data)):
    try:
        data.loc[i, 'Sampling Date'] = time.mktime(
            datetime.datetime.strptime(data.loc[i, 'Sampling Date'], "%d/%m/%Y").timetuple())
    except ValueError:
        pass

data = data[['Sampling Date', 'SO2', 'NO2']]
print(data.head())

no2 = data[['Sampling Date', 'NO2']]
forecast_len = 30

# Scale the NO2 series and build sliding windows over it: the previous three
# observations predict the next one. (The original windowed the scaled
# timestamps instead of the pollutant values, so the model was fit to predict
# dates rather than NO2.)
y = np.array(no2['NO2'], dtype=float).reshape(-1, 1)
y = y[:-forecast_len]
sc = MinMaxScaler(feature_range=(0, 1))
y = sc.fit_transform(y)

x_train = []
y_train = []
for i in range(3, 500):  # assumes at least 500 rows remain after filtering
    x_train.append(y[i - 3:i, 0])
    y_train.append(y[i, 0])
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))

model = tf.keras.models.Sequential([
    tf.keras.layers.LSTM(128, return_sequences=True, input_shape=(x_train.shape[1], 1)),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.LSTM(512, return_sequences=True),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.LSTM(512, return_sequences=True),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.LSTM(512, return_sequences=True),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.LSTM(256, return_sequences=True),
    tf.keras.layers.Dropout(0.1),
    tf.keras.layers.LSTM(128, return_sequences=True),
    tf.keras.layers.Dropout(0.1),
    tf.keras.layers.LSTM(64),
    tf.keras.layers.Dropout(0.1),
    tf.keras.layers.Dense(1)
])

# Accuracy is meaningless for a regression target, so only MSE is tracked
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
history = model.fit(x_train, y_train, epochs=15, batch_size=3)

predictions = model.predict(x_train)
# print(predictions)
plt.plot(range(len(x_train)), y_train, c='g')
plt.plot(range(len(x_train)), predictions, c='r')
plt.legend(['Green-Train', 'Red-Predictions'], loc='upper left')
plt.show()
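A toy illustration of the sliding-window construction used above, on a made-up five-point series: each window of three past values predicts the next one.

import numpy as np

series = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
window = 3
x = np.array([series[i - window:i] for i in range(window, len(series))])
y = np.array([series[i] for i in range(window, len(series))])
print(x.shape, y.shape)  # (2, 3) (2,)
# x[0] == [0.1, 0.2, 0.3] predicts y[0] == 0.4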
{"hexsha": "c53b7bacc1c18f1804d10413c2675b375eba7d2d", "size": 3386, "ext": "py", "lang": "Python", "max_stars_repo_path": "Air-Pollution-Levels-Exploratory-Data-Analysis-master/Test.py", "max_stars_repo_name": "varuntotakura/AirPollutionAnalysis", "max_stars_repo_head_hexsha": "ec8a1df747c018b13856a346c8236adc8268c6bb", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Air-Pollution-Levels-Exploratory-Data-Analysis-master/Test.py", "max_issues_repo_name": "varuntotakura/AirPollutionAnalysis", "max_issues_repo_head_hexsha": "ec8a1df747c018b13856a346c8236adc8268c6bb", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Air-Pollution-Levels-Exploratory-Data-Analysis-master/Test.py", "max_forks_repo_name": "varuntotakura/AirPollutionAnalysis", "max_forks_repo_head_hexsha": "ec8a1df747c018b13856a346c8236adc8268c6bb", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4772727273, "max_line_length": 162, "alphanum_fraction": 0.7096869462, "include": true, "reason": "import numpy", "num_tokens": 982}
import string
import operator as op
from functools import reduce

import numpy as np

from . import nodal_corrections as nc


class BaseConstituent(object):
    xdo_int = {
        'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8,
        'I': 9, 'J': 10, 'K': 11, 'L': 12, 'M': 13, 'N': 14, 'O': 15,
        'P': 16, 'Q': 17, 'R': -8, 'S': -7, 'T': -6, 'U': -5, 'V': -4,
        'W': -3, 'X': -2, 'Y': -1, 'Z': 0
    }

    int_xdo = {v: k for k, v in xdo_int.items()}

    def __init__(self, name, xdo='', coefficients=[], u=nc.u_zero, f=nc.f_unity):
        if xdo == '':
            self.coefficients = np.array(coefficients)
        else:
            self.coefficients = np.array(self.xdo_to_coefficients(xdo))
        self.name = name
        self.u = u
        self.f = f

    def xdo_to_coefficients(self, xdo):
        return [self.xdo_int[l.upper()] for l in xdo if l in string.ascii_letters]

    def coefficients_to_xdo(self, coefficients):
        # fixed: the argument was previously misspelt `cooefficients` in the
        # body, which raised a NameError
        return ''.join([self.int_xdo[c] for c in coefficients])

    def V(self, astro):
        return np.dot(self.coefficients, self.astro_values(astro))

    def xdo(self):
        return self.coefficients_to_xdo(self.coefficients)

    def speed(self, a):
        return np.dot(self.coefficients, self.astro_speeds(a))

    def astro_xdo(self, a):
        return [a['T+h-s'], a['s'], a['h'], a['p'], a['N'], a['pp'], a['90']]

    def astro_speeds(self, a):
        return np.array([each.speed for each in self.astro_xdo(a)])

    def astro_values(self, a):
        return np.array([each.value for each in self.astro_xdo(a)])

    # Consider two out of phase constituents which travel at the same speed to
    # be identical
    def __eq__(self, c):
        return np.all(self.coefficients[:-1] == c.coefficients[:-1])

    def __hash__(self):
        return hash(tuple(self.coefficients[:-1]))


class CompoundConstituent(BaseConstituent):

    def __init__(self, members=[], **kwargs):
        self.members = members
        if 'u' not in kwargs:
            kwargs['u'] = self.u
        if 'f' not in kwargs:
            kwargs['f'] = self.f
        super(CompoundConstituent, self).__init__(**kwargs)
        self.coefficients = reduce(op.add, [c.coefficients * n for (c, n) in members])

    def speed(self, a):
        return reduce(op.add, [n * c.speed(a) for (c, n) in self.members])

    def V(self, a):
        return reduce(op.add, [n * c.V(a) for (c, n) in self.members])

    def u(self, a):
        return reduce(op.add, [n * c.u(a) for (c, n) in self.members])

    def f(self, a):
        return reduce(op.mul, [c.f(a) ** abs(n) for (c, n) in self.members])


###### Base Constituents
# Long Term
_Z0 = BaseConstituent(name='Z0', xdo='Z ZZZ ZZZ', u=nc.u_zero, f=nc.f_unity)
_Sa = BaseConstituent(name='Sa', xdo='Z ZAZ ZZZ', u=nc.u_zero, f=nc.f_unity)
_Ssa = BaseConstituent(name='Ssa', xdo='Z ZBZ ZZZ', u=nc.u_zero, f=nc.f_unity)
_Mm = BaseConstituent(name='Mm', xdo='Z AZY ZZZ', u=nc.u_zero, f=nc.f_Mm)
_Mf = BaseConstituent(name='Mf', xdo='Z BZZ ZZZ', u=nc.u_Mf, f=nc.f_Mf)

# Diurnals
_Q1 = BaseConstituent(name='Q1', xdo='A XZA ZZA', u=nc.u_O1, f=nc.f_O1)
_O1 = BaseConstituent(name='O1', xdo='A YZZ ZZA', u=nc.u_O1, f=nc.f_O1)
_K1 = BaseConstituent(name='K1', xdo='A AZZ ZZY', u=nc.u_K1, f=nc.f_K1)
_J1 = BaseConstituent(name='J1', xdo='A BZY ZZY', u=nc.u_J1, f=nc.f_J1)

# M1 is a tricky business for reasons of convention, rather than theory. The
# reasons for this are best summarised by Schureman paragraphs 126, 127 and in
# the comments found in congen_input.txt of xtides, so I won't go over all this
# again here.
_M1 = BaseConstituent(name='M1', xdo='A ZZZ ZZA', u=nc.u_M1, f=nc.f_M1)
_P1 = BaseConstituent(name='P1', xdo='A AXZ ZZA', u=nc.u_zero, f=nc.f_unity)
_S1 = BaseConstituent(name='S1', xdo='A AYZ ZZZ', u=nc.u_zero, f=nc.f_unity)
_OO1 = BaseConstituent(name='OO1', xdo='A CZZ ZZY', u=nc.u_OO1, f=nc.f_OO1)

# Semi-Diurnals
_2N2 = BaseConstituent(name='2N2', xdo='B XZB ZZZ', u=nc.u_M2, f=nc.f_M2)
_N2 = BaseConstituent(name='N2', xdo='B YZA ZZZ', u=nc.u_M2, f=nc.f_M2)
_nu2 = BaseConstituent(name='nu2', xdo='B YBY ZZZ', u=nc.u_M2, f=nc.f_M2)
_M2 = BaseConstituent(name='M2', xdo='B ZZZ ZZZ', u=nc.u_M2, f=nc.f_M2)
_lambda2 = BaseConstituent(name='lambda2', xdo='B AXA ZZB', u=nc.u_M2, f=nc.f_M2)
_L2 = BaseConstituent(name='L2', xdo='B AZY ZZB', u=nc.u_L2, f=nc.f_L2)
_T2 = BaseConstituent(name='T2', xdo='B BWZ ZAZ', u=nc.u_zero, f=nc.f_unity)
_S2 = BaseConstituent(name='S2', xdo='B BXZ ZZZ', u=nc.u_zero, f=nc.f_unity)
_R2 = BaseConstituent(name='R2', xdo='B BYZ ZYB', u=nc.u_zero, f=nc.f_unity)
_K2 = BaseConstituent(name='K2', xdo='B BZZ ZZZ', u=nc.u_K2, f=nc.f_K2)

# Third-Diurnals
_M3 = BaseConstituent(name='M3', xdo='C ZZZ ZZZ',
                      u=lambda a: nc.u_Modd(a, 3),
                      f=lambda a: nc.f_Modd(a, 3))

###### Compound Constituents
# Long Term
_MSF = CompoundConstituent(name='MSF', members=[(_S2, 1), (_M2, -1)])

# Diurnal
_2Q1 = CompoundConstituent(name='2Q1', members=[(_N2, 1), (_J1, -1)])
_rho1 = CompoundConstituent(name='rho1', members=[(_nu2, 1), (_K1, -1)])

# Semi-Diurnal
_mu2 = CompoundConstituent(name='mu2', members=[(_M2, 2), (_S2, -1)])  # 2MS2
_2SM2 = CompoundConstituent(name='2SM2', members=[(_S2, 2), (_M2, -1)])

# Third-Diurnal
_2MK3 = CompoundConstituent(name='2MK3', members=[(_M2, 1), (_O1, 1)])
_MK3 = CompoundConstituent(name='MK3', members=[(_M2, 1), (_K1, 1)])

# Quarter-Diurnal
_MN4 = CompoundConstituent(name='MN4', members=[(_M2, 1), (_N2, 1)])
_M4 = CompoundConstituent(name='M4', members=[(_M2, 2)])
_MS4 = CompoundConstituent(name='MS4', members=[(_M2, 1), (_S2, 1)])
_S4 = CompoundConstituent(name='S4', members=[(_S2, 2)])

# Sixth-Diurnal
_M6 = CompoundConstituent(name='M6', members=[(_M2, 3)])
_S6 = CompoundConstituent(name='S6', members=[(_S2, 3)])

# Eighth-Diurnals
_M8 = CompoundConstituent(name='M8', members=[(_M2, 4)])


noaa = [
    _M2, _S2, _N2, _K1, _M4, _O1, _M6, _MK3, _S4, _MN4, _nu2, _S6, _mu2,
    _2N2, _OO1, _lambda2, _S1, _M1, _J1, _Mm, _Ssa, _Sa, _MSF, _Mf, _rho1,
    _Q1, _T2, _R2, _2Q1, _P1, _2SM2, _M3, _L2, _2MK3, _K2, _M8, _MS4
]
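A small standalone sketch of the idea encoded by CompoundConstituent.speed above: a compound's speed is the integer combination of its members' speeds, so speed(M4) = 2 * speed(M2). The speeds below (degrees per hour) are the standard textbook values, quoted here for illustration only.

speeds = {'M2': 28.9841042, 'S2': 30.0}

def compound_speed(members):
    return sum(n * speeds[name] for name, n in members)

print(compound_speed([('M2', 2)]))             # M4:  57.9682084
print(compound_speed([('M2', 1), ('S2', 1)]))  # MS4: 58.9841042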
{"hexsha": "37e7274b06f65f4c44d14501457c9363744cdefd", "size": 6351, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytides/constituent.py", "max_stars_repo_name": "yudevan/pytides", "max_stars_repo_head_hexsha": "507f2bc5d19fa5e427045cc2bf9ed724daf67f0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pytides/constituent.py", "max_issues_repo_name": "yudevan/pytides", "max_issues_repo_head_hexsha": "507f2bc5d19fa5e427045cc2bf9ed724daf67f0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pytides/constituent.py", "max_forks_repo_name": "yudevan/pytides", "max_forks_repo_head_hexsha": "507f2bc5d19fa5e427045cc2bf9ed724daf67f0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2037037037, "max_line_length": 123, "alphanum_fraction": 0.6049441033, "include": true, "reason": "import numpy", "num_tokens": 2464}
import numpy as np
import math
import time

from .base import try_gpu, Timer, Accumulator
from .figure import set_figsize, plt, Animator
from .data import data_iter_consecutive, data_iter_random
from .model import linreg

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable

__all__ = ['evaluate_loss', 'train', 'train_ch10', 'train_2d', 'evaluate_accuracy',
           'squared_loss', 'grad_clipping', 'sgd', 'train_and_predict_rnn',
           'train_ch3', 'train_ch5', 'SequenceMask', 'MaskedSoftmaxCELoss',
           'train_ch7', 'translate_ch7', 'to_onehot', 'predict_rnn',
           'train_and_predict_rnn_nn', 'predict_rnn_nn', 'grad_clipping_nn']


def evaluate_loss(net, data_iter, loss):
    """Evaluate the loss of a model on the given dataset"""
    metric = Accumulator(2)  # sum_loss, num_examples
    for X, y in data_iter:
        metric.add(loss(net(X), y).sum().detach().numpy().item(), list(y.shape)[0])
    return metric[0] / metric[1]


def evaluate_accuracy(data_iter, net, device=torch.device('cpu')):
    """Evaluate accuracy of a model on the given data set."""
    net.eval()  # Switch to evaluation mode for Dropout, BatchNorm etc. layers.
    acc_sum, n = torch.tensor([0], dtype=torch.float32, device=device), 0
    for X, y in data_iter:
        # Copy the data to device.
        X, y = X.to(device), y.to(device)
        with torch.no_grad():
            y = y.long()
            acc_sum += torch.sum((torch.argmax(net(X), dim=1) == y))
            n += y.shape[0]
    return acc_sum.item() / n


def squared_loss(y_hat, y):
    """Squared loss."""
    return (y_hat - y.view(y_hat.shape)).pow(2) / 2


def grad_clipping(params, theta, device):
    """Clip the gradient: if the global L2 norm exceeds theta, rescale."""
    norm = torch.tensor([0], dtype=torch.float32, device=device)
    for param in params:
        norm += (param.grad ** 2).sum()
    norm = norm.sqrt().item()
    if norm > theta:
        for param in params:
            param.grad.data.mul_(theta / norm)


def grad_clipping_nn(model, theta, device):
    """Clip the gradient for a nn model."""
    grad_clipping(model.parameters(), theta, device)


def sgd(params, lr, batch_size):
    """Mini-batch stochastic gradient descent."""
    for param in params:
        param.data.sub_(lr * param.grad / batch_size)
        param.grad.data.zero_()


def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
                          corpus_indices, vocab, device, is_random_iter,
                          num_epochs, num_steps, lr, clipping_theta,
                          batch_size, prefixes):
    """Train an RNN model and predict the next item in the sequence."""
    if is_random_iter:
        data_iter_fn = data_iter_random
    else:
        data_iter_fn = data_iter_consecutive
    params = get_params()
    loss = nn.CrossEntropyLoss()
    start = time.time()
    for epoch in range(num_epochs):
        if not is_random_iter:
            # If adjacent sampling is used, the hidden state is initialized
            # at the beginning of the epoch
            state = init_rnn_state(batch_size, num_hiddens, device)
        l_sum, n = 0.0, 0
        data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)
        for X, Y in data_iter:
            if is_random_iter:
                # If random sampling is used, the hidden state is initialized
                # before each mini-batch update
                state = init_rnn_state(batch_size, num_hiddens, device)
            else:
                # Otherwise, the detach function needs to be used to separate
                # the hidden state from the computational graph to avoid
                # backpropagation beyond the current sample
                for s in state:
                    s.detach_()
            inputs = to_onehot(X, len(vocab))
            # outputs is num_steps terms of shape (batch_size, len(vocab))
            (outputs, state) = rnn(inputs, state)
            # After stitching it is (num_steps * batch_size, len(vocab))
            outputs = torch.cat(outputs, dim=0)
            # The shape of Y is (batch_size, num_steps), and then becomes a
            # vector with a length of batch * num_steps after transposition.
            # This gives it a one-to-one correspondence with output rows
            y = Y.t().reshape((-1,))
            # Average classification error via cross entropy loss
            l = loss(outputs, y.long()).mean()
            l.backward()
            with torch.no_grad():
                grad_clipping(params, clipping_theta, device)  # Clip the gradient
                # Since the error is the mean, no need to average gradients here
                sgd(params, lr, 1)
            l_sum += l.item() * y.numel()
            n += y.numel()
        if (epoch + 1) % 50 == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch + 1, math.exp(l_sum / n), time.time() - start))
            start = time.time()
        if (epoch + 1) % 100 == 0:
            for prefix in prefixes:
                print(' -', predict_rnn(prefix, 50, rnn, params, init_rnn_state,
                                        num_hiddens, vocab, device))


def train(train_iter, test_iter, net, loss, optimizer, device, num_epochs):
    net = net.to(device)
    print("training on ", device)
    batch_count = 0
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        for X, y in train_iter:
            X = X.to(device)
            y = y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_l_sum += l.cpu().item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
            n += y.shape[0]
            batch_count += 1
        # Pass the device through so the evaluation data lands on the same
        # device as the model (the original omitted it)
        test_acc = evaluate_accuracy(test_iter, net, device)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
              % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n,
                 test_acc, time.time() - start))


def train_ch3(net, train_iter, test_iter, criterion, num_epochs, batch_size, lr=None):
    """Train and evaluate a model with CPU."""
    optimizer = optim.SGD(net.parameters(), lr=lr)
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            optimizer.zero_grad()
            y_hat = net(X)
            loss = criterion(y_hat, y)
            loss.backward()
            optimizer.step()
            y = y.type(torch.float32)
            train_l_sum += loss.item()
            train_acc_sum += torch.sum(
                (torch.argmax(y_hat, dim=1).type(torch.FloatTensor) == y).detach()).float()
            n += list(y.size())[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))


def train_ch5(net, train_iter, test_iter, criterion, num_epochs, batch_size,
              device, lr=None):
    """Train and evaluate a model with CPU or GPU."""
    print('training on', device)
    net.to(device)
    optimizer = optim.SGD(net.parameters(), lr=lr)
    for epoch in range(num_epochs):
        train_l_sum = torch.tensor([0.0], dtype=torch.float32, device=device)
        train_acc_sum = torch.tensor([0.0], dtype=torch.float32, device=device)
        n, start = 0, time.time()
        for X, y in train_iter:
            net.train()
            optimizer.zero_grad()
            X, y = X.to(device), y.to(device)
            y_hat = net(X)
            loss = criterion(y_hat, y)
            loss.backward()
            optimizer.step()
            with torch.no_grad():
                y = y.long()
                train_l_sum += loss.float()
                train_acc_sum += (torch.sum((torch.argmax(y_hat, dim=1) == y))).float()
                n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net, device)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
              'time %.1f sec' % (epoch + 1, train_l_sum / n, train_acc_sum / n,
                                 test_acc, time.time() - start))


def SequenceMask(X, X_len, value=0):
    maxlen = X.size(1)
    mask = torch.arange(maxlen)[None, :].to(X_len.device) < X_len[:, None]
    X[~mask] = value
    return X


class MaskedSoftmaxCELoss(nn.CrossEntropyLoss):
    def forward(self, pred, label, valid_length):
        # the sample weights shape should be (batch_size, seq_len)
        weights = torch.ones_like(label)
        weights = SequenceMask(weights, valid_length).float()
        self.reduction = 'none'
        output = super(MaskedSoftmaxCELoss, self).forward(pred.transpose(1, 2), label)
        return (output * weights).mean(dim=1)


def train_ch7(model, data_iter, lr, num_epochs, device):
    model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    loss = MaskedSoftmaxCELoss()
    tic = time.time()
    for epoch in range(1, num_epochs + 1):
        l_sum, num_tokens_sum = 0.0, 0.0
        for batch in data_iter:
            optimizer.zero_grad()
            X, X_vlen, Y, Y_vlen = [x.to(device) for x in batch]
            Y_input, Y_label, Y_vlen = Y[:, :-1], Y[:, 1:], Y_vlen - 1
            Y_hat, _ = model(X, Y_input, X_vlen, Y_vlen)
            l = loss(Y_hat, Y_label, Y_vlen).sum()
            l.backward()
            with torch.no_grad():
                grad_clipping_nn(model, 5, device)
            num_tokens = Y_vlen.sum().item()
            optimizer.step()
            l_sum += l.sum().item()
            num_tokens_sum += num_tokens
        if epoch % 50 == 0:
            print("epoch {0:4d}, loss {1:.3f}, time {2:.1f} sec".format(
                epoch, (l_sum / num_tokens_sum), time.time() - tic))
            tic = time.time()


def translate_ch7(model, src_sentence, src_vocab, tgt_vocab, max_len, device):
    """Translate based on an encoder-decoder model with greedy search."""
    src_tokens = src_vocab[src_sentence.lower().split(' ')]
    src_len = len(src_tokens)
    if src_len < max_len:
        src_tokens += [src_vocab.pad] * (max_len - src_len)
    enc_X = torch.tensor(src_tokens, device=device)
    enc_valid_length = torch.tensor([src_len], device=device)
    # use unsqueeze to add the batch_size dimension.
    enc_outputs = model.encoder(enc_X.unsqueeze(dim=0), enc_valid_length)
    dec_state = model.decoder.init_state(enc_outputs, enc_valid_length)
    dec_X = torch.tensor([tgt_vocab.bos], device=device).unsqueeze(dim=0)
    predict_tokens = []
    for _ in range(max_len):
        Y, dec_state = model.decoder(dec_X, dec_state)
        # The token with highest score is used as the next time step input.
        dec_X = Y.argmax(dim=2)
        py = dec_X.squeeze(dim=0).int().item()
        if py == tgt_vocab.eos:
            break
        predict_tokens.append(py)
    return ' '.join(tgt_vocab.to_tokens(predict_tokens))


def to_onehot(X, size):
    return F.one_hot(X.long().transpose(0, -1), size)


def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state, num_hiddens,
                vocab, device):
    """Predict next chars with an RNN model"""
    state = init_rnn_state(1, num_hiddens, device)
    output = [vocab[prefix[0]]]
    for t in range(num_chars + len(prefix) - 1):
        # The output of the previous time step is taken as the input of the
        # current time step.
        X = to_onehot(torch.tensor([output[-1]], dtype=torch.float32,
                                   device=device), len(vocab))
        # Calculate the output and update the hidden state
        (Y, state) = rnn(X, state, params)
        # The input to the next time step is the character in the prefix or
        # the current best predicted character
        if t < len(prefix) - 1:
            # Read off from the given sequence of characters
            output.append(vocab[prefix[t + 1]])
        else:
            # This is maximum likelihood decoding. Modify this if you want to
            # use sampling, beam search or beam sampling for better sequences.
            output.append(int(Y[0].argmax(dim=1).item()))
    return ''.join([vocab.idx_to_token[i] for i in output])


def predict_rnn_nn(prefix, num_chars, batch_size, num_hiddens, num_layers,
                   model, vocab, device):
    """Predict next chars with a RNN model."""
    # Use the model's member function to initialize the hidden state
    state = model.begin_state(num_hiddens=num_hiddens, device=device,
                              num_layers=num_layers)
    output = [vocab[prefix[0]]]
    for t in range(num_chars + len(prefix) - 1):
        X = torch.tensor([output[-1]], dtype=torch.float32,
                         device=device).reshape((1, 1))
        # Forward computation does not require incoming model parameters
        (Y, state) = model(X, state)
        if t < len(prefix) - 1:
            output.append(vocab[prefix[t + 1]])
        else:
            output.append(int(Y.argmax(dim=1).item()))
    return ''.join([vocab.idx_to_token[i] for i in output])


def train_and_predict_rnn_nn(model, num_hiddens, init_gru_state, corpus_indices,
                             vocab, device, num_epochs, num_steps, lr,
                             clipping_theta, batch_size, prefixes, num_layers=1):
    """Train a RNN model and predict the next item in the sequence."""
    loss = nn.CrossEntropyLoss()
    optm = torch.optim.SGD(model.parameters(), lr=lr)
    start = time.time()
    for epoch in range(1, num_epochs + 1):
        l_sum, n = 0.0, 0
        data_iter = data_iter_consecutive(
            corpus_indices, batch_size, num_steps, device)
        state = model.begin_state(batch_size=batch_size, num_hiddens=num_hiddens,
                                  device=device, num_layers=num_layers)
        for X, Y in data_iter:
            # Detach the hidden state in place; the original called
            # `s.detach()`, which is a no-op, and compensated with
            # `retain_graph=True` in backward()
            for s in state:
                s.detach_()
            X = X.to(dtype=torch.long)
            (output, state) = model(X, state)
            y = Y.t().reshape((-1,))
            l = loss(output, y.long()).mean()
            optm.zero_grad()
            l.backward()
            with torch.no_grad():
                # Clip the gradient
                grad_clipping_nn(model, clipping_theta, device)
                # Since the error has already taken the mean, the gradient does
                # not need to be averaged
                optm.step()
            l_sum += l.item() * y.numel()
            n += y.numel()
        if epoch % (num_epochs // 4) == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch, math.exp(l_sum / n), time.time() - start))
            start = time.time()
        if epoch % (num_epochs // 2) == 0:
            for prefix in prefixes:
                print(' -', predict_rnn_nn(prefix, 50, batch_size, num_hiddens,
                                           num_layers, model, vocab, device))


def train_2d(trainer):
    """Optimize a 2-dim objective function with a customized trainer."""
    # s1 and s2 are internal state variables and will
    # be used later in the chapter
    x1, x2, s1, s2 = -5, -2, 0, 0
    results = [(x1, x2)]
    for i in range(20):
        x1, x2, s1, s2 = trainer(x1, x2, s1, s2)
        results.append((x1, x2))
    print('epoch %d, x1 %f, x2 %f' % (i + 1, x1, x2))
    return results


def train_ch10(trainer, hyperparams, data_iter, feature_dim, num_epochs=2):
    # Initialization
    w1 = np.random.normal(scale=0.01, size=(feature_dim, 1))
    b1 = np.zeros(1)
    w = Variable(torch.from_numpy(w1), requires_grad=True)
    b = Variable(torch.from_numpy(b1), requires_grad=True)

    if trainer.__name__ == 'SGD':
        optimizer = trainer([w, b], lr=hyperparams['lr'],
                            momentum=hyperparams['momentum'])
    elif trainer.__name__ == 'RMSprop':
        optimizer = trainer([w, b], lr=hyperparams['lr'],
                            alpha=hyperparams['gamma'])

    net, loss = lambda X: linreg(X, w, b), squared_loss
    # Train
    animator = Animator(xlabel='epoch', ylabel='loss',
                        xlim=[0, num_epochs], ylim=[0.22, 0.35])
    n, timer = 0, Timer()
    for _ in range(num_epochs):
        for X, y in data_iter:
            X, y = Variable(X), Variable(y)
            optimizer.zero_grad()
            output = net(X)
            l = loss(output, y).mean()
            l.backward()
            optimizer.step()
            n += X.shape[0]
            if n % 200 == 0:
                timer.stop()
                animator.add(n / X.shape[0] / len(data_iter),
                             evaluate_loss(net, data_iter, loss))
                timer.start()
    print('loss: %.3f, %.3f sec/epoch' % (animator.Y[0][-1], timer.avg()))
    # return timer.cumsum(), animator.Y[0]
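A standalone check of the clipping rule implemented by grad_clipping above: if the global L2 norm of the gradients exceeds theta, every gradient is scaled by theta / norm (toy tensor, theta chosen for illustration).

import torch

p = torch.zeros(2, requires_grad=True)
p.grad = torch.tensor([3.0, 4.0])  # global norm = 5
theta = 1.0
norm = torch.sqrt((p.grad ** 2).sum())
if norm > theta:
    p.grad.mul_(theta / norm)
print(p.grad)  # tensor([0.6000, 0.8000])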
{"hexsha": "070d94176a01972f335ba956f04be70a63318d70", "size": 16714, "ext": "py", "lang": "Python", "max_stars_repo_path": "d2l/train.py", "max_stars_repo_name": "cingtiye/d2l-pytorch", "max_stars_repo_head_hexsha": "9152fa5309fd48ac06b85e961ca13d44a425a647", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-03-18T13:53:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-27T15:22:00.000Z", "max_issues_repo_path": "d2l/train.py", "max_issues_repo_name": "cingtiye/d2l-pytorch", "max_issues_repo_head_hexsha": "9152fa5309fd48ac06b85e961ca13d44a425a647", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "d2l/train.py", "max_forks_repo_name": "cingtiye/d2l-pytorch", "max_forks_repo_head_hexsha": "9152fa5309fd48ac06b85e961ca13d44a425a647", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0773195876, "max_line_length": 119, "alphanum_fraction": 0.5910015556, "include": true, "reason": "import numpy", "num_tokens": 4195}
/*
Copyright 2014-2015 Glen Joseph Fernandes
(glenjofe@gmail.com)

Distributed under the Boost Software License, Version 1.0.
(http://www.boost.org/LICENSE_1_0.txt)
*/
#include <boost/align/alignment_of.hpp>
#include <boost/align/is_aligned.hpp>
#include <boost/core/lightweight_test.hpp>
#include <boost/config.hpp>

template<std::size_t N>
struct A { };

template<std::size_t N>
void test(char* p, A<N>)
{
    BOOST_TEST(boost::alignment::is_aligned(p, N));
    BOOST_TEST(!boost::alignment::is_aligned(p + 1, N));
}

void test(char* p, A<1>)
{
    BOOST_TEST(boost::alignment::is_aligned(p, 1));
}

template<class T>
void test()
{
    T o;
    test(reinterpret_cast<char*>(&o),
        A<boost::alignment::alignment_of<T>::value>());
}

class X;

int main()
{
    test<bool>();
    test<char>();
    test<wchar_t>();
#if !defined(BOOST_NO_CXX11_CHAR16_T)
    test<char16_t>();
#endif
#if !defined(BOOST_NO_CXX11_CHAR32_T)
    test<char32_t>();
#endif
    test<short>();
    test<int>();
    test<long>();
#if !defined(BOOST_NO_LONG_LONG) && !defined(_MSC_VER)
    test<long long>();
#endif
    test<float>();
#if !defined(BOOST_MSVC)
    test<double>();
    test<long double>();
#endif
    test<void*>();
    test<char*>();
    test<int*>();
    test<X*>();
    test<void(*)()>();
#if !defined(BOOST_MSVC)
    test<int X::*>();
    test<int(X::*)()>();
#endif

    return boost::report_errors();
}
{"hexsha": "13d330f5a9ecb3d9e26c6d8cbad2612e5e2fcb51", "size": 1472, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/align/test/is_aligned_test.cpp", "max_stars_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_stars_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32.0, "max_stars_repo_stars_event_min_datetime": "2019-02-27T06:57:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-29T10:56:19.000Z", "max_issues_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/align/test/is_aligned_test.cpp", "max_issues_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_issues_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-03-04T11:21:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-24T01:36:31.000Z", "max_forks_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/align/test/is_aligned_test.cpp", "max_forks_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_forks_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2019-08-20T13:45:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T18:23:49.000Z", "avg_line_length": 20.4444444444, "max_line_length": 59, "alphanum_fraction": 0.6039402174, "num_tokens": 384}
tmpdir/fdpic-shared.so:     file format elf32-(little|big)arm

DYNAMIC RELOCATION RECORDS
OFFSET   TYPE              VALUE
.* R_ARM_FUNCDESC_VALUE  my_shared_func3
{"hexsha": "22b755e51b61c14f8d164cb42375545f852c5ec3", "size": 168, "ext": "r", "lang": "R", "max_stars_repo_path": "contrib/gnu/gdb/dist/ld/testsuite/ld-arm/fdpic-shared.r", "max_stars_repo_name": "TheSledgeHammer/2.11BSD", "max_stars_repo_head_hexsha": "fe61f0b9aaa273783cd027c7b5ec77e95ead2153", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-05-04T17:09:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-04T07:19:26.000Z", "max_issues_repo_path": "ld/testsuite/ld-arm/fdpic-shared.r", "max_issues_repo_name": "greyblue9/binutils-gdb", "max_issues_repo_head_hexsha": "05377632b124fe7600eea7f4ee0e9a35d1b0cbdc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ld/testsuite/ld-arm/fdpic-shared.r", "max_forks_repo_name": "greyblue9/binutils-gdb", "max_forks_repo_head_hexsha": "05377632b124fe7600eea7f4ee0e9a35d1b0cbdc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.6666666667, "max_line_length": 61, "alphanum_fraction": 0.7142857143, "num_tokens": 46}
import torch
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from torch import nn, Tensor, optim
from typing import Tuple, Optional
from argparse import Namespace

from src.tasks import OmniTask
from src.utils.data import OmniDataset


class ClassificationTask(OmniTask):
    def __init__(
            self,
            name: str,
            args: Namespace,
            pad_token_id: int,
            eos_token_id: Optional[int]
    ):
        super(ClassificationTask, self).__init__(name, args)
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def get_loss_fn(cls, reduction='none', ignore_index=-1):
        return nn.CrossEntropyLoss(reduction=reduction, ignore_index=ignore_index)

    @classmethod
    def compute_correct(cls, logits: Tensor, labels: Tensor, **kwargs) -> Tuple[Tensor, int]:
        with torch.no_grad():
            pred_idx = logits.argmax(1)
            n_correct = pred_idx.eq(labels).sum().item()
        return pred_idx, n_correct

    def train(
            self,
            model: nn.Module,
            optimizer: optim.Optimizer,
            scheduler: optim.lr_scheduler.LambdaLR,
            dataloader: OmniDataset,
            device,
            **kwargs
    ) -> Tuple[float, float]:
        model.train()
        optimizer.zero_grad()
        loss_fn = self.get_loss_fn()
        total_loss = 0
        n_pred_total = 0
        n_pred_correct = 0
        steps = 0

        for batch_idx, batch in enumerate(dataloader):
            batch = tuple(t.to(device) for t in batch)
            src_seq_t, label_t = batch

            with torch.set_grad_enabled(True):
                logits_t, *_ = model(src_seq_t)
                loss_t = loss_fn(logits_t, label_t)
                loss_t = loss_t.mean(-1)

                if self.args.gradient_accumulation_steps > 1:
                    # scale the loss if gradient accumulation is used
                    loss_t = loss_t / self.args.gradient_accumulation_steps

                loss_t.backward()
                # clip_grad_norm is deprecated; the in-place variant is the
                # supported spelling
                nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm)

                # accumulate the gradients
                if batch_idx % self.args.gradient_accumulation_steps == 0:
                    optimizer.step()
                    scheduler.step()
                    optimizer.zero_grad()

                # update metrics
                steps += 1
                pred_t, n_correct = self.compute_correct(logits_t, label_t)
                total_loss += loss_t.item()
                n_pred_total += label_t.size(0)
                n_pred_correct += n_correct

            # `batch_idx % 100` alone was truthy on every batch *except*
            # multiples of 100; the comparison restores the intent
            if batch_idx % 100 == 0:
                torch.cuda.empty_cache()
                print(f"batch : {batch_idx}")

            # TODO: add gradual unfreeze
            if (steps / self.args.gradient_accumulation_steps) == self.args.steps_per_epoch:
                break

        steps /= self.args.gradient_accumulation_steps
        total_loss = total_loss / steps
        accuracy = n_pred_correct / n_pred_total
        self.global_step += int(steps)
        return total_loss, accuracy

    @classmethod
    def eval(
            cls,
            model: nn.Module,
            dataloader: OmniDataset,
            device,
            **kwargs
    ):
        model.eval()
        loss_fn = cls.get_loss_fn()
        total_loss = 0
        n_pred_total = 0
        n_pred_correct = 0
        steps = 0
        preds = []
        labels = []

        for batch_idx, batch in enumerate(dataloader):
            batch = tuple(t.to(device) for t in batch)
            src_seq_t, labels_t = batch

            with torch.set_grad_enabled(False):
                logits_t, *_ = model(src_seq_t)
                loss_t = loss_fn(logits_t, labels_t).mean(-1).item()
                pred_t, n_correct = cls.compute_correct(logits_t, labels_t)

                preds.append(pred_t.detach_().cpu().numpy())
                labels.append(labels_t.detach_().cpu().numpy())
                total_loss += loss_t
                n_pred_total += labels_t.size(0)
                n_pred_correct += n_correct
                steps += 1

        total_loss /= steps
        accuracy = n_pred_correct / n_pred_total
        labels = np.concatenate(labels)
        preds = np.concatenate(preds)
        prec, rec, f_score, _ = precision_recall_fscore_support(
            labels, preds, average="macro"
        )

        scores = dict(
            loss=total_loss,
            accuracy=accuracy,
            precision=prec,
            recall=rec,
            f_score=f_score
        )
        return scores
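A quick illustration of what compute_correct does, on toy tensors: argmax over the class axis, then count matches against the labels.

import torch

logits = torch.tensor([[0.1, 2.0], [1.5, 0.2], [0.0, 0.3]])
labels = torch.tensor([1, 0, 0])
pred_idx = logits.argmax(1)
print(pred_idx, pred_idx.eq(labels).sum().item())  # tensor([1, 0, 1]) 2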
{"hexsha": "bcc50c11a4499ad89fc1b7b22aa7b1b6ecf30232", "size": 4778, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tasks/classification.py", "max_stars_repo_name": "andompesta/omnitext", "max_stars_repo_head_hexsha": "da6467b6cd9086b2278f7a1560596261f125800e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tasks/classification.py", "max_issues_repo_name": "andompesta/omnitext", "max_issues_repo_head_hexsha": "da6467b6cd9086b2278f7a1560596261f125800e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tasks/classification.py", "max_forks_repo_name": "andompesta/omnitext", "max_forks_repo_head_hexsha": "da6467b6cd9086b2278f7a1560596261f125800e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1341463415, "max_line_length": 92, "alphanum_fraction": 0.55169527, "include": true, "reason": "import numpy", "num_tokens": 996}
C$Procedure      LBUPD ( Line buffer, update )

      SUBROUTINE LBUPD_1 ( NLINE, NCOM, PTRS )
      IMPLICIT NONE

C$ Abstract
C
C     Update internal information in a line buffer.
C
C$ Disclaimer
C
C     THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE
C     CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S.
C     GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE
C     ADMINISTRATION (NASA). THE SOFTWARE IS TECHNOLOGY AND SOFTWARE
C     PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED "AS-IS"
C     TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY
C     WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A
C     PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC
C     SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE
C     SOFTWARE AND RELATED MATERIALS, HOWEVER USED.
C
C     IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA
C     BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT
C     LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND,
C     INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS,
C     REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE
C     REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY.
C
C     RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF
C     THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY
C     CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE
C     ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE.
C
C$ Required_Reading
C
C     CB, LB
C
C$ Keywords
C
C     ASCII
C     CHARACTER
C     STRING
C     TEXT
C
C$ Declarations

      INTEGER               LBCELL
      PARAMETER           ( LBCELL = -5 )

      INTEGER               NLINE
      INTEGER               NCOM
      INTEGER               PTRS     ( LBCELL:* )

C$ Brief_I/O
C
C     Variable  I/O  Description
C     --------  ---  --------------------------------------------------
C     NLINE      I   Number of lines stored in the buffer.
C     NCOM       I   Number of complement intervals in the buffer.
C     PTRS      I,O  Pointer component of the buffer.
C
C$ Detailed_Input
C
C     NLINE       is the number of lines stored in the buffer, as
C                 the result of some change.
C
C     NCOM        is the number of complement intervals in the buffer,
C                 as the result of the same change.
C
C     PTRS        is the pointer component of a line buffer.
C
C$ Detailed_Output
C
C     PTRS        is the updated pointer component of a line buffer.
C
C$ Files
C
C     None.
C
C$ Exceptions
C
C     1) The error 'SPICE(LBCORRUPTED)' is signalled whenever any
C        of the following conditions is detected.
C
C        -- NLINE is less than zero.
C
C        -- NCOM is less than one.
C
C        -- The sum of NLINE and NCOM is greater than the maximum
C           number of lines that can be stored in the buffer.
C
C$ Particulars
C
C     LBUPD is provided for use by the LB routines in SPICELIB, and
C     should not be called directly except by those routines.
C
C$ Examples
C
C     LBUPD is used by LBINS and LBREM.
C
C$ Restrictions
C
C     None.
C
C$ Literature_References
C
C     None.
C
C$ Author_and_Institution
C
C     Dagny Taggart, (JPL)
C
C$ Version
C
C-    Beta Version 1.0.0, 19-JAN-1989 (DT)
C
C-&

C
C     SPICELIB functions
C
      LOGICAL               RETURN
      INTEGER               SIZEI

C
C     Standard error handling.
C
      IF ( RETURN() ) THEN
         RETURN
      ELSE
         CALL CHKIN ( 'LBUPD_1' )

         IF (      NLINE                .LT. 0
     .        .OR. NCOM                 .LT. 1
     .        .OR. ( NLINE + NCOM ) * 2 .GT. SIZEI ( PTRS ) ) THEN

            CALL SETMSG ( 'Tried to store # lines, # holes.' )
            CALL ERRINT ( '#', NLINE )
            CALL ERRINT ( '#', NCOM  )
            CALL SIGERR ( 'SPICE(LBCORRUPTED)' )
            CALL CHKOUT ( 'LBUPD_1' )
            RETURN

         END IF
      END IF

C
C     Save the current number of lines in element -2. We can infer the
C     cardinality of the cell from the total number of intervals.
C
      PTRS(-2) = NLINE

      CALL SCARDI ( 2 * ( NLINE + NCOM ), PTRS )

      CALL CHKOUT ( 'LBUPD_1' )
      RETURN
      END
{"hexsha": "a7d19fe737d25c09a7fb4e62a2a206df665e3388", "size": 4385, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "source/nasa_f/lbupd_1.f", "max_stars_repo_name": "agforero/FTFramework", "max_stars_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-08-19T21:43:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T02:57:25.000Z", "max_issues_repo_path": "source/nasa_f/lbupd_1.f", "max_issues_repo_name": "agforero/fortran-testing-framework", "max_issues_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-07T21:17:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-09T02:18:07.000Z", "max_forks_repo_path": "source/nasa_f/lbupd_1.f", "max_forks_repo_name": "agforero/fortran-testing-framework", "max_forks_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T08:41:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:41:53.000Z", "avg_line_length": 27.5786163522, "max_line_length": 71, "alphanum_fraction": 0.6031927024, "num_tokens": 1219}
""" An implementation of a general zero-knowledge proof protocl for claims in NP WARNING:: DO NOT USE THIS IN ANY SECURITY-CRITICAL CODE. This code has not been tested and probably has many security vulnerabilities. In particular, it use sage's default random number generator, which probably is not suitable for cryptographic use. Example Usage: Generate a random set of quadratic equations over `F_2` and a prover for them:: sage: instance, prover = make_random_instance(20, 10) (This creates a set of 10 quadratic equations over 20 variables.) Create an agent to interact with the prover:: sage: verifier = Verifier(instance) Conduct the verification protocol once (1/4 chance of catching cheaters). sage: verifier.interact_once(prover) True Repeat the testing protocol 100 times (if the prover is not genuine, it would a chance of less than 10^(-12) of getting away): sage: verifier.interact(prover, 100) True """ import hashlib import random from sage.rings.finite_rings.integer_mod_ring import IntegerModRing from sage.modules.free_module import VectorSpace from sage.quadratic_forms.quadratic_form import QuadraticForm from sage.structure.sage_object import dumps ZZ2 = IntegerModRing(2) def hash_sage_object(x): return hashlib.sha512(dumps(x)).digest() class Instance(object): def __init__(self, n, quad_forms, results): self.quad_forms = quad_forms self.domain_space = VectorSpace(ZZ2, n) self.range_space = VectorSpace(ZZ2, len(results)) self.results = self.range_space(results) def __call__(self, vector): result = [] for q in self.quad_forms: result.append(q(vector)) return self.range_space(result) def partial_map(self, x, y): result = [] for q in self.quad_forms: result.append(q(x + y) - q(x)) return self.range_space(result) def check(self, solution): return self(solution) == self.results class Prover(object): def __init__(self, instance, solution): if not instance.check(solution): raise ValueError, "The prover must be given a valid solution" self.instance = instance self.quad_forms = instance.quad_forms self.domain_space = instance.domain_space self.range_space = instance.range_space self.results = instance.results self.solution = self.domain_space(solution) self.step = 0 def step0(self): if self.step != 0: raise Exception, "Wrong step" self.step = self.step + 1 random = self.domain_space.random_element x = [] x.append(random()) x.append(random()) x.append(self.solution - x[0] - x[1]) self.x = x random = self.range_space.random_element c = [] c.append(random()) c.append(random()) c.append(-c[0] - c[1]) self.c = tuple(c) self.r = [] for i in range(3): self.r.append(c[i] + self.instance.partial_map(x[i], x[i-1])) result = [] result.append(hash_sage_object(self.c)) for i in range(3): result.append(hash_sage_object(self.x[i])) result.append(hash_sage_object(self.r[i])) return result def step1(self, i): if self.step != 1: raise Exception, "Wrong step" self.step = 0 if i < 3: return (self.x[i], self.x[i-1], self.c, self.r[i]) elif i == 3: return self.r else: raise ValueError, "Challenge %d mus be a number from 0 to 3" % i def bilinear_map(self, x, y): result = [] for q in self.quad_forms: result.append(q(x + y) - q(x)) return self.range_space(result) class Verifier(object): def __init__(self, instance): self.instance = instance def interact(self, prover, ntimes): for _ in range(ntimes): if not self.interact_once(prover): return False return True def interact_once(self, prover): h = prover.step0() hc = h[0] hx = h[1:7:2] hr = h[2:8:2] i = random.randint(0,3) if i < 3: xi, xim, c, ri = prover.step1(i) return 
hash_sage_object(xi) == hx[i] and \ hash_sage_object(xim) == hx[i-1] and \ hash_sage_object(c) == hc and \ hash_sage_object(ri) == hr[i] and \ xi in self.instance.domain_space and \ xim in self.instance.domain_space and \ c[0] in self.instance.range_space and \ c[0] + c[1] + c[2] == 0 and \ self.instance.partial_map(xi, xim) + c[i] == ri else: r = prover.step1(i) l = 0 for j in range(3): if hash_sage_object(r[j]) != hr[j] or \ r[j] not in self.instance.range_space: return False l = l + r[j] return l == self.instance.results def make_random_instance(n, m): solution = VectorSpace(ZZ2, n).random_element() results = [] quad_forms = [] for _ in range(m): e = [] for _ in range(n*(n+1)/2): e.append(ZZ2.random_element()) quad_form = QuadraticForm(ZZ2, n, e) quad_forms.append(quad_form) results.append(quad_form(solution)) instance = Instance(n, quad_forms, results) prover = Prover(instance, solution) return (instance, prover)
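A plain-Python illustration of the 3-way additive split performed in Prover.step0: two uniformly random shares plus (solution - x0 - x1) always recombine to the solution, so no single share carries information about it (toy bit-lists over F_2 stand in for Sage vectors here).

import random

solution = [1, 0, 1, 1]
x0 = [random.randint(0, 1) for _ in solution]
x1 = [random.randint(0, 1) for _ in solution]
x2 = [(s - a - b) % 2 for s, a, b in zip(solution, x0, x1)]
assert [(a + b + c) % 2 for a, b, c in zip(x0, x1, x2)] == solution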
{"hexsha": "d34dc2648d16b7ccecd48a0324c806fdf030f2dc", "size": 5595, "ext": "py", "lang": "Python", "max_stars_repo_path": "zero_knowledge.py", "max_stars_repo_name": "itaibn/zero-knowledge", "max_stars_repo_head_hexsha": "4a270ebb9baf1163006eaef104799072aaec7d84", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2016-10-02T02:18:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-07T01:47:06.000Z", "max_issues_repo_path": "zero_knowledge.py", "max_issues_repo_name": "itaibn/zero-knowledge", "max_issues_repo_head_hexsha": "4a270ebb9baf1163006eaef104799072aaec7d84", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "zero_knowledge.py", "max_forks_repo_name": "itaibn/zero-knowledge", "max_forks_repo_head_hexsha": "4a270ebb9baf1163006eaef104799072aaec7d84", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-07-28T14:11:08.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-02T13:45:06.000Z", "avg_line_length": 30.7417582418, "max_line_length": 80, "alphanum_fraction": 0.5948168007, "include": true, "reason": "from sage", "num_tokens": 1350}
# coding=utf-8
import cv2
import numpy as np
import pygame
import time
import trigger_email

# If a module fails to import, search for how to install <Package Name> for
# Python 3.6

THRESHOLD = 40

camera = cv2.VideoCapture(0)
es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 4))
kernel = np.ones((5, 5), np.uint8)
background = None

# Write test video
fps = 2  # camera.get(cv2.CAP_PROP_FPS)
pygame.mixer.init()
cameraSound = pygame.mixer.Sound("snapshotsound.ogg")
size = (int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
videoWriter = cv2.VideoWriter('C:\\basic_motion_detection.avi',
                              cv2.VideoWriter_fourcc('D', 'I', 'V', 'X'),
                              fps, size)

count = 3
while (True):
    while count > 0:
        time.sleep(1)
        count -= 1
        print(str(count) + "...")

    ret, frame = camera.read()
    # The first frame as the background
    if background is None:
        background = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        background = cv2.GaussianBlur(background, (21, 21), 0)
        continue

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray_frame = cv2.GaussianBlur(gray_frame, (21, 21), 0)

    # Compare the difference between each frame of image and the background
    # print(background.shape, gray_frame.shape)
    diff = cv2.absdiff(background, gray_frame)
    diff = cv2.threshold(diff, THRESHOLD, 255, cv2.THRESH_BINARY)[1]
    diff = cv2.dilate(diff, es, iterations=2)

    # Calculate the outline of the target in the image
    # (OpenCV 3 returns three values here; OpenCV 4 drops the first one)
    image, cnts, hierarchy = cv2.findContours(diff.copy(), cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)
    print("Detecting " + str(len(cnts)) + " Moving Objects")
    if len(cnts) > 0:
        cameraSound.play()
        trigger_email.send_email()

    for c in cnts:
        if cv2.contourArea(c) < 1500:
            continue
        # Calculate the bounding box
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("contours", frame)
    videoWriter.write(frame)
    cv2.imshow("dif", diff)
    # cv2.imwrite('didff.jpg', diff)
    if cv2.waitKey(int(1000 / 12)) & 0xff == ord('q'):
        break

cv2.destroyAllWindows()
camera.release()
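A tiny standalone demo of the frame-differencing core used above, on synthetic 4x4 frames (the threshold value matches the script's 40): absolute difference against the background, then a binary threshold.

import cv2
import numpy as np

background = np.zeros((4, 4), dtype=np.uint8)
frame = background.copy()
frame[1:3, 1:3] = 200  # a bright "moving object"
diff = cv2.absdiff(background, frame)
mask = cv2.threshold(diff, 40, 255, cv2.THRESH_BINARY)[1]
print(mask)  # 255 where the object moved, 0 elsewhere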
{"hexsha": "293637e8d9c302076ba4f81fa0c4c5a573ea2a98", "size": 2321, "ext": "py", "lang": "Python", "max_stars_repo_path": "PythonCode/Windows.py", "max_stars_repo_name": "omkaryadav/UltimateSecurityCam", "max_stars_repo_head_hexsha": "7241c4c568bc3f2d530f79fd6b0f25f5920606ae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PythonCode/Windows.py", "max_issues_repo_name": "omkaryadav/UltimateSecurityCam", "max_issues_repo_head_hexsha": "7241c4c568bc3f2d530f79fd6b0f25f5920606ae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PythonCode/Windows.py", "max_forks_repo_name": "omkaryadav/UltimateSecurityCam", "max_forks_repo_head_hexsha": "7241c4c568bc3f2d530f79fd6b0f25f5920606ae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2361111111, "max_line_length": 89, "alphanum_fraction": 0.6363636364, "include": true, "reason": "import numpy", "num_tokens": 631}
from df_compare import df_compare
import pandas as pd
import numpy as np
import datetime
import pytest
import warnings
import logging

logging.basicConfig(level=logging.WARNING)
warnings.filterwarnings(action='ignore',
                        category=pd.core.common.SettingWithCopyWarning)


@pytest.fixture(scope='session')
def base_dict():
    """Simple dict to make DataFrames from. Has 5 dtypes."""
    d = {
        'i': [0, 1, 2],
        'f': [0.0, np.nan, 2.0],
        'd': pd.Series(['2018-01-01', '2019-01-01', '2020-01-01'],
                       dtype='datetime64[ns]'),
        's': ['0', '1', '2'],
        'b': [False, True, True],
    }
    return d


@pytest.fixture(scope='session')
def base_df(base_dict):
    """Simple DataFrame to make tests from. Has 5 dtypes."""
    return pd.DataFrame(base_dict)


@pytest.fixture
def character_set():
    """Characters of comparison. All of the keys that may show up in
    df_compare."""
    return {'rows', 'columns', 'dtypes', 'index', 'int', 'bool', 'float',
            'datetime', 'object', 'nan'}


def test_nrows(base_df, character_set):
    """Test number of rows when same, different, and its description."""
    # Match
    df_obs = base_df.copy()
    diffs = df_compare(df_obs=df_obs, df_exp=base_df)
    assert 'rows' not in diffs

    # Duplicate values in index
    df_obs2 = pd.concat([df_obs, base_df])
    diffs2 = df_compare(df_obs=df_obs2, df_exp=base_df)
    assert 'rows' in diffs2
    assert 'index' in diffs2
    assert not diffs2['complete']
    assert diffs2['rows'] == (f'rows differ: df_obs has {len(df_obs2)}. '
                              f'df_exp has {len(base_df)}')

    # Extra rows, but some match index values
    df_obs3 = df_obs2.reset_index(drop=True)
    diffs3 = df_compare(df_obs=df_obs3, df_exp=base_df)
    assert 'rows' in diffs3
    assert 'index' in diffs3
    assert 'int' not in diffs3
    assert all([key not in diffs for key in character_set - {'rows', 'index'}])
    assert diffs3['complete']


def test_columns(base_df, character_set):
    df_obs = base_df.copy()
    df_obs['s_copy'] = df_obs['s']  # Add a column
    diffs = df_compare(df_obs=df_obs, df_exp=base_df)
    assert all([key not in diffs for key in character_set - {'columns'}])
    assert diffs['columns'] == ("columns are different: 1 total. (first few) "
                                "not_in_obs: []. not_in_exp: ['s_copy'].")


def test_dtypes(base_df, character_set):
    df_obs = base_df.copy()
    df_obs['s'] = df_obs['s'].astype(int)  # Switch type
    diffs = df_compare(df_obs=df_obs, df_exp=base_df)
    assert all([key not in diffs for key in character_set - {'dtypes'}])
    assert diffs.get('dtypes') is not None


def test_index(base_df, character_set):
    df_exp = base_df.copy()
    df_exp = df_exp.set_index('s')
    df_obs = base_df.copy()
    df_obs['s'].iloc[1] = '3'
    df_obs = df_obs.set_index('s')
    diffs = df_compare(df_obs=df_obs, df_exp=df_exp)
    assert all([key not in diffs for key in character_set - {'index'}])
    assert diffs.get('index') is not None
    assert '3' in diffs['index']


def test_ints(base_df, character_set):
    """Test comparison of integers."""
    df_obs = base_df.copy()
    df_obs['i'].iloc[:2] = [3, 2]  # Change a couple values
    diffs = df_compare(df_obs=df_obs, df_exp=base_df)
    assert all([key not in diffs for key in character_set - {'int'}])
    assert diffs.get('int') is not None
    assert isinstance(diffs['int'], str)


def test_bools(base_df, character_set):
    df_obs = base_df.copy()
    df_obs['b'].iloc[:2] = [True, True]  # Change a couple values
    diffs = df_compare(df_obs=df_obs, df_exp=base_df)
    assert all([key not in diffs for key in character_set - {'bool'}])
    assert diffs.get('bool') is not None
    assert isinstance(diffs['bool'], str)


def test_floats(base_df, character_set):
    df_obs = base_df.copy()
    df_obs['f'].iloc[2] = 2.5
    diffs = df_compare(df_obs=df_obs, df_exp=base_df)
    assert all([key not in diffs for key in character_set - {'float'}])
    assert 'floats are different:' in diffs.get('float')


def test_strings(base_df, character_set):
    """TODO: This naive test does not test non-string objects, nor does it
    handle the StringDtype of pandas 1.0."""
    df_obs = base_df.copy()
    df_obs['s'].iloc[:2] = ['foo', 'bar']  # Change a couple values
    diffs = df_compare(df_obs=df_obs, df_exp=base_df)
    assert all([key not in diffs for key in character_set - {'object'}])
    assert diffs.get('object') is not None
    assert isinstance(diffs['object'], str)


def test_dates(base_df, character_set):
    """Test calendar dates. Time and zone are tossed away."""
    df_obs = base_df.copy()
    df_obs['d'] = pd.Series(['2017-12-31', '2019-01-01', '2020-01-01'],
                            dtype='datetime64[ns]')
    diffs = df_compare(df_obs=df_obs, df_exp=base_df)
    assert all([key not in diffs for key in character_set - {'datetime'}])
    assert 'datetime' in diffs

    df_obs.loc[0, 'd'] = pd.NaT
    df_exp = base_df.copy()
    df_exp.loc[0, 'd'] = pd.NaT
    diffs = df_compare(df_obs=df_obs, df_exp=df_exp)
    assert 'datetime' not in diffs


def test_nan(base_df, character_set):
    # No difference when values are different, but nan is in the same place
    df_obs = base_df.copy()
    df_obs['f'] = [10, np.nan, 10.0]
    diffs = df_compare(df_obs=df_obs, df_exp=base_df)
    assert diffs['complete'] is True
    assert all([key not in diffs for key in character_set - {'float'}])
    assert 'nan' not in diffs

    # Differences, plus NaT, and optionally Nullable Integer
    df_exp = base_df.copy()
    if pd.__version__ >= '1.0':
        df_exp['Int64'] = pd.array([0, 1, None])
        df_obs['Int64'] = pd.array([0, 1, 2], dtype=pd.Int64Dtype())
    df_obs.loc[0, 'd'] = pd.NaT
    diffs = df_compare(df_obs=df_obs, df_exp=df_exp)
    assert all([key not in diffs
                for key in character_set - {'nan', 'float', 'datetime'}])
    assert 'float' in diffs
    assert 'nan' in diffs
    assert 'datetime' in diffs
    assert 'nans are different:' in diffs.get('nan')
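For orientation, a minimal usage sketch of df_compare consistent with the tests above; the keys and message fragments follow what the tests assert, everything else is illustrative.

import pandas as pd
from df_compare import df_compare

df_exp = pd.DataFrame({'i': [0, 1, 2], 'f': [0.0, 1.5, 2.0]})
df_obs = df_exp.copy()
df_obs.loc[2, 'f'] = 9.9  # introduce a float difference

diffs = df_compare(df_obs=df_obs, df_exp=df_exp)
# Per the tests, 'float' should be present and start with
# 'floats are different:'; 'complete' reports whether the comparison ran fully.
print(diffs.get('complete'), diffs.get('float'))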
{"hexsha": "ed56c54d41aa57ef9a62aaf98de26a9a0c71897e", "size": 5959, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_core.py", "max_stars_repo_name": "caseyclements/df_compare", "max_stars_repo_head_hexsha": "4ece5c4d1c78c25b629c7b748fea610d39be0843", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_core.py", "max_issues_repo_name": "caseyclements/df_compare", "max_issues_repo_head_hexsha": "4ece5c4d1c78c25b629c7b748fea610d39be0843", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-09T13:29:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-09T13:29:18.000Z", "max_forks_repo_path": "test/test_core.py", "max_forks_repo_name": "caseyclements/df_compare", "max_forks_repo_head_hexsha": "4ece5c4d1c78c25b629c7b748fea610d39be0843", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8479532164, "max_line_length": 117, "alphanum_fraction": 0.6588353751, "include": true, "reason": "import numpy", "num_tokens": 1689}
# -*- coding: utf-8 -*-
"""Optimization of hyper parameters.

Both grid search and random search using the ``hyperopt`` library are
supported. The hyper parameter specification of a reconstructor class,
optionally including default options for optimization, is given in the
class attribute :attr:`~dival.Reconstructor.HYPER_PARAMS`.
"""
from itertools import product
from warnings import warn
import numpy as np
from hyperopt import hp, fmin, tpe, Trials, space_eval
from tqdm import tqdm
from dival.util.std_out_err_redirect_tqdm import std_out_err_redirect_tqdm
from dival.measure import Measure
from dival import LearnedReconstructor


def optimize_hyper_params(reconstructor, validation_data, measure,
                          dataset=None, HYPER_PARAMS_override=None,
                          hyperopt_max_evals=1000,
                          hyperopt_max_evals_retrain=1000,
                          hyperopt_rstate=None, show_progressbar=True,
                          tqdm_file=None):
    """Optimize hyper parameters of a reconstructor.

    Parameters
    ----------
    reconstructor : :class:`.Reconstructor`
        The reconstructor.
    validation_data : :class:`.DataPairs`
        The validation data on which the performance is measured.
    measure : :class:`.Measure` or str
        The measure to use as the objective. The sign is chosen
        automatically depending on the measure's
        :attr:`~Measure.measure_type`.
    dataset : :class:`.Dataset`, optional
        The dataset used for training `reconstructor` if it is a
        :class:`LearnedReconstructor`.
    HYPER_PARAMS_override : dict, optional
        Hyper parameter specification overriding the defaults in
        ``type(reconstructor).HYPER_PARAMS``.
        The structure of this dict is the same as the structure of
        :attr:`Reconstructor.HYPER_PARAMS`, except that all fields are
        optional. Here, each value of a dict for one parameter is treated
        as an entity, i.e. specifying the dict
        ``HYPER_PARAMS[...]['grid_search_options']`` overrides the whole
        dict, not only the specified keys in it.
    hyperopt_max_evals : int, optional
        Number of evaluations for different combinations of the parameters
        that are optimized by ``hyperopt`` and that do not require
        retraining. Should be chosen depending on the complexity of
        dependence and the number of such parameters.
    hyperopt_max_evals_retrain : int, optional
        Number of evaluations for different combinations of the parameters
        that are optimized by ``hyperopt`` and that require retraining.
        Should be chosen depending on the complexity of dependence and the
        number of such parameters.
    hyperopt_rstate : :class:`np.random.RandomState`, optional
        Random state for the random searches performed by ``hyperopt``.
    show_progressbar : bool, optional
        Whether to show a progress bar for the optimization.
        Default: ``True``.
    tqdm_file : file-like object
        File/stream to pass to ``tqdm``.
    """
    if isinstance(measure, str):
        measure = Measure.get_by_short_name(measure)
    if dataset is None and isinstance(reconstructor, LearnedReconstructor):
        raise ValueError('dataset required for training of '
                         '`LearnedReconstructor`')
    if HYPER_PARAMS_override is None:
        HYPER_PARAMS_override = {}
    for k in HYPER_PARAMS_override.keys():
        if k not in type(reconstructor).HYPER_PARAMS.keys():
            warn("unknown hyper param '{}' for reconstructor of type '{}'"
                 .format(k, type(reconstructor)))
    params = {}
    params_retrain = {}
    for k, v in type(reconstructor).HYPER_PARAMS.items():
        param = v.copy()
        param.update(HYPER_PARAMS_override.get(k, {}))
        param.setdefault('method', 'grid_search')
        retrain = v.get('retrain', False)
        if retrain:
            params_retrain[k] = param
        else:
            params[k] = param
    loss_sign = 1 if measure.measure_type == 'distance' else -1

    def fn(x):
        reconstructor.hyper_params.update(x)
        reconstructions = [reconstructor.reconstruct(observation)
                           for observation in validation_data.observations]
        measure_values = [measure.apply(r, g) for r, g in
                          zip(reconstructions, validation_data.ground_truth)]
        loss = loss_sign * np.mean(measure_values)
        return {'status': 'ok', 'loss': loss}

    def fn_retrain(x):
        reconstructor.hyper_params.update(x)
        reconstructor.train(dataset)
        best_sub_hp = _optimize_hyper_params_impl(
            reconstructor, fn, params, hyperopt_max_evals=hyperopt_max_evals,
            hyperopt_rstate=hyperopt_rstate, show_progressbar=False)
        reconstructions = [reconstructor.reconstruct(observation)
                           for observation in validation_data.observations]
        measure_values = [measure.apply(r, g) for r, g in
                          zip(reconstructions, validation_data.ground_truth)]
        loss = loss_sign * np.mean(measure_values)
        return {'status': 'ok', 'loss': loss, 'best_sub_hp': best_sub_hp}

    if params_retrain:
        best_hyper_params = _optimize_hyper_params_impl(
            reconstructor, fn_retrain, params_retrain,
            hyperopt_max_evals=hyperopt_max_evals_retrain,
            hyperopt_rstate=hyperopt_rstate,
            show_progressbar=show_progressbar, tqdm_file=tqdm_file)
    else:
        best_hyper_params = _optimize_hyper_params_impl(
            reconstructor, fn, params, hyperopt_max_evals=hyperopt_max_evals,
            hyperopt_rstate=hyperopt_rstate,
            show_progressbar=show_progressbar, tqdm_file=tqdm_file)
    return best_hyper_params


def _optimize_hyper_params_impl(reconstructor, fn, params,
                                hyperopt_max_evals=1000,
                                hyperopt_rstate=None, show_progressbar=True,
                                tqdm_file=None):
    grid_search_params = []
    grid_search_param_choices = []
    hyperopt_space = {}
    for k, param in params.items():
        method = param['method']
        if method == 'grid_search':
            grid_search_options = param.get('grid_search_options', {})
            choices = param.get('choices')
            if choices is None:
                range_ = param.get('range')
                if range_ is not None:
                    grid_type = grid_search_options.get('type', 'linear')
                    if grid_type == 'linear':
                        n = grid_search_options.get('num_samples', 10)
                        choices = np.linspace(range_[0], range_[1], n)
                    elif grid_type == 'logarithmic':
                        n = grid_search_options.get('num_samples', 10)
                        b = grid_search_options.get('log_base', 10.)
                        choices = np.logspace(range_[0], range_[1], n, base=b)
                    else:
                        raise ValueError(
                            "unknown grid type '{grid_type}' in {reco_cls}."
                            "HYPER_PARAMS['{k}']['grid_search_options']".format(
                                grid_type=grid_type,
                                reco_cls=reconstructor.__class__.__name__,
                                k=k))
                else:
                    raise ValueError(
                        "neither 'choices' nor 'range' is specified in "
                        "{reco_cls}.HYPER_PARAMS['{k}'], one of them must be "
                        "specified for grid search".format(
                            reco_cls=reconstructor.__class__.__name__, k=k))
            grid_search_params.append(k)
            grid_search_param_choices.append(choices)
        elif method == 'hyperopt':
            hyperopt_options = param.get('hyperopt_options', {})
            space = hyperopt_options.get('space')
            if space is None:
                choices = param.get('choices')
                if choices is None:
                    range_ = param.get('range')
                    if range_ is not None:
                        space_type = hyperopt_options.get('type', 'uniform')
                        if space_type == 'uniform':
                            space = hp.uniform(k, range_[0], range_[1])
                        else:
                            raise ValueError(
                                "unknown hyperopt space type '{space_type}' "
                                "in {reco_cls}.HYPER_PARAMS['{k}']"
                                "['hyperopt_options']".format(
                                    space_type=space_type,
                                    reco_cls=reconstructor.__class__.__name__,
                                    k=k))
                    else:
                        raise ValueError(
                            "neither 'choices' nor 'range' is specified in "
                            "{reco_cls}.HYPER_PARAMS['{k}']"
                            "['hyperopt_options']. One of these or "
                            "{reco_cls}.HYPER_PARAMS['{k}']"
                            "['hyperopt_options']['space'] must be specified "
                            "for hyperopt param search".format(
                                reco_cls=reconstructor.__class__.__name__,
                                k=k))
                else:
                    space = hp.choice(k, choices)
            hyperopt_space[k] = space
        else:
            raise ValueError("unknown method '{method}' for "
                             "{reco_cls}.HYPER_PARAMS['{k}']".format(
                                 method=method,
                                 reco_cls=reconstructor.__class__.__name__,
                                 k=k))
    best_loss = np.inf
    best_hyper_params = None
    with std_out_err_redirect_tqdm(tqdm_file) as orig_stdout:
        grid_search_total = np.prod([len(c)
                                     for c in grid_search_param_choices])
        for grid_search_values in tqdm(
                product(*grid_search_param_choices),
                desc='hyper param opt. for {reco_cls}'.format(
                    reco_cls=type(reconstructor).__name__),
                total=grid_search_total, file=orig_stdout, leave=False,
                disable=not show_progressbar):
            grid_search_param_dict = dict(zip(grid_search_params,
                                              grid_search_values))
            reconstructor.hyper_params.update(grid_search_param_dict)
            if len(hyperopt_space) == 0:
                result = fn({})
                if result['loss'] < best_loss:
                    best_loss = result['loss']
                    best_hyper_params = result.get('best_sub_hp', {})
                    best_hyper_params.update(grid_search_param_dict)
            else:
                trials = Trials()
                argmin = fmin(fn=fn, space=hyperopt_space, algo=tpe.suggest,
                              max_evals=hyperopt_max_evals, trials=trials,
                              rstate=hyperopt_rstate, show_progressbar=False)
                best_trial = trials.best_trial
                if best_trial['result']['loss'] < best_loss:
                    best_loss = best_trial['result']['loss']
                    best_hyper_params = best_trial['result'].get(
                        'best_sub_hp', {})
                    best_hyper_params.update(grid_search_param_dict)
                    best_hyper_params.update(
                        space_eval(hyperopt_space, argmin))
    if best_hyper_params is not None:
        reconstructor.hyper_params.update(best_hyper_params)
    return best_hyper_params
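As a standalone illustration of the hyperopt pattern this module wraps (a space of hp.uniform/hp.choice expressions minimized by fmin with tpe.suggest, then decoded by space_eval), here is a minimal sketch with a made-up quadratic objective; it does not use dival's API.

from hyperopt import hp, fmin, tpe, Trials, space_eval

# Hypothetical search space over two hyper parameters.
space = {'lr': hp.uniform('lr', 0.0, 1.0),
         'init': hp.choice('init', ['zeros', 'ones'])}

def objective(x):
    # Made-up loss with a minimum near lr=0.3 and init='zeros'.
    loss = (x['lr'] - 0.3) ** 2 + (0.0 if x['init'] == 'zeros' else 0.1)
    return {'status': 'ok', 'loss': loss}

trials = Trials()
argmin = fmin(fn=objective, space=space, algo=tpe.suggest,
              max_evals=50, trials=trials, show_progressbar=False)
# space_eval maps hyperopt's internal choice indices back to actual values.
print(space_eval(space, argmin))  # e.g. {'init': 'zeros', 'lr': ~0.3}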
{"hexsha": "88df9d4018c89be81542a3a02938f800a834f5c8", "size": 12096, "ext": "py", "lang": "Python", "max_stars_repo_path": "dival/hyper_param_search.py", "max_stars_repo_name": "MBaltz/dival", "max_stars_repo_head_hexsha": "b7c10ed471d05242312a7d4916900c92e0c36cdb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2019-08-06T11:41:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T08:57:41.000Z", "max_issues_repo_path": "dival/hyper_param_search.py", "max_issues_repo_name": "MBaltz/dival", "max_issues_repo_head_hexsha": "b7c10ed471d05242312a7d4916900c92e0c36cdb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2019-12-03T22:02:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-04T07:29:46.000Z", "max_forks_repo_path": "dival/hyper_param_search.py", "max_forks_repo_name": "MBaltz/dival", "max_forks_repo_head_hexsha": "b7c10ed471d05242312a7d4916900c92e0c36cdb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-10-07T09:21:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T09:08:01.000Z", "avg_line_length": 45.9923954373, "max_line_length": 79, "alphanum_fraction": 0.5636574074, "include": true, "reason": "import numpy", "num_tokens": 2343}
%%
%% Automatically generated file from DocOnce source
%% (https://github.com/hplgit/doconce/)
%%
% #ifdef PTEX2TEX_EXPLANATION
%%
%% The file follows the ptex2tex extended LaTeX format, see
%% ptex2tex: http://code.google.com/p/ptex2tex/
%%
%% Run
%% ptex2tex myfile
%% or
%% doconce ptex2tex myfile
%%
%% to turn myfile.p.tex into an ordinary LaTeX file myfile.tex.
%% (The ptex2tex program: http://code.google.com/p/ptex2tex)
%% Many preprocess options can be added to ptex2tex or doconce ptex2tex
%%
%% ptex2tex -DMINTED myfile
%% doconce ptex2tex myfile envir=minted
%%
%% ptex2tex will typeset code environments according to a global or local
%% .ptex2tex.cfg configure file. doconce ptex2tex will typeset code
%% according to options on the command line (just type doconce ptex2tex to
%% see examples). If doconce ptex2tex has envir=minted, it enables the
%% minted style without needing -DMINTED.
% #endif

% #define PREAMBLE

% #ifdef PREAMBLE
%-------------------- begin preamble ----------------------

\documentclass[%
oneside, % oneside: electronic viewing, twoside: printing
final,   % draft: marks overfull hboxes, figures with paths
10pt]{article}

\listfiles % print all files needed to compile this document

\usepackage{relsize,makeidx,color,setspace,amsmath,amsfonts,amssymb}
\usepackage[table]{xcolor}
\usepackage{bm,ltablex,microtype}

\usepackage[pdftex]{graphicx}

\usepackage{ptex2tex}
% #ifdef MINTED
\usepackage{minted}
\usemintedstyle{default}
% #endif

\usepackage[T1]{fontenc}
%\usepackage[latin1]{inputenc}
\usepackage{ucs}
\usepackage[utf8x]{inputenc}

\usepackage{lmodern} % Latin Modern fonts derived from Computer Modern

% Hyperlinks in PDF:
\definecolor{linkcolor}{rgb}{0,0,0.4}
\usepackage{hyperref}
\hypersetup{
    breaklinks=true,
    colorlinks=true,
    linkcolor=linkcolor,
    urlcolor=linkcolor,
    citecolor=black,
    filecolor=black,
    %filecolor=blue,
    pdfmenubar=true,
    pdftoolbar=true,
    bookmarksdepth=3 % Uncomment (and tweak) for PDF bookmarks with more levels than the TOC
    }
%\hyperbaseurl{} % hyperlinks are relative to this root

\setcounter{tocdepth}{2} % levels in table of contents

% --- fancyhdr package for fancy headers ---
\usepackage{fancyhdr}
\fancyhf{} % sets both header and footer to nothing
\renewcommand{\headrulewidth}{0pt}
\fancyfoot[LE,RO]{\thepage}
% Ensure copyright on titlepage (article style) and chapter pages (book style)
\fancypagestyle{plain}{
  \fancyhf{}
  \fancyfoot[C]{{\footnotesize \copyright\ 1999-2018, Computational Physics PHY480/905. Released under CC Attribution-NonCommercial 4.0 license}}
%  \renewcommand{\footrulewidth}{0mm}
  \renewcommand{\headrulewidth}{0mm}
}
% Ensure copyright on titlepages with \thispagestyle{empty}
\fancypagestyle{empty}{
  \fancyhf{}
  \fancyfoot[C]{{\footnotesize \copyright\ 1999-2018, Computational Physics PHY480/905. Released under CC Attribution-NonCommercial 4.0 license}}
  \renewcommand{\footrulewidth}{0mm}
  \renewcommand{\headrulewidth}{0mm}
}

\pagestyle{fancy}

% prevent orphans and widows
\clubpenalty = 10000
\widowpenalty = 10000

% --- end of standard preamble for documents ---

% insert custom LaTeX commands...

\raggedbottom
\makeindex
\usepackage[totoc]{idxlayout} % for index in the toc
\usepackage[nottoc]{tocbibind} % for references/bibliography in the toc

%-------------------- end preamble ----------------------

\begin{document}

% matching end for #ifdef PREAMBLE
% #endif

\newcommand{\exercisesection}[1]{\subsection*{#1}}


% ------------------- main content ----------------------

% ----------------- title -------------------------

\thispagestyle{empty}

\begin{center}
{\LARGE\bf
\begin{spacing}{1.25}
Project 1, deadline February 5
\end{spacing}
}
\end{center}

% ----------------- author(s) -------------------------

\begin{center}
{\bf Computational Physics PHY480/905}
\end{center}

\begin{center}
% List of all institutions:
\centerline{{\small Department of Physics and Astronomy, Michigan State University}}
\end{center}
% ----------------- end author(s) -------------------------

% --- begin date ---
\begin{center}
Spring semester 2018
\end{center}
% --- end date ---

\vspace{1cm}


\subsection{Introduction}

The aim of this project is to get familiar with various vector and matrix
operations, from dynamic memory allocation to the usage of programs in the
library package of the course. For Fortran users, memory handling and most
matrix and vector operations are included in the ANSI standard of Fortran
90/95. Array handling in Python is also rather trivial. For C++ users,
however, there are several possible options. Two are listed here.

\begin{itemize}
\item For this exercise we recommend that you make your own functions for
  dynamic memory allocation of a vector and a matrix. You don't need to
  write a class for these operations. Use then the library package lib.cpp
  with its header file lib.hpp for obtaining LU-decomposed matrices,
  solving linear equations, etc.

\item A very good and often recommended library for C++ handling of
  arrays is the library Armadillo, to be found at
  \url{arma.sourceforge.net}. We will discuss the usage of this library
  during the lab sessions and lectures. Armadillo also has an interface to
  LAPACK functions for solving systems of linear equations.
\end{itemize}

\noindent
Your program, whether it is written in C++, Python or Fortran2008, should
include dynamic memory handling of matrices and vectors. The material
needed for this project is covered by chapter 6 of the lecture notes, in
particular section 6.4 and subsequent sections.

Many important differential equations in Science can be written as linear
second-order differential equations
\begin{equation*}
\frac{d^2y}{dx^2}+k^2(x)y = f(x),
\end{equation*}
where $f$ is normally called the inhomogeneous term and $k^2$ is a real
function.

A classical equation from electromagnetism is Poisson's equation. The
electrostatic potential $\Phi$ is generated by a localized charge
distribution $\rho (\mathbf{r})$. In three dimensions it reads
\begin{equation*}
\nabla^2 \Phi = -4\pi \rho (\mathbf{r}).
\end{equation*}
With a spherically symmetric $\Phi$ and $\rho (\mathbf{r})$ the equation
simplifies to a one-dimensional equation in $r$, namely
\begin{equation*}
\frac{1}{r^2}\frac{d}{dr}\left(r^2\frac{d\Phi}{dr}\right) = -4\pi \rho(r),
\end{equation*}
which can be rewritten via a substitution $\Phi(r)= \phi(r)/r$ as
\begin{equation*}
\frac{d^2\phi}{dr^2}= -4\pi r\rho(r).
\end{equation*}
The inhomogeneous term $f$, or source term, is given by the charge
distribution $\rho$ multiplied by $r$ and the constant $-4\pi$. We will
rewrite this equation by letting $\phi\rightarrow u$ and
$r\rightarrow x$. The general one-dimensional Poisson equation then reads
\begin{equation*}
-u''(x) = f(x).
\end{equation*}

\paragraph{Project 1 a):}
In this project we will solve the one-dimensional Poisson equation with
Dirichlet boundary conditions by rewriting it as a set of linear
equations. To be more explicit we will solve the equation
\begin{equation*}
-u''(x) = f(x), \hspace{0.5cm} x\in(0,1), \hspace{0.5cm} u(0) = u(1) = 0,
\end{equation*}
and we define the discretized approximation to $u$ as $v_i$ with grid
points $x_i=ih$ in the interval from $x_0=0$ to $x_{n+1}=1$. The step
length or spacing is defined as $h=1/(n+1)$. We have then the boundary
conditions $v_0 = v_{n+1} = 0$. We approximate the second derivative of
$u$ with
\begin{equation*}
-\frac{v_{i+1}+v_{i-1}-2v_i}{h^2} = f_i \hspace{0.5cm} \mathrm{for} \hspace{0.1cm} i=1,\dots, n,
\end{equation*}
where $f_i=f(x_i)$. Show that you can rewrite this equation as a linear
set of equations of the form
\begin{equation*}
\mathbf{A}\mathbf{v} = \tilde{\mathbf{b}},
\end{equation*}
where $\mathbf{A}$ is an $n\times n$ tridiagonal matrix which we rewrite as
\[
\mathbf{A} = \begin{bmatrix}
2 & -1 & 0 & \dots & \dots & 0 \\
-1 & 2 & -1 & 0 & \dots & \dots \\
0 & -1 & 2 & -1 & 0 & \dots \\
 & \dots & \dots & \dots & \dots & \dots \\
0 & \dots &  & -1 & 2 & -1 \\
0 & \dots &  & 0 & -1 & 2 \\
\end{bmatrix},
\]
and $\tilde{b}_i=h^2f_i$.

In our case we will assume that the source term is $f(x) = 100e^{-10x}$,
and keep the same interval and boundary conditions. Then the above
differential equation has a closed-form solution given by
$u(x) = 1-(1-e^{-10})x-e^{-10x}$ (convince yourself that this is correct
by inserting the solution in the Poisson equation). We will compare our
numerical solution with this result in the next exercise.

\paragraph{Project 1 b):}
We can rewrite our matrix $\mathbf{A}$ in terms of one-dimensional
vectors $a,b,c$ of length $1:n$. Our linear equation then reads
\[
\begin{bmatrix}
b_1 & c_1 & 0 & \dots & \dots & \dots \\
a_1 & b_2 & c_2 & \dots & \dots & \dots \\
 & a_2 & b_3 & c_3 & \dots & \dots \\
 & \dots & \dots & \dots & \dots & \dots \\
 &  &  & a_{n-2} & b_{n-1} & c_{n-1} \\
 &  &  &  & a_{n-1} & b_n \\
\end{bmatrix}
\begin{bmatrix}
v_1 \\ v_2 \\ \dots \\ \dots \\ \dots \\ v_n \\
\end{bmatrix}
= \begin{bmatrix}
\tilde{b}_1 \\ \tilde{b}_2 \\ \dots \\ \dots \\ \dots \\ \tilde{b}_n \\
\end{bmatrix}.
\]
Note well that we do not include the endpoints, since the boundary
conditions are used, resulting in fixed values of $v$ there. A tridiagonal
matrix is a special form of banded matrix where all the elements are zero
except for those on and immediately above and below the leading diagonal.
Develop first a general algorithm which does not assume that the matrix
has the same elements along the diagonal and along the off-diagonals. The
algorithm for solving this set of equations is rather simple and requires
two steps only: a decomposition and forward substitution, and finally a
backward substitution.

Your first task is to set up the general algorithm (assuming different
values for the matrix elements) for solving this set of linear equations.
Find also the precise number of floating point operations needed to solve
the above equations. Then you should code the above algorithm and solve
the problem for matrices of the size $10\times 10$, $100\times 100$ and
$1000\times 1000$. That means that you select $n=10$, $n=100$ and
$n=1000$ grid points.

Compare your results (make plots) with the closed-form solution for the
different numbers of grid points in the interval $x\in(0,1)$. The
different numbers of grid points correspond to different step lengths $h$.
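For reference, one standard way to organize the two steps is the
so-called Thomas algorithm. Setting $b'_1 = b_1$ and
$\tilde{b}'_1 = \tilde{b}_1$, the forward substitution computes
\begin{equation*}
b'_i = b_i - \frac{a_{i-1}c_{i-1}}{b'_{i-1}}, \hspace{0.5cm}
\tilde{b}'_i = \tilde{b}_i - \frac{a_{i-1}\tilde{b}'_{i-1}}{b'_{i-1}},
\hspace{0.5cm} i=2,\dots,n,
\end{equation*}
and the backward substitution then yields
\begin{equation*}
v_n = \frac{\tilde{b}'_n}{b'_n}, \hspace{0.5cm}
v_i = \frac{\tilde{b}'_i - c_i v_{i+1}}{b'_i}, \hspace{0.5cm}
i=n-1,\dots,1.
\end{equation*}
Counting the floating point operations in these recurrences is part of
the exercise; note only that the count is linear in $n$, in contrast to
the $O(n^3)$ cost of a full LU decomposition.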
\paragraph{Project 1 c):}
Use thereafter the fact that the matrix has identical matrix elements
along the diagonal and identical (but different) values for the
non-diagonal elements. Specialize your algorithm to this special case and
find the number of floating point operations for this specific
tridiagonal matrix. Compare the CPU time with the general algorithm from
the previous point for matrices up to $n=10^6$ grid points.

\paragraph{Project 1 d):}
Compute the relative error in the data set $i=1,\dots, n$, by setting up
\[
\epsilon_i=\log_{10}\left(\left|\frac{v_i-u_i}{u_i}\right|\right),
\]
as a function of $\log_{10}(h)$ for the function values $u_i$ and $v_i$.
For each step length extract the max value of the relative error. Try to
increase $n$ to $n=10^7$ or higher. Make a table of the results and
comment on your results.

\paragraph{Project 1 e):}
Compare your results with those from the LU decomposition codes for
matrices of size $10\times 10$, $100\times 100$ and $1000\times 1000$.
Here you should use the library functions provided on the webpage of the
course. Alternatively, if you use Armadillo as a library, you can use its
corresponding functions: the Armadillo function for the LU decomposition
is called \texttt{lu}, while the function for solving linear sets of
equations is called \texttt{solve}. Use for example the unix function
\emph{time} when you run your codes and compare the time usage between
the LU decomposition and your tridiagonal solver. Alternatively, you can
use the functions in C++, Fortran or Python that measure the time used.

Make a table of the results and comment on the differences in execution
time. How many floating point operations does the LU decomposition use to
solve the set of linear equations? Can you run the standard LU
decomposition for a matrix of size $10^5\times 10^5$? Comment your
results.

To compute the elapsed time in C++ you can use the following statements
\bcppcod
...
#include "time.h"   // you have to include the time.h header
int main()
{
    // declarations of variables
    ...
    clock_t start, finish;  // declare start and final time
    start = clock();
    // your code is here, do something and then get the final time
    finish = clock();
    double timeused = (finish - start)/((double) CLOCKS_PER_SEC);
    ...
}
\ecppcod
Similarly, in Fortran, this simple example shows how to compute the
elapsed time.
\bfcod
PROGRAM time
  REAL :: etime          ! Declare the type of etime()
  REAL :: elapsed(2)     ! For receiving user and system time
  REAL :: total          ! For receiving total time
  INTEGER :: i, j

  WRITE(*,*) 'Start'

  j = 0
  DO i = 1, 5000000
     j = j + 1
  ENDDO

  total = ETIME(elapsed)
  WRITE(*,*) 'End: total=', total, ' user=', elapsed(1), &
             ' system=', elapsed(2)

END PROGRAM time
\efcod

\subsection{Introduction to numerical projects}

Here follows a brief recipe and recommendation on how to write a report
for each project.

\begin{itemize}
\item Give a short description of the nature of the problem and the
  numerical methods you have used.

\item Describe the algorithm you have used and/or developed. Here you may
  find it convenient to use pseudocode. In many cases you can describe
  the algorithm in the program itself.

\item Include the source code of your program. Comment your program
  properly.

\item If possible, try to find analytic solutions, or known limits, in
  order to test your program when developing the code.

\item Include your results either in figure form or in a table. Remember
  to label your results. All tables and figures should have relevant
  captions and labels on the axes.

\item Try to evaluate the reliability and numerical stability/precision
  of your results. If possible, include a qualitative and/or quantitative
  discussion of the numerical stability and any loss of precision, etc.

\item Try to give an interpretation of your results in your answers to
  the problems.

\item Critique: if possible, include your comments and reflections about
  the exercise, whether you felt you learnt something, ideas for
  improvements, and other thoughts that came up while solving the
  exercise. We wish to keep this course at the interactive level, and
  your comments can help us improve it.

\item Try to establish a practice where you log your work at the
  computer lab. You may find such a logbook very handy at later stages in
  your work, especially when you don't properly remember what a previous
  test version of your program did. Here you could also record the time
  spent on solving the exercise, various algorithms you may have tested,
  or other topics which you feel worthy of mentioning.
\end{itemize}

\noindent

\subsection{Format for electronic delivery of report and programs}

The preferred format for the report is a PDF file. You can also use DOC
or PostScript formats, or hand in an IPython notebook file. As
programming language we prefer that you choose between C/C++, Fortran2008
or Python. The following prescription should be followed when preparing
the report:

\begin{itemize}
\item Use your GitHub repository to upload your report. Indicate where
  the report is by creating, for example, a \textbf{Report} folder.
  Please send us your GitHub username as soon as possible.

\item Place your programs in a folder called, for example,
  \textbf{Programs} or \textbf{src}, in order to indicate where your
  programs are. You can use a README file to tell us how your GitHub
  folders are organized.

\item In your git repository, please include a folder which contains
  selected results. These can be in the form of output from your code for
  a selected set of runs and input parameters.

\item In this and all later projects, you should include tests (for
  example unit tests) of your code(s).

\item Comments from us on your projects, with score and detailed
  feedback, will be emailed to you.
\end{itemize}

\noindent
Finally, we encourage you to work together; optimal working groups
consist of 2-3 students. You can then hand in a common report.


% ------------------- end of main content ---------------

% #ifdef PREAMBLE
\end{document}
% #endif
{"hexsha": "a9b0ca13676b060169d8e633d63c7ec3eac70da7", "size": 17464, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/Projects/2017/Project1/pdf/Project1.p.tex", "max_stars_repo_name": "kimrojas/ComputationalPhysicsMSU", "max_stars_repo_head_hexsha": "a47cfc18b3ad6adb23045b3f49fab18c0333f556", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 63, "max_stars_repo_stars_event_min_datetime": "2016-01-14T06:54:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T04:03:16.000Z", "max_issues_repo_path": "doc/Projects/2017/Project1/pdf/Project1.p.tex", "max_issues_repo_name": "dnhdang94/ComputationalPhysicsMSU", "max_issues_repo_head_hexsha": "16990c74cf06eb5b933982137f0536d669567259", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/Projects/2017/Project1/pdf/Project1.p.tex", "max_forks_repo_name": "dnhdang94/ComputationalPhysicsMSU", "max_forks_repo_head_hexsha": "16990c74cf06eb5b933982137f0536d669567259", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 44, "max_forks_repo_forks_event_min_datetime": "2016-01-04T21:08:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-09T08:40:29.000Z", "avg_line_length": 38.4669603524, "max_line_length": 405, "alphanum_fraction": 0.6894754924, "num_tokens": 4534}
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Input
from keras.layers import LSTM
from keras.layers import GRU
from keras.layers import Dropout
from keras.layers import TimeDistributed
from keras.callbacks import EarlyStopping
from keras.layers import Conv1D, Conv2D
from keras.layers import MaxPooling2D, GlobalAveragePooling1D
from keras.layers import Flatten
from keras.models import load_model
from keras.layers import Bidirectional
from keras.layers.normalization import BatchNormalization
from keras.callbacks import History, ModelCheckpoint
from keras.layers import Activation, LeakyReLU
import collections
import sys
import pickle
import os

os.environ['CUDA_VISIBLE_DEVICES'] = "0"


## load data
def normalize_1(x):
    x = (x - x.mean()) / x.std()
    return x


def normalize_2(x):
    x = x / 255.
    return x


# load encoder to decode labels
with open('encoder.pkl', 'rb') as f:
    encoder = pickle.load(f)

test_path = sys.argv[1]
output_path = sys.argv[2]

test = pd.read_csv(test_path)
test_X = np.array([row.split(" ") for row in test["feature"].tolist()],
                  dtype=np.float32)
test_X = normalize_2(test_X.reshape(-1, 48, 48, 1))

print("load model ...")
model = load_model("model/model1-00204-0.72588.h5")

print("predicting...")
p = model.predict(test_X)
pred_y = encoder.inverse_transform(p)

sample = pd.read_csv("sample.csv")
sample["label"] = pred_y
sample.to_csv(output_path, index=None)
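The prediction script unpickles an encoder object with an inverse_transform method; the training-side code that produced encoder.pkl is not part of this record. Assuming the encoder is something like scikit-learn's LabelBinarizer (an assumption, not confirmed by the source), a compatible file could be produced as follows.

import pickle
from sklearn.preprocessing import LabelBinarizer

labels = [0, 1, 2, 3, 4, 5, 6]  # hypothetical class ids for the task
encoder = LabelBinarizer()
encoder.fit(labels)  # inverse_transform then maps score rows back to labels

with open('encoder.pkl', 'wb') as f:
    pickle.dump(encoder, f)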
{"hexsha": "764e7a07a05046acdae791908724a66fdb279789", "size": 1457, "ext": "py", "lang": "Python", "max_stars_repo_path": "hw3/CNN_predict.py", "max_stars_repo_name": "thtang/ML2017FALL", "max_stars_repo_head_hexsha": "9060a11c29600c835286ee0866e15ede25850182", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 76, "max_stars_repo_stars_event_min_datetime": "2018-01-22T06:03:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-24T10:18:53.000Z", "max_issues_repo_path": "hw3/CNN_predict.py", "max_issues_repo_name": "Cooper111/ML2017FALL-1", "max_issues_repo_head_hexsha": "838924edf3933fa9a67bae7198d3daebf21716f3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hw3/CNN_predict.py", "max_forks_repo_name": "Cooper111/ML2017FALL-1", "max_forks_repo_head_hexsha": "838924edf3933fa9a67bae7198d3daebf21716f3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2018-03-04T16:03:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-17T08:54:18.000Z", "avg_line_length": 28.0192307692, "max_line_length": 88, "alphanum_fraction": 0.7666437886, "include": true, "reason": "import numpy", "num_tokens": 361}
// Copyright (c) 2012-2018, The CryptoNote developers, The Bytecoin developers, [ ] developers.
// Licensed under the GNU Lesser General Public License. See LICENSE for details.

#include "DBsqlite3.hpp"
#include <boost/lexical_cast.hpp>
#include <iostream>
#include "PathTools.hpp"
#include "common/string.hpp"

using namespace platform;

static void sqlite_check(int rc, const char *msg) {
	if (rc != SQLITE_OK)
		throw platform::sqlite::Error(msg + common::to_string(rc));
}

sqlite::Dbi::~Dbi() {
	sqlite3_close(handle);
	handle = nullptr;
}

sqlite::Stmt::Stmt(Stmt &&other) { std::swap(handle, other.handle); }

sqlite::Stmt::~Stmt() {
	sqlite3_finalize(handle);
	handle = nullptr;
}

DBsqlite::DBsqlite(const std::string &full_path, uint64_t max_db_size) : full_path(full_path + ".sqlite") {
	// lmdb_check(::mdb_env_set_mapsize(db_env.handle, max_db_size), "mdb_env_set_mapsize ");
	std::cout << "sqlite3_libversion=" << sqlite3_libversion() << std::endl;
	// create_directories_if_necessary(full_path);
	sqlite_check(sqlite3_open(this->full_path.c_str(), &db_dbi.handle), "sqlite3_open ");
	char *err_msg = nullptr;  // TODO - we leak err_msg
	sqlite_check(
	    sqlite3_exec(db_dbi.handle,
	        "CREATE TABLE IF NOT EXISTS kv_table(kk BLOB PRIMARY KEY COLLATE BINARY, vv BLOB NOT NULL) WITHOUT ROWID",
	        0, 0, &err_msg),
	    err_msg);
	sqlite_check(sqlite3_prepare_v2(db_dbi.handle, "SELECT kk, vv FROM kv_table WHERE kk = ?", -1, &stmt_get.handle, 0),
	    "sqlite3_prepare_v2 stmt_get ");
	sqlite_check(
	    sqlite3_prepare_v2(db_dbi.handle, "INSERT INTO kv_table (kk, vv) VALUES (?, ?)", -1, &stmt_insert.handle, 0),
	    "sqlite3_prepare_v2 stmt_insert ");
	sqlite_check(
	    sqlite3_prepare_v2(db_dbi.handle, "REPLACE INTO kv_table (kk, vv) VALUES (?, ?)", -1, &stmt_update.handle, 0),
	    "sqlite3_prepare_v2 stmt_update ");
	sqlite_check(sqlite3_prepare_v2(db_dbi.handle, "DELETE FROM kv_table WHERE kk = ?", -1, &stmt_del.handle, 0),
	    "sqlite3_prepare_v2 stmt_del ");
	sqlite_check(sqlite3_prepare_v2(db_dbi.handle, "SELECT count(kk) FROM kv_table", -1, &stmt_select_star.handle, 0),
	    "sqlite3_prepare_v2 stmt_select_star ");
	sqlite_check(sqlite3_exec(db_dbi.handle, "BEGIN TRANSACTION", 0, 0, &err_msg), err_msg);
	std::cout << "SQLite applying DB journal, can take up to several minutes..." << std::endl;
	commit_db_txn();  // We apply journal from last crash/exit immediately
	// std::cout << "rows=" << get_approximate_items_count() << std::endl;
}

size_t DBsqlite::test_get_approximate_size() const { return 0; }

size_t DBsqlite::get_approximate_items_count() const {
	return 1;  // Sqlite does a full table scan on select count(*), we do not want that behavior
	// sqlite3_reset(stmt_select_star.handle);
	// auto rc = sqlite3_step(stmt_select_star.handle);
	// if (rc != SQLITE_ROW)
	//     throw platform::sqlite::Error("DB::get_approximate_items_count failed sqlite3_step in get " +
	//         common::to_string(rc));
	// return boost::lexical_cast<size_t>(sqlite3_column_int64(stmt_select_star.handle, 0));
}

static const size_t max_key_size = 128;

DBsqlite::Cursor::Cursor(
    const DBsqlite *db, const sqlite::Dbi &db_dbi, const std::string &prefix, const std::string &middle, bool forward)
    : db(db), prefix(prefix), forward(forward) {
	std::string start  = prefix + middle;
	std::string finish = start;
	if (finish.size() < max_key_size)
		finish += std::string(max_key_size - finish.size(), char(0xff));  // char('~')
	std::string sql = forward ? "SELECT kk, vv FROM kv_table WHERE kk >= ? ORDER BY kk ASC"
	                          : "SELECT kk, vv FROM kv_table WHERE kk <= ? ORDER BY kk DESC";
	sqlite_check(
	    sqlite3_prepare_v2(db_dbi.handle, sql.c_str(), -1, &stmt_get.handle, 0), "sqlite3_prepare_v2 Cursor stmt_get ");
	sqlite_check(sqlite3_bind_blob(stmt_get.handle, 1, forward ? start.data() : finish.data(),
	                 static_cast<int>(forward ? start.size() : finish.size()), SQLITE_TRANSIENT),
	    "DB::Cursor sqlite3_bind_blob 1 ");
	step_and_check();
}

void DBsqlite::Cursor::next() { step_and_check(); }

void DBsqlite::Cursor::erase() {
	if (is_end)
		return;  // Some precaution
	sqlite3_reset(stmt_get.handle);
	std::string mykey = prefix + suffix;
	const_cast<DBsqlite *>(db)->del(mykey, true);
	sqlite_check(sqlite3_bind_blob(stmt_get.handle, 1, mykey.data(), static_cast<int>(mykey.size()), SQLITE_TRANSIENT),
	    "DB::Cursor erase sqlite3_bind_blob 1 ");
	step_and_check();
}

void DBsqlite::Cursor::step_and_check() {
	auto rc = sqlite3_step(stmt_get.handle);
	if (rc == SQLITE_DONE) {
		data   = nullptr;
		size   = 0;
		is_end = true;
		suffix = std::string();
		return;
	}
	if (rc != SQLITE_ROW)
		throw platform::sqlite::Error("Cursor step failed sqlite3_step in step_and_check " + common::to_string(rc));
	size = sqlite3_column_bytes(stmt_get.handle, 0);
	data = reinterpret_cast<const char *>(sqlite3_column_blob(stmt_get.handle, 0));
	std::string itkey(data, size);
	size = sqlite3_column_bytes(stmt_get.handle, 1);
	data = reinterpret_cast<const char *>(sqlite3_column_blob(stmt_get.handle, 1));
	if (itkey.size() < prefix.size() ||
	    std::char_traits<char>::compare(prefix.data(), itkey.data(), prefix.size()) != 0) {
		data   = nullptr;
		size   = 0;
		is_end = true;
		suffix = std::string();
		return;
	}
	suffix = std::string(itkey.data() + prefix.size(), itkey.size() - prefix.size());
}

/*void DB::Cursor::check_prefix(const lmdb::Val &itkey) {
    if (is_end || itkey.size() < prefix.size() ||
        std::char_traits<char>::compare(prefix.data(), itkey.data(), prefix.size()) != 0) {
        is_end = true;
        suffix = std::string();
        return;
    }
    suffix = std::string(itkey.data() + prefix.size(), itkey.size() - prefix.size());
}*/

std::string DBsqlite::Cursor::get_value_string() const { return std::string(data, size); }
common::BinaryArray DBsqlite::Cursor::get_value_array() const { return common::BinaryArray(data, data + size); }

DBsqlite::Cursor DBsqlite::begin(const std::string &prefix, const std::string &middle) const {
	return Cursor(this, db_dbi, prefix, middle, true);
}

DBsqlite::Cursor DBsqlite::rbegin(const std::string &prefix, const std::string &middle) const {
	return Cursor(this, db_dbi, prefix, middle, false);
}

void DBsqlite::commit_db_txn() {
	char *err_msg = nullptr;  // TODO - we leak err_msg
	sqlite_check(sqlite3_exec(db_dbi.handle, "COMMIT TRANSACTION", 0, 0, &err_msg), err_msg);
	sqlite_check(sqlite3_exec(db_dbi.handle, "BEGIN TRANSACTION", 0, 0, &err_msg), err_msg);
}

static void put(sqlite::Stmt &stmt, const std::string &key, const void *data, size_t size) {
	sqlite3_reset(stmt.handle);
	sqlite_check(
	    sqlite3_bind_blob(stmt.handle, 1, key.data(), static_cast<int>(key.size()), 0), "DB::put sqlite3_bind_blob 1 ");
	sqlite_check(sqlite3_bind_blob(stmt.handle, 2, data, static_cast<int>(size), 0), "DB::put sqlite3_bind_blob 2 ");
	auto rc = sqlite3_step(stmt.handle);
	if (rc != SQLITE_DONE)
		throw platform::sqlite::Error("DB::put failed sqlite3_step in put " + common::to_string(rc));
}

void DBsqlite::put(const std::string &key, const common::BinaryArray &value, bool nooverwrite) {
	sqlite::Stmt &stmt = nooverwrite ? stmt_insert : stmt_update;
	::put(stmt, key, value.data(), value.size());
}

void DBsqlite::put(const std::string &key, const std::string &value, bool nooverwrite) {
	sqlite::Stmt &stmt = nooverwrite ? stmt_insert : stmt_update;
	::put(stmt, key, value.data(), value.size());
}

static std::pair<const unsigned char *, size_t> get(const sqlite::Stmt &stmt, const std::string &key) {
	sqlite3_reset(stmt.handle);
	sqlite_check(
	    sqlite3_bind_blob(stmt.handle, 1, key.data(), static_cast<int>(key.size()), 0), "DB::get sqlite3_bind_blob 1 ");
	auto rc = sqlite3_step(stmt.handle);
	if (rc == SQLITE_DONE)
		return std::make_pair(nullptr, 0);
	if (rc != SQLITE_ROW)
		throw platform::sqlite::Error("DB::get failed sqlite3_step in get " + common::to_string(rc));
	auto si = sqlite3_column_bytes(stmt.handle, 0);
	auto da = reinterpret_cast<const unsigned char *>(sqlite3_column_blob(stmt.handle, 0));
	si      = sqlite3_column_bytes(stmt.handle, 1);
	da      = reinterpret_cast<const unsigned char *>(sqlite3_column_blob(stmt.handle, 1));
	return std::make_pair(da, si);
}

bool DBsqlite::get(const std::string &key, common::BinaryArray &value) const {
	auto result = ::get(stmt_get, key);
	if (!result.first)
		return false;
	value.assign(result.first, result.first + result.second);
	return true;
}

bool DBsqlite::get(const std::string &key, std::string &value) const {
	auto result = ::get(stmt_get, key);
	if (!result.first)
		return false;
	value.assign(result.first, result.first + result.second);
	return true;
}

void DBsqlite::del(const std::string &key, bool mustexist) {
	sqlite3_reset(stmt_del.handle);
	sqlite_check(sqlite3_bind_blob(stmt_del.handle, 1, key.data(), static_cast<int>(key.size()), 0),
	    "DB::del sqlite3_bind_blob 1 ");
	auto rc = sqlite3_step(stmt_del.handle);
	if (rc != SQLITE_DONE)
		throw platform::sqlite::Error("DB::del failed sqlite3_step in del " + common::to_string(rc));
	int deleted_rows = sqlite3_changes(db_dbi.handle);
	if (mustexist && deleted_rows != 1)
		throw platform::sqlite::Error("DB::del row does not exist");
}

std::string DBsqlite::to_ascending_key(uint32_t key) {
	char buf[32] = {};
	sprintf(buf, "%08X", key);
	return std::string(buf);
}

uint32_t DBsqlite::from_ascending_key(const std::string &key) {
	long long unsigned val = 0;
	if (sscanf(key.c_str(), "%llx", &val) != 1)
		throw std::runtime_error("from_ascending_key failed to convert key=" + key);
	return boost::lexical_cast<uint32_t>(val);  // TODO - std::stoull(key, nullptr, 16) when Google updates NDK compiler
}

std::string DBsqlite::clean_key(const std::string &key) {
	std::string result = key;
	for (char &ch : result) {
		unsigned char uch = ch;
		if (uch >= 128)
			uch -= 128;
		if (uch == 127)
			uch = 'F';
		if (uch < 32)
			uch = '0' + uch;
		ch = uch;
	}
	return result;
}

void DBsqlite::delete_db(const std::string &path) {
	// std::remove((path + "/data.mdb").c_str());
	// std::remove((path + "/lock.mdb").c_str());
	std::remove(path.c_str());
}

void DBsqlite::run_tests() {
	delete_db("temp_db");
	{
		DBsqlite db("temp_db");
		std::string str;
		bool res = db.get("history/ha", str);
		std::cout << "res=" << res << std::endl;
		db.put("history/ha", "ua", false);
		db.put("history/hb", "ub", false);
		db.put("history/hc", "uc", false);
		db.put("history/ha", "uaa", false);
		try {
			db.put("history/ha", "uab", true);
			std::cout << "value erroneously overwritten" << std::endl;
		} catch (...) {
		}
		db.del("history/hd", false);
		try {
			db.del("history/hd", true);
			std::cout << "value erroneously deleted" << std::endl;
		} catch (...) {
		}
		res = db.get("history/ha", str);
		std::cout << "res=" << res << std::endl;
		db.put("unspent/ua", "ua", false);
		db.put("unspent/ub", "ub", false);
		db.put("unspent/uc", "uc", false);
		db.commit_db_txn();
		std::cout << "-- all keys forward --" << std::endl;
		for (auto cur = db.begin(std::string()); !cur.end(); cur.next()) {
			std::cout << cur.get_suffix() << std::endl;
		}
		std::cout << "-- all keys backward --" << std::endl;
		for (auto cur = db.rbegin(std::string()); !cur.end(); cur.next()) {
			std::cout << cur.get_suffix() << std::endl;
		}
		std::cout << "-- history forward --" << std::endl;
		for (auto cur = db.begin("history/"); !cur.end(); cur.next()) {
			std::cout << cur.get_suffix() << std::endl;
		}
		std::cout << "-- history backward --" << std::endl;
		for (auto cur = db.rbegin("history/"); !cur.end(); cur.next()) {
			std::cout << cur.get_suffix() << std::endl;
		}
		std::cout << "-- unspent forward --" << std::endl;
		for (auto cur = db.begin("unspent/"); !cur.end(); cur.next()) {
			std::cout << cur.get_suffix() << std::endl;
		}
		std::cout << "-- unspent backward --" << std::endl;
		for (auto cur = db.rbegin("unspent/"); !cur.end(); cur.next()) {
			std::cout << cur.get_suffix() << std::endl;
		}
		std::cout << "-- alpha forward --" << std::endl;
		for (auto cur = db.begin("alpha/"); !cur.end(); cur.next()) {
			std::cout << cur.get_suffix() << std::endl;
		}
		std::cout << "-- alpha backward --" << std::endl;
		for (auto cur = db.rbegin("alpha/"); !cur.end(); cur.next()) {
			std::cout << cur.get_suffix() << std::endl;
		}
		std::cout << "-- zero forward --" << std::endl;
		for (auto cur = db.begin("zero/"); !cur.end(); cur.next()) {
			std::cout << cur.get_suffix() << std::endl;
		}
		std::cout << "-- zero backward --" << std::endl;
		for (auto cur = db.rbegin("zero/"); !cur.end(); cur.next()) {
			std::cout << cur.get_suffix() << std::endl;
		}
		int c = 0;
		std::cout << "-- deleting c=2 iterating forward --" << std::endl;
		for (auto cur = db.begin(std::string()); !cur.end(); ++c) {
			if (c == 2) {
				std::cout << "deleting " << cur.get_suffix() << std::endl;
				cur.erase();
			} else {
				std::cout << cur.get_suffix() << std::endl;
				cur.next();
			}
		}
		std::cout << "-- all keys forward --" << std::endl;
		for (auto cur = db.begin(std::string()); !cur.end(); cur.next()) {
			std::cout << cur.get_suffix() << std::endl;
		}
		c = 0;
		std::cout << "-- deleting c=2 iterating backward --" << std::endl;
		for (auto cur = db.rbegin(std::string()); !cur.end(); ++c) {
			if (c == 2) {
				std::cout << "deleting " << cur.get_suffix() << std::endl;
				cur.erase();
			} else {
				std::cout << cur.get_suffix() << std::endl;
				cur.next();
			}
		}
		std::cout << "-- all keys forward --" << std::endl;
		for (auto cur = db.begin(std::string()); !cur.end(); cur.next()) {
			std::cout << cur.get_suffix() << std::endl;
		}
	}
	delete_db("temp_db");
}
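As a rough illustration of the storage layout implemented above (a single kv_table of BLOB key/value pairs, with prefix cursors realized as ordered >=/<= scans that stop when a key no longer carries the prefix), here is a small Python sqlite3 sketch; it mirrors the schema and the forward-cursor query but is not part of this codebase.

import sqlite3

con = sqlite3.connect(':memory:')
con.execute("CREATE TABLE kv_table (kk BLOB PRIMARY KEY, vv BLOB NOT NULL) "
            "WITHOUT ROWID")
for k, v in [(b'history/ha', b'ua'), (b'history/hb', b'ub'),
             (b'unspent/ua', b'ua')]:
    con.execute("REPLACE INTO kv_table (kk, vv) VALUES (?, ?)", (k, v))

# Forward cursor over a prefix: seek to the prefix, scan in key order,
# and stop as soon as a key no longer starts with the prefix.
prefix = b'history/'
for kk, vv in con.execute(
        "SELECT kk, vv FROM kv_table WHERE kk >= ? ORDER BY kk ASC", (prefix,)):
    if not kk.startswith(prefix):
        break
    print(kk, vv)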
{"hexsha": "a7ef4dec69e43c1950a6786e059fafc197fcc529", "size": 14169, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/platform/DBsqlite3.cpp", "max_stars_repo_name": "VaultB/vaultb", "max_stars_repo_head_hexsha": "de41b954ea4d8a4f54a5ab95adf14af85809f55e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/platform/DBsqlite3.cpp", "max_issues_repo_name": "VaultB/vaultb", "max_issues_repo_head_hexsha": "de41b954ea4d8a4f54a5ab95adf14af85809f55e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/platform/DBsqlite3.cpp", "max_forks_repo_name": "VaultB/vaultb", "max_forks_repo_head_hexsha": "de41b954ea4d8a4f54a5ab95adf14af85809f55e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6890756303, "max_line_length": 119, "alphanum_fraction": 0.6363893006, "num_tokens": 3926}
// Distributed under the MIT License.
// See LICENSE.txt for details.

#include "Framework/TestingFramework.hpp"

#include <boost/functional/hash.hpp>
#include <cstddef>
#include <deque>
#include <memory>
#include <utility>

#include "DataStructures/DataBox/DataBox.hpp"
#include "DataStructures/DataBox/PrefixHelpers.hpp"
#include "DataStructures/DataBox/Prefixes.hpp"
#include "DataStructures/DataBox/Tag.hpp"
#include "DataStructures/DataVector.hpp"
#include "DataStructures/FixedHashMap.hpp"
#include "DataStructures/Tensor/Tensor.hpp"
#include "DataStructures/Variables.hpp"
#include "DataStructures/VariablesTag.hpp"
#include "Domain/LogicalCoordinates.hpp"
#include "Domain/Structure/Direction.hpp"
#include "Domain/Structure/ElementId.hpp"
#include "Domain/Structure/MaxNumberOfNeighbors.hpp"
#include "Domain/Tags.hpp"
#include "Evolution/DgSubcell/Actions/TciAndRollback.hpp"
#include "Evolution/DgSubcell/ActiveGrid.hpp"
#include "Evolution/DgSubcell/Mesh.hpp"
#include "Evolution/DgSubcell/NeighborData.hpp"
#include "Evolution/DgSubcell/Projection.hpp"
#include "Evolution/DgSubcell/Reconstruction.hpp"
#include "Evolution/DgSubcell/SubcellOptions.hpp"
#include "Evolution/DgSubcell/Tags/ActiveGrid.hpp"
#include "Evolution/DgSubcell/Tags/Inactive.hpp"
#include "Evolution/DgSubcell/Tags/Mesh.hpp"
#include "Evolution/DgSubcell/Tags/NeighborData.hpp"
#include "Evolution/DgSubcell/Tags/SubcellOptions.hpp"
#include "Evolution/DgSubcell/Tags/TciGridHistory.hpp"
#include "Framework/ActionTesting.hpp"
#include "NumericalAlgorithms/Spectral/Mesh.hpp"
#include "Parallel/RegisterDerivedClassesWithCharm.hpp"
#include "Time/History.hpp"
#include "Time/Slab.hpp"
#include "Time/Tags.hpp"
#include "Time/Time.hpp"
#include "Time/TimeStepId.hpp"
#include "Time/TimeSteppers/AdamsBashforthN.hpp"
#include "Time/TimeSteppers/RungeKutta3.hpp"
#include "Time/TimeSteppers/TimeStepper.hpp"
#include "Utilities/ErrorHandling/Error.hpp"
#include "Utilities/Gsl.hpp"
#include "Utilities/TMPL.hpp"
#include "Utilities/TaggedTuple.hpp"

namespace {
struct Var1 : db::SimpleTag {
  using type = Scalar<DataVector>;
};

struct PrimVar1 : db::SimpleTag {
  using type = Scalar<DataVector>;
};

template <size_t Dim, bool HasPrims>
struct System {
  static constexpr size_t volume_dim = Dim;
  static constexpr bool has_primitive_and_conservative_vars = HasPrims;
  using variables_tag = Tags::Variables<tmpl::list<Var1>>;
  using primitive_variables_tag = ::Tags::Variables<tmpl::list<PrimVar1>>;
};

template <size_t>
struct DummyLabel;

template <size_t Dim, typename Metavariables>
struct component {
  using metavariables = Metavariables;
  using chare_type = ActionTesting::MockArrayChare;
  using array_index = size_t;
  using initial_tags = tmpl::append<
      tmpl::list<
          ::Tags::TimeStepId, domain::Tags::Mesh<Dim>,
          evolution::dg::subcell::Tags::Mesh<Dim>, domain::Tags::Element<Dim>,
          evolution::dg::subcell::Tags::ActiveGrid,
          evolution::dg::subcell::Tags::DidRollback,
          evolution::dg::subcell::Tags::NeighborDataForReconstructionAndRdmpTci<
              Dim>,
          Tags::Variables<tmpl::list<Var1>>,
          evolution::dg::subcell::Tags::Inactive<
              Tags::Variables<tmpl::list<Var1>>>,
          Tags::HistoryEvolvedVariables<Tags::Variables<tmpl::list<Var1>>>,
          SelfStart::Tags::InitialValue<Tags::Variables<tmpl::list<Var1>>>>,
      tmpl::conditional_t<
          Metavariables::has_prims,
          tmpl::list<Tags::Variables<tmpl::list<PrimVar1>>,
                     SelfStart::Tags::InitialValue<
                         Tags::Variables<tmpl::list<PrimVar1>>>>,
          tmpl::list<>>>;

  using phase_dependent_action_list = tmpl::list<Parallel::PhaseActions<
      typename Metavariables::Phase, Metavariables::Phase::Initialization,
      tmpl::list<ActionTesting::InitializeDataBox<initial_tags>,
                 evolution::dg::subcell::Actions::TciAndRollback<
                     typename Metavariables::TciOnDgGrid>,
                 Actions::Label<DummyLabel<0>>,
                 Actions::Label<evolution::dg::subcell::Actions::Labels::
                                    BeginSubcellAfterDgRollback>,
                 Actions::Label<DummyLabel<1>>>>>;
};

template <size_t Dim, bool HasPrims>
struct Metavariables {
  static constexpr size_t volume_dim = Dim;
  static constexpr bool has_prims = HasPrims;
  using component_list = tmpl::list<component<Dim, Metavariables>>;
  using system = System<Dim, HasPrims>;
  using analytic_variables_tags = typename system::variables_tag::tags_list;
  using const_global_cache_tags =
      tmpl::list<evolution::dg::subcell::Tags::SubcellOptions>;
  enum class Phase { Initialization, Exit };

  // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
  static bool rdmp_fails;
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
  static bool tci_fails;
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
  static bool tci_invoked;

  struct TciOnDgGrid {
    using return_tags = tmpl::list<>;
    using argument_tags =
        tmpl::list<evolution::dg::subcell::Tags::Inactive<
                       Tags::Variables<tmpl::list<Var1>>>,
                   Tags::Variables<tmpl::list<Var1>>, domain::Tags::Mesh<Dim>>;

    static bool apply(
        const Variables<tmpl::list<
            evolution::dg::subcell::Tags::Inactive<Var1>>>& subcell_vars,
        const Variables<tmpl::list<Var1>>& dg_vars, const Mesh<Dim>& dg_mesh,
        const double persson_exponent) {
      Variables<tmpl::list<evolution::dg::subcell::Tags::Inactive<Var1>>>
          projected_vars{subcell_vars.number_of_grid_points()};
      evolution::dg::subcell::fd::project(
          make_not_null(&projected_vars), dg_vars, dg_mesh,
          evolution::dg::subcell::fd::mesh(dg_mesh).extents());
      CHECK(projected_vars == subcell_vars);
      CHECK(approx(persson_exponent) == 4.0);
      tci_invoked = true;
      return tci_fails;
    }
  };
};

template <size_t Dim, bool HasPrims>
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool Metavariables<Dim, HasPrims>::rdmp_fails = false;
template <size_t Dim, bool HasPrims>
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool Metavariables<Dim, HasPrims>::tci_fails = false;
template <size_t Dim, bool HasPrims>
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool Metavariables<Dim, HasPrims>::tci_invoked = false;

template <size_t Dim>
Element<Dim> create_element(const bool with_neighbors) {
  DirectionMap<Dim, Neighbors<Dim>> neighbors{};
  if (with_neighbors) {
    for (size_t i = 0; i < 2 * Dim; ++i) {
      neighbors[gsl::at(Direction<Dim>::all_directions(), i)] =
          Neighbors<Dim>{{ElementId<Dim>{i + 1, {}}}, {}};
    }
  }
  return Element<Dim>{ElementId<Dim>{0, {}}, neighbors};
}

template <size_t Dim, bool HasPrims>
void test_impl(const bool rdmp_fails, const bool tci_fails,
               const bool always_use_subcell, const bool self_starting,
               const bool with_neighbors) {
  CAPTURE(Dim);
  CAPTURE(rdmp_fails);
  CAPTURE(tci_fails);
  CAPTURE(always_use_subcell);
  CAPTURE(self_starting);
  CAPTURE(with_neighbors);
  using metavars = Metavariables<Dim, HasPrims>;
  metavars::rdmp_fails = rdmp_fails;
  metavars::tci_fails = tci_fails;
  metavars::tci_invoked = false;
  using comp = component<Dim, metavars>;
  using MockRuntimeSystem = ActionTesting::MockRuntimeSystem<metavars>;
  MockRuntimeSystem runner{{evolution::dg::subcell::SubcellOptions{
      1.0e-3, 1.0e-4, 2.0e-3, 2.0e-4, 5.0, 4.0, always_use_subcell}}};

  const TimeStepId time_step_id{true, self_starting ? -1 : 1,
                                Time{Slab{1.0, 2.0}, {0, 10}}};
  const Mesh<Dim> dg_mesh{5, Spectral::Basis::Legendre,
                          Spectral::Quadrature::GaussLobatto};
  const Mesh<Dim> subcell_mesh = evolution::dg::subcell::fd::mesh(dg_mesh);
  const Element<Dim> element = create_element<Dim>(with_neighbors);
  const evolution::dg::subcell::ActiveGrid active_grid =
      evolution::dg::subcell::ActiveGrid::Dg;

  FixedHashMap<maximum_number_of_neighbors(Dim) + 1,
               std::pair<Direction<Dim>, ElementId<Dim>>,
               evolution::dg::subcell::NeighborData,
               boost::hash<std::pair<Direction<Dim>, ElementId<Dim>>>>
      neighbor_data{};
  const std::pair self_id{Direction<Dim>::lower_xi(),
                          ElementId<Dim>::external_boundary_id()};
  neighbor_data[self_id] = {};
  // max and min of +-2 at last time level means reconstructed vars will be in
  // limit
  neighbor_data[self_id].max_variables_values.push_back(2.0);
  neighbor_data[self_id].min_variables_values.push_back(-2.0);

  using evolved_vars_tags = tmpl::list<Var1>;
  Variables<evolved_vars_tags> evolved_vars{dg_mesh.number_of_grid_points()};
  // Set Var1 to the logical coords, since those are linear
  get(get<Var1>(evolved_vars)) = get<0>(logical_coordinates(dg_mesh));
  if (rdmp_fails) {
    get(get<Var1>(evolved_vars))[0] = 100.0;
  }
  using prim_vars_tags = tmpl::list<PrimVar1>;
  Variables<prim_vars_tags> prim_vars{dg_mesh.number_of_grid_points()};
  get(get<PrimVar1>(prim_vars)) =
      get<0>(logical_coordinates(dg_mesh)) + 1000.0;

  const Variables<tmpl::list<evolution::dg::subcell::Tags::Inactive<Var1>>>
      inactive_evolved_vars{subcell_mesh.number_of_grid_points(), 1.0};

  constexpr size_t history_size = 5;
  TimeSteppers::History<
      Variables<evolved_vars_tags>,
      Variables<db::wrap_tags_in<Tags::dt, evolved_vars_tags>>>
      time_stepper_history{history_size};
  for (size_t i = 0; i < history_size; ++i) {
    Variables<db::wrap_tags_in<Tags::dt, evolved_vars_tags>> dt_vars{
        dg_mesh.number_of_grid_points()};
    get(get<Tags::dt<Var1>>(dt_vars)) =
        (i + 20.0) * get<0>(logical_coordinates(dg_mesh));
    time_stepper_history.insert(
        {true, 1, Time{Slab{1.0, 2.0}, {static_cast<int>(5 - i), 10}}},
        dt_vars);
  }
  Variables<evolved_vars_tags> vars{dg_mesh.number_of_grid_points()};
  get(get<Var1>(vars)) =
      (history_size + 1.0) * get<0>(logical_coordinates(dg_mesh));
  time_stepper_history.most_recent_value() = vars;

  const bool did_rollback = false;

  Variables<evolved_vars_tags> initial_value_evolved_vars{
      dg_mesh.number_of_grid_points(), 1.0e8};
  Variables<prim_vars_tags> initial_value_prim_vars{
      dg_mesh.number_of_grid_points(), 1.0e10};

  if constexpr (HasPrims) {
    ActionTesting::emplace_array_component_and_initialize<comp>(
        &runner, ActionTesting::NodeId{0}, ActionTesting::LocalCoreId{0}, 0,
        {time_step_id, dg_mesh, subcell_mesh, element, active_grid,
         did_rollback, neighbor_data, evolved_vars, inactive_evolved_vars,
         time_stepper_history, initial_value_evolved_vars, prim_vars,
         initial_value_prim_vars});
  } else {
    (void)prim_vars;
    (void)initial_value_prim_vars;
    ActionTesting::emplace_array_component_and_initialize<comp>(
        &runner, ActionTesting::NodeId{0}, ActionTesting::LocalCoreId{0}, 0,
        {time_step_id, dg_mesh, subcell_mesh, element, active_grid,
         did_rollback, neighbor_data, evolved_vars, inactive_evolved_vars,
         time_stepper_history, initial_value_evolved_vars});
  }

  // Invoke the TciAndRollback action on the runner
  ActionTesting::next_action<comp>(make_not_null(&runner), 0);

  const auto active_grid_from_box = ActionTesting::get_databox_tag<
      comp, evolution::dg::subcell::Tags::ActiveGrid>(runner, 0);
  const auto& inactive_vars_from_box = ActionTesting::get_databox_tag<
      comp, evolution::dg::subcell::Tags::Inactive<
                Tags::Variables<evolved_vars_tags>>>(runner, 0);
  const auto& active_vars_from_box =
      ActionTesting::get_databox_tag<comp,
                                     Tags::Variables<evolved_vars_tags>>(
          runner, 0);
  const auto& time_stepper_history_from_box =
      ActionTesting::get_databox_tag<comp, Tags::HistoryEvolvedVariables<>>(
          runner, 0);
  const auto& neighbor_data_from_box = ActionTesting::get_databox_tag<
      comp, evolution::dg::subcell::Tags::
                NeighborDataForReconstructionAndRdmpTci<Dim>>(runner, 0);
  const auto& did_rollback_from_box = ActionTesting::get_databox_tag<
      comp, evolution::dg::subcell::Tags::DidRollback>(runner, 0);
  const auto& initial_value_evolved_vars_from_box =
      get<0>(ActionTesting::get_databox_tag<
             comp, SelfStart::Tags::InitialValue<
                       Tags::Variables<evolved_vars_tags>>>(runner, 0));

  const bool expected_rollback =
      with_neighbors and (always_use_subcell or rdmp_fails or tci_fails);

  if (expected_rollback) {
    CHECK(ActionTesting::get_next_action_index<comp>(runner, 0) == 4);
    CHECK(active_grid_from_box == evolution::dg::subcell::ActiveGrid::Subcell);
    CHECK_FALSE(neighbor_data_from_box.empty());
    CHECK(did_rollback_from_box);

    CHECK(time_stepper_history_from_box.size() == history_size - 1);
    CHECK(time_stepper_history_from_box.integration_order() ==
          time_stepper_history.integration_order());
    TimeSteppers::History<
        Variables<evolved_vars_tags>,
        Variables<db::wrap_tags_in<Tags::dt, evolved_vars_tags>>>
        time_stepper_history_subcells{
            time_stepper_history.integration_order()};
    time_stepper_history_subcells.most_recent_value() =
        evolution::dg::subcell::fd::project(
            time_stepper_history.most_recent_value(), dg_mesh,
            subcell_mesh.extents());
    const auto end_it = std::prev(time_stepper_history.end());
    for (auto it = time_stepper_history.begin(); it != end_it; ++it) {
      time_stepper_history_subcells.insert(
          it.time_step_id(),
          evolution::dg::subcell::fd::project(it.derivative(), dg_mesh,
                                              subcell_mesh.extents()));
    }
    REQUIRE(time_stepper_history_subcells.size() ==
            time_stepper_history_from_box.size());
    for (auto expected_it = time_stepper_history_subcells.begin(),
              it = time_stepper_history_from_box.begin();
         expected_it != time_stepper_history_subcells.end();
         ++it, ++expected_it) {
      CHECK(it.time_step_id() == expected_it.time_step_id());
      CHECK(it.derivative() == expected_it.derivative());
    }
    CHECK(get<evolution::dg::subcell::Tags::Inactive<Var1>>(
              inactive_vars_from_box) ==
          get<Var1>(time_stepper_history.most_recent_value()));
    CHECK(active_vars_from_box ==
          evolution::dg::subcell::fd::project(
              time_stepper_history.most_recent_value(), dg_mesh,
              subcell_mesh.extents()));

    if (self_starting) {
      CHECK(initial_value_evolved_vars_from_box ==
            evolution::dg::subcell::fd::project(
                initial_value_evolved_vars, dg_mesh, subcell_mesh.extents()));
      if constexpr (HasPrims) {
        const auto& initial_value_prim_vars_from_box = get<0>(
            ActionTesting::get_databox_tag<
                comp, SelfStart::Tags::InitialValue<
                          Tags::Variables<prim_vars_tags>>>(runner, 0));
        CHECK(initial_value_prim_vars_from_box ==
              evolution::dg::subcell::fd::project(
                  initial_value_prim_vars, dg_mesh, subcell_mesh.extents()));
      }
    }
  } else {
    CHECK(ActionTesting::get_next_action_index<comp>(runner, 0) == 2);
    CHECK(active_grid_from_box == evolution::dg::subcell::ActiveGrid::Dg);
    CHECK(neighbor_data_from_box.empty());
    CHECK_FALSE(did_rollback_from_box);

    CHECK(time_stepper_history_from_box.size() == history_size);
    CHECK(time_stepper_history_from_box.integration_order() ==
          time_stepper_history.integration_order());
    CHECK(time_stepper_history_from_box.most_recent_value() ==
          time_stepper_history.most_recent_value());
    for (auto expected_it = time_stepper_history.begin(),
              it = time_stepper_history_from_box.begin();
         expected_it != time_stepper_history.end(); ++it, ++expected_it) {
      CHECK(it.time_step_id() == expected_it.time_step_id());
      CHECK(it.derivative() == expected_it.derivative());
    }
    if (with_neighbors) {
      CHECK(get(get<evolution::dg::subcell::Tags::Inactive<Var1>>(
                inactive_vars_from_box)) ==
            evolution::dg::subcell::fd::project(
                get(get<Var1>(evolved_vars)), dg_mesh,
                subcell_mesh.extents()));
    }
    CHECK(active_vars_from_box == evolved_vars);
    if (self_starting) {
      CHECK(initial_value_evolved_vars_from_box == initial_value_evolved_vars);
      if constexpr (HasPrims) {
        const auto& initial_value_prim_vars_from_box = get<0>(
            ActionTesting::get_databox_tag<
                comp, SelfStart::Tags::InitialValue<
                          Tags::Variables<prim_vars_tags>>>(runner, 0));
        CHECK(initial_value_prim_vars_from_box == initial_value_prim_vars);
      }
    }
  }
}

template <size_t Dim>
void test() {
  for (const bool rdmp_fails : {true, false}) {
    for (const bool tci_fails : {false, true}) {
      for (const bool always_use_subcell : {false, true}) {
        for (const bool self_starting : {false, true}) {
          for (const bool have_neighbors : {false, true}) {
            test_impl<Dim, true>(rdmp_fails, tci_fails, always_use_subcell,
                                 self_starting, have_neighbors);
            test_impl<Dim, false>(rdmp_fails, tci_fails, always_use_subcell,
                                  self_starting, have_neighbors);
          }
        }
      }
    }
  }
}

SPECTRE_TEST_CASE("Unit.Evolution.Subcell.Actions.TciAndRollback",
                  "[Evolution][Unit]") {
  // We test the following cases:
  // 1. Test RDMP passes/fails (check TciMutator not called on failure)
  // 2. Test always_use_subcells
  // 3. Test TciMutator passes/fails
  //
  // Below is a list of quantities to verify were handled/set correctly by the
  // action:
  // - history projected with correct size (one fewer)
  // - active vars become projection of latest in history
  // - active_grid is correct
  // - did_rollback is correct
  // - if self-start check initial value (and prims) were projected

  test<1>();
  test<2>();
  test<3>();
}
}  // namespace
{"hexsha": "8116fa3b50d806ea90a633d39694880631dbf29c", "size": 18549, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/Unit/Evolution/DgSubcell/Actions/Test_TciAndRollback.cpp", "max_stars_repo_name": "kidder/spectre", "max_stars_repo_head_hexsha": "97ae95f72320f9f67895d3303824e64de6fd9077", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-04-02T16:49:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-02T16:49:35.000Z", "max_issues_repo_path": "tests/Unit/Evolution/DgSubcell/Actions/Test_TciAndRollback.cpp", "max_issues_repo_name": "GitHimanshuc/spectre", "max_issues_repo_head_hexsha": "4de4033ba36547113293fe4dbdd77591485a4aee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19.0, "max_issues_repo_issues_event_min_datetime": "2019-02-27T22:13:47.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-03T16:21:08.000Z", "max_forks_repo_path": "tests/Unit/Evolution/DgSubcell/Actions/Test_TciAndRollback.cpp", "max_forks_repo_name": "geoffrey4444/spectre", "max_forks_repo_head_hexsha": "9350d61830b360e2d5b273fdd176dcc841dbefb0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.8713318284, "max_line_length": 80, "alphanum_fraction": 0.6854277859, "num_tokens": 4732}
------------------------------------------------------------------------------
-- Properties of the alter list
------------------------------------------------------------------------------

{-# OPTIONS --exact-split              #-}
{-# OPTIONS --no-sized-types           #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K               #-}

module FOT.FOTC.UnguardedCorecursion.Alter.PropertiesATP where

open import FOT.FOTC.UnguardedCorecursion.Alter.Alter
open import FOTC.Base
open import FOTC.Base.List
open import FOTC.Data.Stream.Type
open import FOTC.Relation.Binary.Bisimilarity.Type

------------------------------------------------------------------------------
-- TODO (23 December 2013).
-- alter-Stream : Stream alter
-- alter-Stream = Stream-coind A h refl
--   where
--   A : D → Set
--   A xs = xs ≡ xs
--   {-# ATP definition A #-}

--   postulate h : A alter → ∃[ x' ] ∃[ xs' ] alter ≡ x' ∷ xs' ∧ A xs'
--   {-# ATP prove h #-}

-- TODO (23 December 2013).
-- alter'-Stream : Stream alter'
-- alter'-Stream = Stream-coind A h refl
--   where
--   A : D → Set
--   A xs = xs ≡ xs
--   {-# ATP definition A #-}

--   postulate h : A alter' → ∃[ x' ] ∃[ xs' ] alter' ≡ x' ∷ xs' ∧ A xs'
--   {-# ATP prove h #-}

-- TODO (23 December 2013).
-- alter≈alter' : alter ≈ alter'
-- alter≈alter' = ≈-coind B h₁ h₂
--   where
--   B : D → D → Set
--   B xs ys = xs ≡ xs
--   {-# ATP definition B #-}

--   postulate h₁ : B alter alter' → ∃[ x' ] ∃[ xs' ] ∃[ ys' ]
--                  alter ≡ x' ∷ xs' ∧ alter' ≡ x' ∷ ys' ∧ B xs' ys'
--   {-# ATP prove h₁ #-}

--   postulate h₂ : B alter alter'
--   {-# ATP prove h₂ #-}
{"hexsha": "fa987bc1b8031c011466d09dd17b0db29d4f4258", "size": 1655, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "notes/FOT/FOTC/UnguardedCorecursion/Alter/PropertiesATP.agda", "max_stars_repo_name": "asr/fotc", "max_stars_repo_head_hexsha": "2fc9f2b81052a2e0822669f02036c5750371b72d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2015-09-03T20:53:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-12T16:09:54.000Z", "max_issues_repo_path": "notes/FOT/FOTC/UnguardedCorecursion/Alter/PropertiesATP.agda", "max_issues_repo_name": "asr/fotc", "max_issues_repo_head_hexsha": "2fc9f2b81052a2e0822669f02036c5750371b72d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-10-12T17:28:16.000Z", "max_issues_repo_issues_event_max_datetime": "2017-01-01T14:34:26.000Z", "max_forks_repo_path": "notes/FOT/FOTC/UnguardedCorecursion/Alter/PropertiesATP.agda", "max_forks_repo_name": "asr/fotc", "max_forks_repo_head_hexsha": "2fc9f2b81052a2e0822669f02036c5750371b72d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-09-19T14:18:30.000Z", "max_forks_repo_forks_event_max_datetime": "2018-03-14T08:50:00.000Z", "avg_line_length": 29.5535714286, "max_line_length": 78, "alphanum_fraction": 0.4694864048, "num_tokens": 482}
'''
Comparison of Continuous No-Regret Algorithms

@date: May 26, 2015
'''

# Set up infrastructure and basic problem parameters
import matplotlib as mpl
mpl.use('Agg')  # this is needed when running on a linux server over terminal
import multiprocessing as mp
import numpy as np
import datetime, os
import pickle
from ContNoRegret.Domains import nBox, UnionOfDisjointnBoxes, DifferenceOfnBoxes, unitbox, hollowbox, vboxes, vL
from ContNoRegret.LossFunctions import QuadraticLossFunction, random_QuadraticLosses
from ContNoRegret.NoRegretAlgos import ContNoRegretProblem
from ContNoRegret.utils import CNR_worker, plot_results, save_results, circular_tour
from ContNoRegret.animate import save_animations
from ContNoRegret.Potentials import (ExponentialPotential, IdentityPotential, pNormPotential,
                                     ExpPPotential, pExpPotential, FractionalLinearPotential)

# this is the location of the folder for the results
results_path = ''
desc = 'NIPS2_CNR_vs'
tmpfolder = '/Volumes/tmp/'  # if possible, choose this to be a RamDisk

# some flags for keeping a record of the simulation parameters
save_res = True
show_plots = False
save_anims = True
show_anims = False

T = 2500  # Time horizon
M = 10.0  # Uniform bound on the function (in the dual norm)
L = 5.0  # Uniform bound on the Lipschitz constant
N = 2500  # Number of parallel algorithm instances
Ngrid = 250000  # Number of gridpoints for the sampling step

vs = [0.50]  # , 0.50, 0.25, 0.10, 0.05]
doms, paths = [], []
for v in vs:
    d, p = vL(v, Npath=T, epsilon=0.15)
    doms.append(d)
    paths.append(p)

# before running the computation, read this file so we can later save a copy in the results folder
with open(__file__, 'r') as f:
    thisfile = f.read()

problems = []
# loop over the domains with different vs
for dom, path in zip(doms, paths):
    lossfuncs, Mnew, lambdamax = random_QuadraticLosses(dom, path, L, M, pd=True)
    # create the problem
    problems.append(ContNoRegretProblem(dom, lossfuncs, L, Mnew, desc=desc))

# Select a couple of potentials for the Dual Averaging algorithm
potentials = [pNormPotential(1.05), pNormPotential(1.75)]

alpha, theta = potentials[0].alpha_opt(dom.n), potentials[0].theta_opt(dom, M)
etas = theta * (1 + np.arange(T)) ** (-alpha)

# the following runs fine if the script is the __main__ method,
# but crashes when running from ipython
pool = mp.Pool(mp.cpu_count() - 1)
processes = []

for i, prob in enumerate(problems):
    for pot in potentials:
        processes.append(pool.apply_async(CNR_worker, (prob, N, 'DA'),
                                          {'opt_rate': False, 'Ngrid': Ngrid,
                                           'potential': pot, 'pid': len(processes),
                                           'tmpfolder': tmpfolder, 'etas': etas,
                                           'label': 'v={0:.2f}, '.format(prob.domain.v) + pot.desc,
                                           'animate': []}))

# wait for the processes to finish and collect the results (as file handlers)
results = [process.get() for process in processes]

# plot results and/or save a persistent copy (pickled) of the detailed results
timenow = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')  # create a time stamp for unambiguously naming the results folder
results_directory = '{}{}/'.format(results_path, timenow)

if save_res:
    os.makedirs(results_directory, exist_ok=True)  # this could probably use a safer implementation
    plot_results(results, 100, results_directory, show_plots)
    if save_anims:
        save_animations(results, 10, results_directory, show_anims)
    save_results(results, results_directory)
    # store the previously read-in contents of this file in the results folder
    with open(results_directory + str(__file__), 'w') as f:
        f.write(thisfile)
else:
    plot_results(results, offset=100)
{"hexsha": "3710c3ace455c228c08f89d4d372abe219427b14", "size": 3675, "ext": "py", "lang": "Python", "max_stars_repo_path": "vs_example.py", "max_stars_repo_name": "anonymouscontributor/cnr", "max_stars_repo_head_hexsha": "f0c793baddf67b8540ea90617e82d27269d367b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vs_example.py", "max_issues_repo_name": "anonymouscontributor/cnr", "max_issues_repo_head_hexsha": "f0c793baddf67b8540ea90617e82d27269d367b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vs_example.py", "max_forks_repo_name": "anonymouscontributor/cnr", "max_forks_repo_head_hexsha": "f0c793baddf67b8540ea90617e82d27269d367b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0957446809, "max_line_length": 125, "alphanum_fraction": 0.7341496599, "include": true, "reason": "import numpy", "num_tokens": 966}
[STATEMENT]
lemma cut_off_after_match_any:
  "simple_fw (cut_off_after_match_any rs) p = simple_fw rs p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. simple_fw (cut_off_after_match_any rs) p = simple_fw rs p
[PROOF STEP]
apply(induction rs p rule: simple_fw.induct)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
 1. \<And>uu_. simple_fw (cut_off_after_match_any []) uu_ = simple_fw [] uu_
 2. \<And>m rs p. (\<not> simple_matches m p \<Longrightarrow> simple_fw (cut_off_after_match_any rs) p = simple_fw rs p) \<Longrightarrow> simple_fw (cut_off_after_match_any (SimpleRule m Accept # rs)) p = simple_fw (SimpleRule m Accept # rs) p
 3. \<And>m rs p. (\<not> simple_matches m p \<Longrightarrow> simple_fw (cut_off_after_match_any rs) p = simple_fw rs p) \<Longrightarrow> simple_fw (cut_off_after_match_any (SimpleRule m Drop # rs)) p = simple_fw (SimpleRule m Drop # rs) p
[PROOF STEP]
by(simp add: simple_match_any)+
{"llama_tokens": 353, "file": "Simple_Firewall_SimpleFw_Semantics", "length": 2}
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 04 13:23:27 2013

@author: Joey
"""
import numpy as np

import spectroscopy.Spacetime as Spacetime
import spectroscopy.ElectronicOperator as ElectronicOperator
import spectroscopy.NuclearWavefunction as NuclearWavefunction
import spectroscopy.NuclearOperator as NuclearOperator
import spectroscopy.ElectronicWavefunction as ElectronicWavefunction

omega_1_g_wavenumbers = 1334.0
omega_1_e_wavenumbers = 1334.0
HR_1 = 0.44
mass_1_amu = 63.466

#omega_2_g_wavenumbers = 1334.0
#omega_2_e_wavenumbers = 1334.0
#HR_2 = 0.44
#mass_2_amu = 63.466

#S = dx^2 m \omega / (2 \hbar)

##all units from here on out are defined by mySpace
mySpace = Spacetime.Spacetime(xMax_tuple = (23.0,),
                              numberOfNuclearDimenions = 1,
                              numberOfElectronicDimensions = 2,
                              numberOfSimulationSpacePointsPerNuclearDimension_tuple = (24,),
                              dt_SECONDS = .100E-15,
                              UnityMassInElectronMasses = 100.0)

#the following parameters allow for a converged TG/PP signal. Not tested with 2DES, though
#mySpace = Spacetime.Spacetime(xMax_tuple = (23.0,),
#                              numberOfNuclearDimenions = 1,
#                              numberOfElectronicDimensions = 2,
#                              numberOfSimulationSpacePointsPerNuclearDimension_tuple = (24,),
#                              dt_SECONDS = 1.00E-15,
#                              UnityMassInElectronMasses = 100.0)

omega_1_g = mySpace.unitHandler.energyUnitsFromWavenumbers(omega_1_g_wavenumbers)
omega_1_e = mySpace.unitHandler.energyUnitsFromWavenumbers(omega_1_e_wavenumbers)
m_1 = mySpace.unitHandler.massUnitsFromAmu(mass_1_amu)

pulse_carrier_frequency_wavenumbers = 0.0
pulse_carrier_frequency = mySpace.unitHandler.energyUnitsFromWavenumbers(pulse_carrier_frequency_wavenumbers)

dx_1 = np.sqrt(HR_1 * 2.0 * mySpace.hbar / (m_1 * omega_1_e))

x_0 = 0.0
groundCenter = x_0
excitedCenter = x_0 + dx_1

nuclear_ground_1 = NuclearOperator.harmonicOscillator(mySpace,
                                                      omega = omega_1_g,
                                                      mass = m_1,
                                                      center = groundCenter)
nuclear_excited_1 = NuclearOperator.harmonicOscillator(mySpace,
                                                       omega = omega_1_e,
                                                       mass = m_1,
                                                       center = excitedCenter)

x_max_needed = 2 * dx_1 + 4.0 * nuclear_ground_1.sigma
print "xmax we need: ", x_max_needed, " Versus what we got: ", mySpace.xMax_values[0]

dx_needed_2_interactions = (mySpace.hbar * np.pi) / (nuclear_ground_1.omega * nuclear_ground_1.mass * (2.0 * dx_1 + 4.0 * nuclear_ground_1.sigma))
print "dx needed for 2 interactions: ", dx_needed_2_interactions, "but we have: ", mySpace.Dx_values[0]

electronic_ground = NuclearOperator.nuclearHamiltonian(mySpace,
                                                       listOfOneDimensionalHamiltonians = [nuclear_ground_1])
electronic_excited = NuclearOperator.nuclearHamiltonian(mySpace,
                                                        listOfOneDimensionalHamiltonians = [nuclear_excited_1])

groundStateNuclearWF = NuclearWavefunction.nuclearWavefunction(mySpace,
                                                               groundStateNuclearHamiltonian = electronic_ground)

ElectronicHamiltonian = ElectronicOperator.ElectronicHamiltonian(mySpace, [(0, 0, electronic_ground), (1, 1, electronic_excited)])

initialEWF = ElectronicWavefunction.electronicWavefunction(mySpace,
                                                           listOfNuclearWavefunctions = [groundStateNuclearWF, 0],
                                                           Normalize=True)

mu_0 = 1.0
x_max = mySpace.xMax_values[0]
c = 1E-3

constantMu = NuclearOperator.constantPositionNuclearOperator(mySpace, mu_0)

xTransitionDipole_FC = ElectronicOperator.ElectronicPositionOperator(mySpace, [(0, 1, constantMu), (1, 0, constantMu)])
yTransitionDipole_FC = ElectronicOperator.ElectronicPositionOperator(mySpace, [])
zTransitionDipole_FC = ElectronicOperator.ElectronicPositionOperator(mySpace, [])

transitionDipoleTuple_FC = (xTransitionDipole_FC, yTransitionDipole_FC, zTransitionDipole_FC)

linearMu = NuclearOperator.functionalPositionNuclearOperator(mySpace, lambda x: mu_0 + c * (x - x_max))

xTransitionDipole_linear = ElectronicOperator.ElectronicPositionOperator(mySpace, [(0, 1, linearMu), (1, 0, linearMu)])
yTransitionDipole_linear = ElectronicOperator.ElectronicPositionOperator(mySpace, [])
zTransitionDipole_linear = ElectronicOperator.ElectronicPositionOperator(mySpace, [])

transitionDipoleTuple_linear = (xTransitionDipole_linear, yTransitionDipole_linear, zTransitionDipole_linear)
{"hexsha": "35f4ae08d0402a378ba10452eeba70b13f997334", "size": 4709, "ext": "py", "lang": "Python", "max_stars_repo_path": "systems/Juergen/one_fast_mode_monomer.py", "max_stars_repo_name": "jgoodknight/spectroscopy", "max_stars_repo_head_hexsha": "5c41852dff8d02fe49395ecc5cd36902ec867033", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-04-06T00:47:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-12T19:33:39.000Z", "max_issues_repo_path": "systems/Juergen/one_fast_mode_monomer.py", "max_issues_repo_name": "jgoodknight/spectroscopy", "max_issues_repo_head_hexsha": "5c41852dff8d02fe49395ecc5cd36902ec867033", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "systems/Juergen/one_fast_mode_monomer.py", "max_forks_repo_name": "jgoodknight/spectroscopy", "max_forks_repo_head_hexsha": "5c41852dff8d02fe49395ecc5cd36902ec867033", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5714285714, "max_line_length": 148, "alphanum_fraction": 0.6982374177, "include": true, "reason": "import numpy", "num_tokens": 1238}
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Set,
    Tuple,
    Union,
    cast,
)

import networkx as nx
import strictyaml as yaml
from dcp.utils.common import md5_hash, remove_dupes
from loguru import logger
from snapflow.core.metadata.orm import BaseModel
from snapflow.core.node import DeclaredNode, Node, NodeConfiguration, NodeLike, node
from snapflow.core.snap import SnapLike
from sqlalchemy import Column, String
from sqlalchemy.sql.sqltypes import JSON

if TYPE_CHECKING:
    from snapflow import Environment
    from snapflow.core.streams import StreamLike


class NodeDoesNotExist(KeyError):
    pass


class GraphMetadata(BaseModel):
    hash = Column(String(128), primary_key=True)
    adjacency = Column(JSON)

    def __repr__(self) -> str:
        return self._repr(
            hash=self.hash,
        )


class DeclaredGraph:
    def __init__(self, nodes: Iterable[DeclaredNode] = None):
        self._nodes: Dict[str, DeclaredNode] = {}
        if nodes:
            for n in nodes:
                self.add_node(n)

    def __str__(self):
        s = "Nodes:\n------\n" + "\n".join(self._nodes.keys())
        return s

    def node(
        self,
        snap: Union[SnapLike, str],
        key: Optional[str] = None,
        params: Dict[str, Any] = None,
        inputs: Dict[str, StreamLike] = None,
        input: StreamLike = None,
        graph: Optional[DeclaredGraph] = None,
        output_alias: Optional[str] = None,
        schema_translation: Optional[Dict[str, Union[Dict[str, str], str]]] = None,
        upstream: Union[StreamLike, Dict[str, StreamLike]] = None,  # TODO: DEPRECATED
    ) -> DeclaredNode:
        dn = node(
            snap=snap,
            key=key,
            params=params,
            inputs=inputs,
            input=input,
            graph=graph,
            output_alias=output_alias,
            schema_translation=schema_translation,
            upstream=upstream,
        )
        self.add_node(dn)
        return dn

    create_node = node  # Legacy api

    def add_node(self, node: DeclaredNode):
        if node.key in self._nodes:
            raise KeyError(
                f"Duplicate node key `{node.key}`. "
                "Specify a distinct key for the node (key='unique_key')"
            )
        node.graph = self
        self._nodes[node.key] = node

    def remove_node(self, node: DeclaredNode):
        del self._nodes[node.key]

    def get_node(self, key: NodeLike) -> DeclaredNode:
        if isinstance(key, DeclaredNode):
            return key
        assert isinstance(key, str)
        return self._nodes[key]

    def has_node(self, key: str) -> bool:
        return key in self._nodes

    def all_nodes(self) -> Iterable[DeclaredNode]:
        return self._nodes.values()

    def instantiate(self, env: Environment) -> Graph:
        g = Graph(env)
        for dn in self.all_nodes():
            n = dn.instantiate(env, g)
            g.add_node(n)
        return g


graph = DeclaredGraph
DEFAULT_GRAPH = graph()


def hash_adjacency(adjacency: List[Tuple[str, Dict]]) -> str:
    return md5_hash(str(adjacency))


# Example of the adjacency-list shape produced by nx.DiGraph.adjacency():
# [
#     ("_input_input", {"dataframe_conform_to_schema": {}}),
#     ("dataframe_conform_to_schema", {}),
# ]

NxNode = Tuple[str, Dict[str, Dict]]
NxAdjacencyList = List[NxNode]


class Graph:
    def __init__(self, env: Environment, nodes: Iterable[Node] = None):
        self.env = env
        self._nodes: Dict[str, Node] = {}
        if nodes:
            for n in nodes:
                self.add_node(n)

    def __str__(self):
        s = "Nodes:\n------\n" + "\n".join(self._nodes.keys())
        return s

    def get_metadata_obj(self) -> GraphMetadata:
        adjacency = self.adjacency_list()
        return GraphMetadata(hash=hash_adjacency(adjacency), adjacency=adjacency)

    # TODO: duplicated code
    def node(
        self,
        snap: Union[SnapLike, str],
        key: Optional[str] = None,
        params: Dict[str, Any] = None,
        inputs: Dict[str, StreamLike] = None,
        input: StreamLike = None,
        graph: Optional[DeclaredGraph] = None,
        output_alias: Optional[str] = None,
        schema_translation: Optional[Dict[str, Union[Dict[str, str], str]]] = None,
        upstream: Union[StreamLike, Dict[str, StreamLike]] = None,  # TODO: DEPRECATED
    ) -> Node:
        dn = node(
            snap=snap,
            key=key,
            params=params,
            inputs=inputs,
            input=input,
            graph=graph,
            output_alias=output_alias,
            schema_translation=schema_translation,
            upstream=upstream,
        )
        n = dn.instantiate(self.env, self)
        self.add_node(n)
        return n

    create_node = node  # Legacy api

    def add_node(self, node: Node):
        if node.key in self._nodes:
            raise KeyError(f"Duplicate node key {node.key}")
        self._nodes[node.key] = node

    def remove_node(self, node: Node):
        del self._nodes[node.key]

    def get_node(self, key: NodeLike) -> Node:
        if isinstance(key, Node):
            return key
        if isinstance(key, DeclaredNode):
            key = key.key
        assert isinstance(key, str)
        return self._nodes[key]

    def has_node(self, key: str) -> bool:
        return key in self._nodes

    def all_nodes(self) -> Iterable[Node]:
        return self._nodes.values()

    def validate_graph(self) -> bool:
        # TODO
        #   validate node keys are valid
        #   validate snaps are valid
        #   validate types are valid
        #   etc
        return True

    def as_nx_graph(self) -> nx.DiGraph:
        g = nx.DiGraph()
        for n in self.all_nodes():
            g.add_node(n.key)
            inputs = n.declared_inputs
            for input_stream in inputs.values():
                for input_node_key in input_stream.stream.source_node_keys():
                    g.add_node(input_node_key)
                    g.add_edge(input_node_key, n.key)
            # TODO: self ref edge?
        return g

    def adjacency_list(self) -> NxAdjacencyList:
        return list(self.as_nx_graph().adjacency())

    def get_all_upstream_dependencies_in_execution_order(
        self, node: Node
    ) -> List[Node]:
        g = self.as_nx_graph()
        node_keys = self._get_all_upstream_dependencies_in_execution_order(g, node.key)
        return [self.get_node(name) for name in node_keys]

    def _get_all_upstream_dependencies_in_execution_order(
        self, g: nx.DiGraph, node: str
    ) -> List[str]:
        nodes = []
        for parent_node in g.predecessors(node):
            if parent_node == node:
                # Ignore self-ref cycles
                continue
            parent_deps = self._get_all_upstream_dependencies_in_execution_order(
                g, parent_node
            )
            nodes.extend(parent_deps)
        nodes.append(node)
        # May have added nodes twice, just keep first reference:
        return remove_dupes(nodes)

    def get_all_nodes_in_execution_order(self) -> List[Node]:
        g = self.as_nx_graph()
        return [self.get_node(name) for name in nx.topological_sort(g)]


def load_graph_from_dict(raw_graph: Dict[str, Any]) -> DeclaredGraph:
    """
    nodes:
      - key: node1
        snap: core.import_local_csv
        output_alias: csv1
        inputs:
          input: othernode
        params:
          path: "****"
    """
    raw_nodes = raw_graph["nodes"]
    g = DeclaredGraph()
    for r in raw_nodes:
        inputs = r.pop("inputs", None)
        # NB: a plain "input" key is never popped here; it stays in r and is
        # passed through to g.node(**r) below as the `input` keyword
        inpt = r.pop("inpt", None)
        if inputs and inpt:
            raise ValueError("Can't specify both `inputs` and `input`")
        elif inputs:
            if isinstance(inputs, list):
                assert len(inputs) == 1
                r["input"] = inputs[0]
            elif isinstance(inputs, dict):
                r["inputs"] = inputs
            else:
                raise TypeError(inputs)
        elif inpt:
            assert isinstance(inpt, str)
            r["input"] = inpt
        g.node(**r)
    return g


def graph_from_yaml(yml: str) -> DeclaredGraph:
    d = yaml.load(yml).data
    if isinstance(d, list):
        d = {"nodes": d}
    assert isinstance(d, dict)
    return load_graph_from_dict(d)


def graph_from_node_configs(
    env: Environment, nodes: Iterable[NodeConfiguration]
) -> Graph:
    declared_graph = DeclaredGraph([DeclaredNode.from_config(n) for n in nodes])
    graph = declared_graph.instantiate(env)
    return graph
{"hexsha": "d85a466770286f91f3683b91be777406023ff679", "size": 8621, "ext": "py", "lang": "Python", "max_stars_repo_path": "snapflow/core/graph.py", "max_stars_repo_name": "icedevml/snapflow", "max_stars_repo_head_hexsha": "329dae3f8eaa70d3a26d38a505faeb45d8eecb57", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "snapflow/core/graph.py", "max_issues_repo_name": "icedevml/snapflow", "max_issues_repo_head_hexsha": "329dae3f8eaa70d3a26d38a505faeb45d8eecb57", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "snapflow/core/graph.py", "max_forks_repo_name": "icedevml/snapflow", "max_forks_repo_head_hexsha": "329dae3f8eaa70d3a26d38a505faeb45d8eecb57", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8327759197, "max_line_length": 106, "alphanum_fraction": 0.5941306113, "include": true, "reason": "import networkx", "num_tokens": 2019}
#ifndef OPTIONS_HPP_
#define OPTIONS_HPP_

#include <boost/program_options.hpp>
#include <string>
#include <vector>

#include "lvr2/config/BaseOption.hpp"

namespace viewer
{

using boost::program_options::options_description;
using boost::program_options::positional_options_description;
using boost::program_options::variables_map;
using std::string;

class Options : public lvr2::BaseOption
{
public:
    /**
     * @brief Ctor. Parses the command parameters given to the main
     *        function of the program
     */
    Options(int argc, char** argv);
    virtual ~Options();

    /**
     * @brief Prints a usage message to stdout.
     */
    bool printUsage() const;

    /**
     * @brief Returns the input file
     */
    //std::vector<std::string> getInputFiles() const;

    /**
     * @brief Returns the input file name
     */
    std::string getInputFileName() const;

    /**
     * @brief Returns the layers used for LOD
     *
     * @return
     */
    std::vector<std::string> getLayers() const;

    int getCacheSize() const;

    /**
     * @brief
     *
     * @return
     */
    float getHighResDistance() const;

    bool isChunkedMesh() const;
};

} // namespace viewer

#endif /* OPTIONS_HPP_ */
{"hexsha": "e200ea04c171ec12b99d6e488f0745bb60bc9fde", "size": 1224, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/tools/lvr2_viewer/app/Options.hpp", "max_stars_repo_name": "uos/lvr", "max_stars_repo_head_hexsha": "9bb03a30441b027c39db967318877e03725112d5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 38.0, "max_stars_repo_stars_event_min_datetime": "2019-06-19T15:10:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T03:08:24.000Z", "max_issues_repo_path": "src/tools/lvr2_viewer/app/Options.hpp", "max_issues_repo_name": "uos/lvr", "max_issues_repo_head_hexsha": "9bb03a30441b027c39db967318877e03725112d5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9.0, "max_issues_repo_issues_event_min_datetime": "2019-06-19T16:19:51.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-17T08:31:25.000Z", "max_forks_repo_path": "src/tools/lvr2_viewer/app/Options.hpp", "max_forks_repo_name": "uos/lvr", "max_forks_repo_head_hexsha": "9bb03a30441b027c39db967318877e03725112d5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 13.0, "max_forks_repo_forks_event_min_datetime": "2019-04-16T11:50:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-26T07:47:44.000Z", "avg_line_length": 18.8307692308, "max_line_length": 69, "alphanum_fraction": 0.6266339869, "num_tokens": 290}
import numpy as np
import matplotlib.pyplot as plt
import h5py as h5


def plot_traj(x, name, label=""):
    fig_traj = plt.figure(1)
    ax_traj = fig_traj.add_subplot(111)
    ax_traj.plot(x[:, :, 0], x[:, :, 1], label=label)
    ax_traj.set_aspect('equal')
    ax_traj.legend()

    fig_xztraj = plt.figure(2)
    ax_xztraj = fig_xztraj.add_subplot(111)
    ax_xztraj.plot(x[:, :, 0], x[:, :, 2], label=label)
    ax_xztraj.legend()

    fig_traj.savefig(name + '_trajectory.png')
    fig_xztraj.savefig(name + '_xzSlice.png')


def plot_vel(t, vel, name, label=""):
    fig_xvel = plt.figure(3)
    ax_xvel = fig_xvel.add_subplot(111)
    ax_xvel.plot(t, vel[:, :, 0], label=label)
    ax_xvel.set_xlim([0, t[-1]])
    ax_xvel.legend()

    fig_yvel = plt.figure(4)
    ax_yvel = fig_yvel.add_subplot(111)
    ax_yvel.plot(t, vel[:, :, 1], label=label)
    ax_yvel.set_xlim([0, t[-1]])
    ax_yvel.legend()

    fig_zvel = plt.figure(5)
    ax_zvel = fig_zvel.add_subplot(111)
    ax_zvel.plot(t, vel[:, :, 2], label=label)
    ax_zvel.set_xlim([0, t[-1]])
    ax_zvel.legend()

    fig_xvel.savefig(name + '_xvelocity.png')
    fig_yvel.savefig(name + '_yvelocity.png')
    fig_zvel.savefig(name + '_zvelocity.png')


def plot_isotraj(x, name, plim=1, label=""):
    fig_isotraj = plt.figure(6)
    ax = fig_isotraj.gca(projection='3d')
    for pii in range(0, plim):
        ax.plot3D(x[:, pii, 0], x[:, pii, 1], zs=x[:, pii, 2])
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    fig_isotraj.savefig(name + '_isotraj.png')


def plot_xres(t, xres, name, label=""):
    fig_xres = plt.figure(8)
    ax_xres = fig_xres.add_subplot(111)
    for k in range(0, xres.shape[1]):
        ax_xres.plot(t[1:], xres[1:, k], label=label + " K={0}".format(k))
    ax_xres.set_xlim([0, t[-1]])
    ax_xres.set_yscale('log')
    ax_xres.legend()
    fig_xres.savefig(name + '_xres.png')


def plot_vres(t, vres, name, label=""):
    fig_vres = plt.figure(9)
    ax_vres = fig_vres.add_subplot(111)
    for k in range(0, vres.shape[1]):
        ax_vres.plot(t[1:], vres[1:, k], label=label + " K={0}".format(k))
    ax_vres.set_xlim([0, t[-1]])
    ax_vres.set_yscale('log')
    ax_vres.legend()
    fig_vres.savefig(name + '_vres.png')


def orderLines(order, xRange, yRange):
    if order < 0:
        a = yRange[1] / xRange[0]**order
    else:
        a = yRange[0] / xRange[0]**order

    oLine = [a * xRange[0]**order, a * xRange[1]**order]
    return oLine
{"hexsha": "6f191c961bbd9d24e1cd18ed8847a1240d72f6d9", "size": 2428, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/plotting.py", "max_stars_repo_name": "Krissmedt/relativistic_sdc", "max_stars_repo_head_hexsha": "7b294a9ab5d75a1540f192393ad0b4a570bfddb5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/plotting.py", "max_issues_repo_name": "Krissmedt/relativistic_sdc", "max_issues_repo_head_hexsha": "7b294a9ab5d75a1540f192393ad0b4a570bfddb5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/plotting.py", "max_forks_repo_name": "Krissmedt/relativistic_sdc", "max_forks_repo_head_hexsha": "7b294a9ab5d75a1540f192393ad0b4a570bfddb5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.829787234, "max_line_length": 69, "alphanum_fraction": 0.6247940692, "include": true, "reason": "import numpy", "num_tokens": 772}
import numpy
import pandas

scoreData = pandas.DataFrame({'Y': ['A', 'A', 'A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B',
                                    'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C'],
                              'P_A': [0.47, 0.13, 0.33, 0.47, 0.37, 0.47, 0.5, 0.47, 0.33, 0,
                                      0.47, 0.47, 0.33, 0.47, 0.47, 0, 0.47, 0, 0.47, 0.47],
                              'P_B': [0.13, 0.4, 0.34, 0.13, 0.13, 0.13, 0.5, 0.13, 0.34, 0.33,
                                      0.13, 0.13, 0.34, 0.13, 0.13, 0.33, 0.13, 0.33, 0.13, 0.13],
                              'P_C': [0.4, 0.47, 0.33, 0.4, 0.5, 0.4, 0, 0.4, 0.33, 0.67,
                                      0.4, 0.4, 0.33, 0.4, 0.4, 0.67, 0.4, 0.67, 0.4, 0.4]})

# Inputs:
# y: a Pandas Series that contains the actual target categories
# predProb: a Pandas DataFrame that contains the predicted probabilities
#           column names must have prefix P_ followed by target category value
#           column order must match the ascending order of the target categories
# Output:
# outMetric: a Pandas Series that contains the metric values
#            MCE = Misclassification Rate
#            ASE = Average Squared Error
#            RASE = Root Average Squared Error
#            AUC = Area Under Curve


def NominalMetric(y, predProb):
    n = predProb.shape[0]   # Number of observations
    K = predProb.shape[1]   # Number of target categories

    # Retrieve target categories with the prefix P_
    y_cat = predProb.columns

    # Predicted target category
    j_max = predProb.values.argmax(axis=1)
    predYCat = y_cat[j_max]

    # Misclassification rate
    yWithP_ = 'P_' + y
    qMisClass = numpy.where(predYCat == yWithP_, 0, 1)

    # Root Average Squared Error
    delta = pandas.DataFrame(numpy.zeros((n, K)), columns=y_cat)
    for col in y_cat:
        delta[col] = numpy.where(yWithP_ == col, 1.0, 0.0)
    # drop=True keeps the old row labels from being inserted as an extra column
    ase = numpy.mean(numpy.mean((delta - predProb.reset_index(drop=True)) ** 2))

    # Area Under Curve
    nComb = 0
    auc = 0.0
    for row in y_cat:
        eProb = predProb[row][yWithP_ == row]
        for col in y_cat:
            if (row != col):
                neProb = predProb[row][yWithP_ == col]

                # Calculate the number of concordant, discordant, and tied pairs
                nConcordant = 0
                nDiscordant = 0
                nTied = 0
                for eP in eProb:
                    nConcordant = nConcordant + numpy.sum(numpy.where(neProb < eP, 1, 0))
                    nDiscordant = nDiscordant + numpy.sum(numpy.where(neProb > eP, 1, 0))
                    nTied = nTied + numpy.sum(numpy.where(neProb == eP, 1, 0))
                nPairs = nConcordant + nDiscordant + nTied
                if (nPairs > 0):
                    nComb = nComb + 1
                    auc = auc + 0.5 + 0.5 * (nConcordant - nDiscordant) / nPairs

    if (nComb > 0):
        auc = auc / nComb
    else:
        auc = numpy.nan

    outMetric = pandas.Series({'MCE': numpy.mean(qMisClass),
                               'ASE': ase,
                               'RASE': numpy.sqrt(ase),
                               'AUC': auc})
    return (outMetric)


# Calculate the metrics for the entire data
outMetric = NominalMetric(y=scoreData['Y'], predProb=scoreData[['P_A', 'P_B', 'P_C']])

# Calculate the metrics only for Y = 'A'
subsetData = scoreData[scoreData['Y'] == 'A']
outMetric_A = NominalMetric(y=subsetData['Y'], predProb=subsetData[['P_A', 'P_B', 'P_C']])

# Calculate the metrics only for Y = 'B'
subsetData = scoreData[scoreData['Y'] == 'B']
outMetric_B = NominalMetric(y=subsetData['Y'], predProb=subsetData[['P_A', 'P_B', 'P_C']])

# Calculate the metrics only for Y = 'C'
subsetData = scoreData[scoreData['Y'] == 'C']
outMetric_C = NominalMetric(y=subsetData['Y'], predProb=subsetData[['P_A', 'P_B', 'P_C']])
{"hexsha": "b2c5f0c7437782a3d6e4ccac6510aac15db89353", "size": 3767, "ext": "py", "lang": "Python", "max_stars_repo_path": "Kolmogorov-Smirnov Curve/Target Metric.py", "max_stars_repo_name": "eyobghiday/machine-learning", "max_stars_repo_head_hexsha": "d7165f2b64df6fb780ad30aae55b3b827382bede", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-14T20:20:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T07:06:31.000Z", "max_issues_repo_path": "Kolmogorov-Smirnov Curve/Target Metric.py", "max_issues_repo_name": "eyobghiday/machine-learning", "max_issues_repo_head_hexsha": "d7165f2b64df6fb780ad30aae55b3b827382bede", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Kolmogorov-Smirnov Curve/Target Metric.py", "max_forks_repo_name": "eyobghiday/machine-learning", "max_forks_repo_head_hexsha": "d7165f2b64df6fb780ad30aae55b3b827382bede", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.3258426966, "max_line_length": 157, "alphanum_fraction": 0.5511016724, "include": true, "reason": "import numpy", "num_tokens": 1257}
c     ============================================
      subroutine b4step2(maxmx,maxmy,mbc,mx,my,meqn,q,
     &            xlower,ylower,dx,dy,t,dt,maux,aux)
c     ============================================
c
c     # called from claw2 before each call to step2.
c     # use to set time-dependent aux arrays or perform other tasks
c     # which must be done every time step.
c
c     # dummy routine
c
c
      implicit double precision (a-h,o-z)
      dimension q(1-mbc:maxmx+mbc,1-mbc:maxmy+mbc, meqn)
      dimension aux(1-mbc:maxmx+mbc,1-mbc:maxmy+mbc, *)
c
      return
      end
{"hexsha": "513b1bb3eb63d8e7870ac89b4dbfaf8eed631173", "size": 592, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "clawpack/2d/lib/b4step2.f", "max_stars_repo_name": "geoflows/geoclaw-4.x", "max_stars_repo_head_hexsha": "c8879d25405017b38392aa3b1ea422ff3e3604ea", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2016-11-13T03:11:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-07T18:59:48.000Z", "max_issues_repo_path": "clawpack/2d/lib/b4step2.f", "max_issues_repo_name": "che-wenchao/D-Claw", "max_issues_repo_head_hexsha": "8ab5d971c9a7a7130e03a447a4b8642e292f4e88", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-01-14T18:00:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T14:25:24.000Z", "max_forks_repo_path": "clawpack/2d/lib/b4step2.f", "max_forks_repo_name": "che-wenchao/D-Claw", "max_forks_repo_head_hexsha": "8ab5d971c9a7a7130e03a447a4b8642e292f4e88", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-01-14T17:15:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-03T17:28:44.000Z", "avg_line_length": 29.6, "max_line_length": 67, "alphanum_fraction": 0.5253378378, "num_tokens": 171}
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torch

from flowbias.config import Config
from flowbias.utils.flow import compute_color
from flowbias.utils.meta_infrastructure import get_available_datasets
from flowbias.evaluations.edgeEval.area_filter import AreaFilter
from flowbias.utils.model_loading import sample_to_torch_batch

dataset_name = "sintelFinalValid"  # "flyingChairsValid"

datasets = get_available_datasets(restrict_to=[dataset_name])
dataset = datasets[dataset_name]

# sample_id = 5
sample_id = 712

sample = sample_to_torch_batch(dataset[sample_id])
print(sample.keys())
flow = sample["target1"]

# area_filter = AreaFilter([1,2,4,8], channels=2)
# factor 1.6 to resemble a laplacian of gaussian
# area_filter = AreaFilter([1, 1.6, 2.56, 4.09, 6.55, 10.48], channels=2)
area_filter = AreaFilter([1, 1.6, 2.56, 4.09, 6.55], channels=2)

# normalize flow
flow_n = flow / torch.norm(flow, dim=1, keepdim=True)

dog = area_filter.compute_dog(flow_n).cpu().numpy()
print(dog.shape)
isum = np.sum(np.abs(dog)[0, :, :, :], axis=0)

mp = area_filter._max_pad
im1 = sample["input1"].cpu().numpy()[0, :, :, :].transpose([1, 2, 0])[mp:-mp, mp:-mp, :]
im2 = sample["input2"].cpu().numpy()[0, :, :, :].transpose([1, 2, 0])[mp:-mp, mp:-mp, :]
flow = flow.cpu().numpy()[0, :, :, :].transpose([1, 2, 0])[mp:-mp, mp:-mp, :]
flow = compute_color(flow[:, :, 0], flow[:, :, 1])

plt.figure()
plt.subplot(2, 2, 1)
plt.imshow(im1)
plt.subplot(2, 2, 2)
plt.imshow(im2)
plt.subplot(2, 2, 3)
plt.imshow(flow)
plt.subplot(2, 2, 4)
plt.imshow(isum, cmap="gray")
plt.show()

print(np.min(isum), np.max(isum))
isum /= np.max(isum)

# convert to 8-bit before handing the array to PIL (float64 arrays are rejected
# by Image.fromarray)
im = Image.fromarray((isum * 255).astype(np.uint8))
if im.mode != 'RGB':
    im = im.convert('RGB')
im.save(f"{Config.temp_directory}/edges/flow_dog_{dataset_name}_{sample_id}.png")
{"hexsha": "b1586a043a532a1686917e104ec089acb68deff0", "size": 1788, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluations/edgeEval/display_area_filter.py", "max_stars_repo_name": "MoritzWillig/flowbias", "max_stars_repo_head_hexsha": "d08e1d8cd250ed147060d374f648e39a23ef16f5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "evaluations/edgeEval/display_area_filter.py", "max_issues_repo_name": "MoritzWillig/flowbias", "max_issues_repo_head_hexsha": "d08e1d8cd250ed147060d374f648e39a23ef16f5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evaluations/edgeEval/display_area_filter.py", "max_forks_repo_name": "MoritzWillig/flowbias", "max_forks_repo_head_hexsha": "d08e1d8cd250ed147060d374f648e39a23ef16f5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0909090909, "max_line_length": 83, "alphanum_fraction": 0.7142058166, "include": true, "reason": "import numpy", "num_tokens": 559}
from __future__ import division
from typing import Union, Optional

import numpy as np  # type: ignore
import cupy as cp  # type: ignore

from gepapy.operations import Operations


class Job_Shop(Operations):
    """Job_Shop."""

    def __init__(
        self,
        processing_time: Optional[Union[list, np.ndarray, cp.core.core.ndarray]],
        machine_sequence: Optional[Union[list, np.ndarray, cp.core.core.ndarray]],
        due_date: Optional[Union[list, np.ndarray, cp.core.core.ndarray]],
        weights: Optional[Union[list, np.ndarray, cp.core.core.ndarray]],
        n_samples: int,
        n_jobs: int,
        n_operations: int,
        n_machines: int,
        percent_cross: float = 0.2,
        percent_intra_cross: float = 0.5,
        percent_mutation: float = 0.2,
        percent_intra_mutation: float = 0.5,
        percent_migration: float = 0.1,
        percent_selection: float = 0.1,
        fitness_type: str = "max_C",
    ):
        """__init__.

        :param processing_time:
        :type processing_time: Optional[Union[list, np.ndarray, cp.core.core.ndarray]]
        :param machine_sequence:
        :type machine_sequence: Optional[Union[list, np.ndarray, cp.core.core.ndarray]]
        :param due_date:
        :type due_date: Optional[Union[list, np.ndarray, cp.core.core.ndarray]]
        :param weights:
        :type weights: Optional[Union[list, np.ndarray, cp.core.core.ndarray]]
        :param n_samples:
        :type n_samples: int
        :param n_jobs:
        :type n_jobs: int
        :param n_operations:
        :type n_operations: int
        :param n_machines:
        :type n_machines: int
        :param percent_cross:
        :type percent_cross: float
        :param percent_intra_cross:
        :type percent_intra_cross: float
        :param percent_mutation:
        :type percent_mutation: float
        :param percent_intra_mutation:
        :type percent_intra_mutation: float
        :param percent_migration:
        :type percent_migration: float
        :param percent_selection:
        :type percent_selection: float
        :param fitness_type:
        :type fitness_type: str
        """
        self._initialized = False
        self._n_samples = self.set_n_samples(n_samples)
        self._n_jobs = self._set_n_jobs(n_jobs)
        self._n_machines = self._set_n_machines(n_machines)
        self._n_operations = self._set_n_operations(n_operations)
        self._fitness_type = self.set_fitness_type(fitness_type)
        self._processing_time = self._set_processing_time(processing_time)
        self._machine_sequence = self._set_machine_sequence(machine_sequence)
        self._due_date = self._set_due_date(due_date)
        self._weights = self._set_weights(weights)
        self._percent_cross = self._set_percent_cross(percent_cross)
        self._percent_intra_cross = self.set_percent_intra_cross(percent_intra_cross)
        self._percent_mutation = self._set_percent_mutation(percent_mutation)
        self._percent_intra_mutation = self.set_percent_intra_mutation(
            percent_intra_mutation
        )
        self._percent_migration = self._set_percent_migration(percent_migration)
        self._percent_selection = self._set_percent_selection(percent_selection)
        self._fitness = cp.array([], dtype=cp.float32)
        self._population = cp.array([], dtype=cp.float32)
        self._population = self.set_population()
        self._initialized = True
{"hexsha": "59697cdbb20799802aa707c4b5f127930871621d", "size": 3459, "ext": "py", "lang": "Python", "max_stars_repo_path": "gepapy/job_shop.py", "max_stars_repo_name": "mandalarotation/GenSchedulingCuda-GSC", "max_stars_repo_head_hexsha": "936d23d26b6a1db93096a849426a6f57089b8747", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gepapy/job_shop.py", "max_issues_repo_name": "mandalarotation/GenSchedulingCuda-GSC", "max_issues_repo_head_hexsha": "936d23d26b6a1db93096a849426a6f57089b8747", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gepapy/job_shop.py", "max_forks_repo_name": "mandalarotation/GenSchedulingCuda-GSC", "max_forks_repo_head_hexsha": "936d23d26b6a1db93096a849426a6f57089b8747", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6941176471, "max_line_length": 87, "alphanum_fraction": 0.6721595837, "include": true, "reason": "import numpy,import cupy", "num_tokens": 795}
"""Use this script to generate fake CPT data.""" import os import sys from pathlib import Path import numpy as np import pandas as pd from faker import Faker fake = Faker() fake.seed(0) def usage(argv): """Give feedback on commandline usage.""" cmd = os.path.basename(argv[0]) print('usage: %s <file_path> <# cpt> <# psd>\n' '(example: "%s faux_data 100 1000")' % (cmd, cmd)) sys.exit(1) def main(argv=sys.argv): """Put fake CPT and PSD files to folder. Every PSD files contains 20 PSD samples. """ if len(argv) < 3: usage(argv) folder = Path(argv[1]) folder.mkdir() cpt_folder = folder.joinpath('cpt') cpt_folder.mkdir() num_cpts = int(argv[2]) for cpt in range(num_cpts): filename = ('CPT name=CPT-{}-{},date={:%Y%m%d},' 'latitude={:0.3f},longitude={:0.3f}.csv').format( fake.building_number(), fake.country_code(), fake.date_time_this_year(), fake.latitude(), fake.longitude()) path = cpt_folder.joinpath(filename) depths = np.arange(0, 3, 0.02) df = pd.DataFrame( {'depth': depths, 'qc': abs(np.cos((depths + 1)**5)) + depths**.3, 'fs': abs(np.sin(depths)**2 + np.cos((depths + 12)**5) / 1.2) }) df.to_csv(str(path)) psd_folder = folder.joinpath('psd') psd_folder.mkdir() num_psds = int(argv[3]) for psd in range(num_psds): filename = ('PSD name=PSD-{}-{},date={:%Y%m%d}.csv').format( fake.building_number(), fake.country_code(), fake.date_time_this_year()) path = psd_folder.joinpath(filename) name = filename.split(',')[0].split('=')[1] df = pd.DataFrame([fake_psd(name + '-' + str(i)) for i in range(20)]) df.index = df['name'] df.sort_index().to_csv(str(path)) def fake_psd(name): """Create fake PSD data.""" return { 'name': name, 'latitude': fake.latitude(), 'longitude': fake.longitude(), 'density': fake.random.randrange(1100, 1600), 'quality': fake.random_element(('good', 'bad')) } if __name__ == "__main__": main()
{"hexsha": "6cca9663c0227afcd422115b1391e7a93ea57d73", "size": 2249, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/sql test/sql_test/scripts/generate_fake_data.py", "max_stars_repo_name": "Pietervanhalem/Pieters-Personal-Repository", "max_stars_repo_head_hexsha": "c31e3c86b1d42f29876455e8553f350d4d527ee5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-26T13:02:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-06T07:09:10.000Z", "max_issues_repo_path": "Python/sql test/sql_test/scripts/generate_fake_data.py", "max_issues_repo_name": "Pietervanhalem/Pieters-Personal-Repository", "max_issues_repo_head_hexsha": "c31e3c86b1d42f29876455e8553f350d4d527ee5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-03-06T07:17:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-26T22:32:59.000Z", "max_forks_repo_path": "Python/sql test/sql_test/scripts/generate_fake_data.py", "max_forks_repo_name": "Pietervanhalem/Personal-Code-Examples", "max_forks_repo_head_hexsha": "c31e3c86b1d42f29876455e8553f350d4d527ee5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4588235294, "max_line_length": 77, "alphanum_fraction": 0.5518008004, "include": true, "reason": "import numpy", "num_tokens": 595}
from Bio import SeqIO
import numpy as np
import timeit
import sys
from functools import lru_cache
from operator import itemgetter
from typing import List, Tuple
import random

from data.ExactWeightedMatching import ExactWeightedMatching
from lib.helperfunctions import preprocess, DNALA, fitness


def random_pairing(num_sequences: int):
    indizes = list(range(num_sequences))
    random.shuffle(indizes)
    pairing = []
    for i in range(num_sequences // 2):
        pairing.append((indizes[2 * i], indizes[2 * i + 1]))
    return pairing


if __name__ == "__main__":
    print("just some tests for reference algorithm and preprocessing...")

    num_sequences = int(sys.argv[1])
    input_path = "data/human_data_{}.fasta".format(num_sequences)

    cost_matrix = preprocess(input_path, num_sequences)

    print()
    print("---------- USING DNALA ----------")
    start = timeit.default_timer()
    pairing_dnala = DNALA(num_sequences, cost_matrix)
    runtime = timeit.default_timer() - start
    print("Fitness for DNALA: {0}, Runtime {1}s".format(fitness(pairing_dnala, cost_matrix), runtime))

    print()
    print("---------- USING EXACT ----------")
    start = timeit.default_timer()
    exact_pairing = ExactWeightedMatching(num_sequences, cost_matrix)
    runtime = timeit.default_timer() - start
    print("Fitness for EXACT: {0}, Runtime {1}s".format(fitness(exact_pairing, cost_matrix), runtime))

    print()
    print("---------- USING RANDOM ----------")
    print("Fitness for RANDOM: {0}".format(fitness(random_pairing(num_sequences), cost_matrix)))
{"hexsha": "37e36647bc5e66567b5364b7ea2582a031ed0aa2", "size": 1580, "ext": "py", "lang": "Python", "max_stars_repo_path": "ex1/main_DNALA_EXACT_RANDOM.py", "max_stars_repo_name": "tkauf15k/sos2020", "max_stars_repo_head_hexsha": "b75188097d095e4acaca32290ba4f49fa8cb6c0e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ex1/main_DNALA_EXACT_RANDOM.py", "max_issues_repo_name": "tkauf15k/sos2020", "max_issues_repo_head_hexsha": "b75188097d095e4acaca32290ba4f49fa8cb6c0e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ex1/main_DNALA_EXACT_RANDOM.py", "max_forks_repo_name": "tkauf15k/sos2020", "max_forks_repo_head_hexsha": "b75188097d095e4acaca32290ba4f49fa8cb6c0e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9166666667, "max_line_length": 102, "alphanum_fraction": 0.6924050633, "include": true, "reason": "import numpy", "num_tokens": 371}
#!/usr/bin/env python

# > ./stream.py
# > ./stream.py --dev=help

import cv2 as cv
from umucv.stream import autoStream
from collections import deque
import numpy as np

frames = deque(maxlen=10)

for key, frame in autoStream():
    aux = cv.resize(frame, (160, 140))
    frames.append(aux)
    screen = np.hstack(frames)
    cv.imshow('lastFrames', screen)

cv.destroyAllWindows()
{"hexsha": "e09191bbee7d1df971176a8c376065935b1a61ac", "size": 385, "ext": "py", "lang": "Python", "max_stars_repo_path": "2021-2022/stream_frame_queue.py", "max_stars_repo_name": "franciscomirasg/umucv", "max_stars_repo_head_hexsha": "703629d5152d55d00821aee02d30fbb3cca1b73e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2021-2022/stream_frame_queue.py", "max_issues_repo_name": "franciscomirasg/umucv", "max_issues_repo_head_hexsha": "703629d5152d55d00821aee02d30fbb3cca1b73e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2021-2022/stream_frame_queue.py", "max_forks_repo_name": "franciscomirasg/umucv", "max_forks_repo_head_hexsha": "703629d5152d55d00821aee02d30fbb3cca1b73e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.0416666667, "max_line_length": 37, "alphanum_fraction": 0.6883116883, "include": true, "reason": "import numpy", "num_tokens": 99}
#ifndef NCURSES_STREAM_HPP #define NCURSES_STREAM_HPP #include <string> #include <ncurses.h> #include <boost//algorithm/string.hpp> #include "Character.hpp" #include "Maze.hpp" #include "Cell.hpp" class NCursesStream { /// \brief wrapper class around NCurses initialization and de-initialization methods struct NCurses { /// \brief constructor. Initializes ncurses NCurses() { initscr(); cbreak(); keypad(stdscr, TRUE); curs_set(0); // hide the cursor refresh(); } /// \brief destructor. tears down ncurses ~NCurses() { endwin(); } }; /// \brief parent class for all windows. Essentially an object oriented wrapper around the ncurses window class Window { protected: size_t mHeight; ///< height of the window size_t mWidth; ///< width of the window size_t mStartRow; ///< start row of the window (origin is top left) size_t mStartCol; ///< start column of the window (origin is top left) WINDOW* mAddress; ///< reference to ncurses window controlled by this object public: /// \brief Constructs a window with the given geometric position /// \param height the height of the window, in characters /// \param width the width of the window, in characters /// \param start_row the start row of the window /// \param start_col the start column of the window Window(size_t height, size_t width, size_t start_row, size_t start_col) : mHeight(height), mWidth(width), mStartRow(start_row), mStartCol(start_col), mAddress( newwin(static_cast<int>(mHeight), static_cast<int>(mWidth), static_cast<int>(mStartRow), static_cast<int>(mStartCol)) ) { //std::cerr << "Window(" << height << "," << width << "," << start_row << "," << start_col << ")\n"; // XXX } /// \brief Destroys a window ~Window() { delwin(mAddress); mAddress = nullptr; } // accessor method inline size_t Height() const { return mHeight; } inline size_t Width() const { return mWidth; } inline size_t StartRow() const { return mStartRow; } inline size_t EndRow() const { return StartRow() + Height(); } inline size_t StartColumn() const { return mStartCol; } inline size_t EndColumn() const { return StartColumn() + Width(); } inline void Refresh() { wrefresh(mAddress); } }; /// \brief Window with a box on the outer edge struct BoxWindow : public Window { BoxWindow(size_t height, size_t width, size_t start_row, size_t start_col) : Window(height, width, start_row, start_col) { box(mAddress, 0, 0); } }; class InputWindow : public Window { const std::string mPrompt = "> "; const size_t mMaxInputLength; public: InputWindow(size_t height, size_t width, size_t start_row, size_t start_col) : Window(height, width, start_row, start_col), mPrompt("> "), mMaxInputLength(Width() - mPrompt.length()) { mvwprintw(mAddress, 0, 0, "%s", mPrompt.c_str()); } std::string Read() { bool enter_received = false; std::string msg; while( !enter_received ) { auto c = wgetch(mAddress); switch( c ) { case 0x08: // backspace case 0x7F: // delete if( !msg.empty() ) msg.pop_back(); break; case '\n': case '\r': enter_received = true; break; default: if( std::isprint(c) ) msg.push_back(c); } mvwprintw(mAddress, 0, mPrompt.length(), "%s", msg.c_str()); wclrtoeol(mAddress); wrefresh(mAddress); } wmove(mAddress, 0, mPrompt.length()); wclrtoeol(mAddress); wrefresh(mAddress); return msg; } }; class DisplayWindow : public Window { public: DisplayWindow(size_t height, size_t width, size_t start_row, size_t start_col) : Window(height, width, start_row, start_col) { } void Write(const Maze<Cell>& dungeon) { for(unsigned row=0; row<dungeon.Rows(); ++row) { wmove(mAddress, row, 0); for(unsigned col=0; 
col<dungeon.Columns(); ++col) { if( dungeon.At(row, col).Opened() ) { waddch(mAddress, ' '); } else { bool closed_above = dungeon.Contains(row-1,col) && !dungeon.At(row-1,col).Opened(); bool closed_below = dungeon.Contains(row+1,col) && !dungeon.At(row+1,col).Opened(); bool closed_left = dungeon.Contains(row,col-1) && !dungeon.At(row,col-1 ).Opened(); bool closed_right = dungeon.Contains(row,col+1) && !dungeon.At(row,col+1).Opened(); if( closed_above && closed_below && closed_right && closed_left ) waddch(mAddress, ACS_PLUS); else if( closed_below && closed_right && closed_left ) waddch(mAddress, ACS_TTEE); else if( closed_above && closed_right && closed_left ) waddch(mAddress, ACS_BTEE); else if( closed_above && closed_below && closed_left ) waddch(mAddress, ACS_RTEE); else if( closed_above && closed_below && closed_right) waddch(mAddress, ACS_LTEE); else if( closed_below && closed_right ) waddch(mAddress, ACS_ULCORNER); else if( closed_below && closed_left ) waddch(mAddress, ACS_URCORNER); else if( closed_above && closed_right ) waddch(mAddress, ACS_LLCORNER); else if( closed_above && closed_left ) waddch(mAddress, ACS_LRCORNER); else if( closed_left || closed_right ) waddch(mAddress, ACS_HLINE); else if( closed_above || closed_below ) waddch(mAddress, ACS_VLINE); else throw std::runtime_error("Display: Unhandled cell detected"); } } } wrefresh(mAddress); } }; class OutputWindow : public BoxWindow { public: OutputWindow(size_t height, size_t width, size_t start_row, size_t start_col) : BoxWindow(height, width, start_row, start_col) { box(mAddress, 0, 0); } void Write(const std::string& msg) { // put the message into the history, splitting lines that are longer than the window or have a newline // split on newlines std::vector<std::string> lines; boost::split(lines, msg, boost::is_any_of("\r\n")); // split long-lines into two lines for(auto itr=lines.begin(); itr!=lines.end(); ++itr) { if( itr->size() > Width() - 2 ) { lines.insert(itr, itr->substr(0, Width()-2)); itr->erase(Width()-2); } } //TODO: handle messages longer than Height()-2 lines long //TODO: add ability to scroll up and see history if(lines.size() > Height()-2) { mvwprintw(mAddress, 1, 1, "ERROR: Message too long (%d lines)", lines.size()); } else { for(size_t i=1; i<(Height()-2); ++i) { wmove(mAddress, i, 1); if( lines.size() >= i ) wprintw(mAddress, "%s", lines.at(i-1).c_str()); wclrtoeol(mAddress); } } box(mAddress, 0, 0); wrefresh(mAddress); } }; NCurses mNCurses; ///< structure for construction and destruction of the ncurses environment DisplayWindow mDisplayWindow; ///< used to display map of current location BoxWindow mCharacterWindow; ///< used to display current character statistics OutputWindow mOutputWindow; ///< used for output game narraration InputWindow mInputWindow; ///< used for input of commands BoxWindow mOuterWindow; ///< outer window containing all game windows public: NCursesStream(size_t display_height=30, size_t display_width=50, size_t output_window_height=5, size_t character_window_width=10) : mNCurses(), mDisplayWindow(display_height, display_width, 1, 1), mCharacterWindow(mDisplayWindow.Height(), character_window_width, mDisplayWindow.StartRow(), mDisplayWindow.EndColumn()+1), mOutputWindow(output_window_height, mCharacterWindow.EndColumn()-mDisplayWindow.StartColumn(), mDisplayWindow.EndRow(), mDisplayWindow.StartColumn()), mInputWindow(1, mOutputWindow.Width(), mOutputWindow.EndRow(), mDisplayWindow.StartColumn()), mOuterWindow(mInputWindow.EndRow()+1, mInputWindow.EndColumn()+1, 0, 0) { 
    mOuterWindow.Refresh();
    mDisplayWindow.Refresh();
    mCharacterWindow.Refresh();
    mOutputWindow.Refresh();
    mInputWindow.Refresh();
}

~NCursesStream()
{
}

void flush()
{
    refresh();
}

// TODO: add iomanip methods:
// SetMazePaneHeight
// SetMazePaneWidth
// SetCharacterPaneHeight
// SetCharacterPaneWidth
// SetIOPaneHeight
// SetIOPaneWidth

//TODO: make these templates, with specialization for character and dungeon. All other info goes to the
// narration window
friend NCursesStream& operator<<(NCursesStream& out, const Character& character);
friend NCursesStream& operator<<(NCursesStream& out, const std::string& narration);
friend NCursesStream& operator<<(NCursesStream& out, const Maze<Cell>& dungeon);
friend NCursesStream& operator>>(NCursesStream& in, std::string& str);
};

NCursesStream& operator<<(NCursesStream& out, const Character& character)
{
    //TODO
    return out;
}

NCursesStream& operator<<(NCursesStream& out, const std::string& narration)
{
    out.mOutputWindow.Write(narration);
    return out;
}

NCursesStream& operator>>(NCursesStream& in, std::string& str)
{
    str = in.mInputWindow.Read();
    return in;
}

//TODO: make a template
NCursesStream& operator<<(NCursesStream& out, const Maze<Cell>& dungeon)
{
    out.mDisplayWindow.Write(dungeon);
    return out;
}

#endif // NCURSES_STREAM_HPP
{"hexsha": "5c1f62da078d13a133d3952d5a44c369ee44167d", "size": 11396, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "dungeon/NCursesStream.hpp", "max_stars_repo_name": "samsparks/climaze", "max_stars_repo_head_hexsha": "a49dc0a926f86311212a61e837ebb46e2a387fe2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dungeon/NCursesStream.hpp", "max_issues_repo_name": "samsparks/climaze", "max_issues_repo_head_hexsha": "a49dc0a926f86311212a61e837ebb46e2a387fe2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dungeon/NCursesStream.hpp", "max_forks_repo_name": "samsparks/climaze", "max_forks_repo_head_hexsha": "a49dc0a926f86311212a61e837ebb46e2a387fe2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0632911392, "max_line_length": 156, "alphanum_fraction": 0.5431730432, "num_tokens": 2489}
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # (C) British Crown Copyright 2017-2021 Met Office. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ Unit tests for the `ensemble_copula_coupling.EnsembleCopulaCouplingUtilities` class. """ import importlib import unittest import unittest.mock as mock from datetime import datetime from unittest.case import skipIf from unittest.mock import patch import numpy as np from cf_units import Unit from iris.coords import DimCoord from iris.cube import Cube, CubeList from iris.exceptions import CoordinateNotFoundError from iris.tests import IrisTest from improver.ensemble_copula_coupling.utilities import ( choose_set_of_percentiles, concatenate_2d_array_with_2d_array_endpoints, create_cube_with_percentiles, get_bounds_of_distribution, insert_lower_and_upper_endpoint_to_1d_array, interpolate_multiple_rows_same_x, interpolate_multiple_rows_same_y, restore_non_percentile_dimensions, slow_interp_same_x, slow_interp_same_y, ) from improver.synthetic_data.set_up_test_cubes import ( set_up_percentile_cube, set_up_variable_cube, ) from .ecc_test_data import ECC_TEMPERATURE_REALIZATIONS, set_up_spot_test_cube class Test_concatenate_2d_array_with_2d_array_endpoints(IrisTest): """Test the concatenate_2d_array_with_2d_array_endpoints.""" def test_basic(self): """Test that result is a numpy array with the expected contents.""" expected = np.array([[0, 20, 50, 80, 100]]) input_array = np.array([[20, 50, 80]]) result = concatenate_2d_array_with_2d_array_endpoints(input_array, 0, 100) self.assertIsInstance(result, np.ndarray) self.assertArrayAlmostEqual(result, expected) def test_1d_input(self): """Test 1D input results in the expected error""" input_array = np.array([-40, 200, 1000]) msg = "Expected 2D input" with self.assertRaisesRegex(ValueError, msg): concatenate_2d_array_with_2d_array_endpoints(input_array, -100, 10000) def test_3d_input(self): """Test 3D input results in expected error""" input_array = np.array([[[-40, 200, 1000]]]) msg = "Expected 2D input" with 
self.assertRaisesRegex(ValueError, msg): concatenate_2d_array_with_2d_array_endpoints(input_array, -100, 10000) class Test_choose_set_of_percentiles(IrisTest): """Test the choose_set_of_percentiles plugin.""" def test_basic(self): """ Test that the plugin returns a list with the expected number of percentiles. """ no_of_percentiles = 3 result = choose_set_of_percentiles(no_of_percentiles) self.assertIsInstance(result, list) self.assertEqual(len(result), no_of_percentiles) def test_data(self): """ Test that the plugin returns a list with the expected data values for the percentiles. """ data = np.array([25, 50, 75]) no_of_percentiles = 3 result = choose_set_of_percentiles(no_of_percentiles) self.assertArrayAlmostEqual(result, data) def test_random(self): """ Test that the plugin returns a list with the expected number of percentiles, if the random sampling option is selected. """ no_of_percentiles = 3 result = choose_set_of_percentiles(no_of_percentiles, sampling="random") self.assertIsInstance(result, list) self.assertEqual(len(result), no_of_percentiles) def test_unknown_sampling_option(self): """ Test that the plugin returns the expected error message, if an unknown sampling option is selected. """ no_of_percentiles = 3 msg = "Unrecognised sampling option" with self.assertRaisesRegex(ValueError, msg): choose_set_of_percentiles(no_of_percentiles, sampling="unknown") class Test_create_cube_with_percentiles(IrisTest): """Test the _create_cube_with_percentiles plugin.""" def setUp(self): """Set up temperature cube.""" self.cube = set_up_variable_cube(ECC_TEMPERATURE_REALIZATIONS[0]) self.cube_data = ECC_TEMPERATURE_REALIZATIONS def test_basic(self): """Test that the plugin returns an Iris.cube.Cube with suitable units.""" cube_data = self.cube_data + 2 percentiles = [10, 50, 90] result = create_cube_with_percentiles(percentiles, self.cube, cube_data) self.assertIsInstance(result, Cube) self.assertEqual(result.units, self.cube.units) def test_changed_cube_units(self): """Test that the plugin returns a cube with chosen units.""" cube_data = self.cube_data + 2 percentiles = [10, 50, 90] result = create_cube_with_percentiles( percentiles, self.cube, cube_data, cube_unit="1" ) self.assertEqual(result.units, Unit("1")) def test_many_percentiles(self): """Test that the plugin returns an Iris.cube.Cube with many percentiles. """ percentiles = np.linspace(0, 100, 100) cube_data = np.zeros( [ len(percentiles), len(self.cube.coord("latitude").points), len(self.cube.coord("longitude").points), ] ) result = create_cube_with_percentiles(percentiles, self.cube, cube_data) self.assertEqual(cube_data.shape, result.data.shape) def test_incompatible_percentiles(self): """ Test that the plugin fails if the percentile values requested are not numbers. """ percentiles = ["cat", "dog", "elephant"] cube_data = np.zeros( [ len(percentiles), len(self.cube.coord("latitude").points), len(self.cube.coord("longitude").points), ] ) msg = "could not convert string to float" with self.assertRaisesRegex(ValueError, msg): create_cube_with_percentiles(percentiles, self.cube, cube_data) def test_percentile_points(self): """ Test that the plugin returns an Iris.cube.Cube with a percentile coordinate with the desired points. 
""" cube_data = self.cube_data + 2 percentiles = [10, 50, 90] result = create_cube_with_percentiles(percentiles, self.cube, cube_data) self.assertIsInstance(result.coord("percentile"), DimCoord) self.assertArrayAlmostEqual(result.coord("percentile").points, percentiles) def test_spot_forecasts_percentile_points(self): """ Test that the plugin returns a Cube with a percentile dimension coordinate and that the percentile dimension has the expected points for an input spot forecast. """ cube = set_up_spot_test_cube() spot_data = cube.data.copy() + 2 spot_cube = next(cube.slices_over("realization")) spot_cube.remove_coord("realization") percentiles = [10, 50, 90] result = create_cube_with_percentiles(percentiles, spot_cube, spot_data) self.assertIsInstance(result, Cube) self.assertIsInstance(result.coord("percentile"), DimCoord) self.assertArrayAlmostEqual(result.coord("percentile").points, percentiles) def test_percentile_length_too_short(self): """ Test that the plugin raises the default ValueError, if the number of percentiles is fewer than the length of the zeroth dimension of the required cube data. """ cube_data = self.cube_data + 2 percentiles = [10, 50] msg = "Require data with shape" with self.assertRaisesRegex(ValueError, msg): create_cube_with_percentiles(percentiles, self.cube, cube_data) def test_percentile_length_too_long(self): """ Test that the plugin raises the default ValueError, if the number of percentiles exceeds the length of the zeroth dimension of the required data. """ cube_data = self.cube_data[0, :, :] + 2 percentiles = [10, 50, 90] msg = "Require data with shape" with self.assertRaisesRegex(ValueError, msg): create_cube_with_percentiles(percentiles, self.cube, cube_data) def test_metadata_copy(self): """ Test that the metadata dictionaries within the input cube, are also present on the output cube. """ self.cube.attributes = {"source": "ukv"} cube_data = self.cube_data + 2 percentiles = [10, 50, 90] result = create_cube_with_percentiles(percentiles, self.cube, cube_data) self.assertDictEqual(self.cube.metadata._asdict(), result.metadata._asdict()) def test_coordinate_copy(self): """ Test that the coordinates within the input cube, are also present on the output cube. """ cube_data = self.cube_data + 2 percentiles = [10, 50, 90] result = create_cube_with_percentiles(percentiles, self.cube, cube_data) for coord in self.cube.coords(): if coord not in result.coords(): msg = "Coordinate: {} not found in cube {}".format(coord, result) raise CoordinateNotFoundError(msg) class Test_get_bounds_of_distribution(IrisTest): """Test the get_bounds_of_distribution plugin.""" def test_basic(self): """Test that the result is a numpy array.""" cube_name = "air_temperature" cube_units = Unit("degreesC") result = get_bounds_of_distribution(cube_name, cube_units) self.assertIsInstance(result, np.ndarray) def test_check_data(self): """ Test that the expected results are returned for the bounds_pairing. """ cube_name = "air_temperature" cube_units = Unit("degreesC") bounds_pairing = (-100, 60) result = get_bounds_of_distribution(cube_name, cube_units) self.assertArrayAlmostEqual(result, bounds_pairing) def test_check_unit_conversion(self): """ Test that the expected results are returned for the bounds_pairing, if the units of the bounds_pairings need to be converted to match the units of the forecast. 
""" cube_name = "air_temperature" cube_units = Unit("fahrenheit") bounds_pairing = (-148, 140) # In fahrenheit result = get_bounds_of_distribution(cube_name, cube_units) self.assertArrayAlmostEqual(result, bounds_pairing) def test_check_exception_is_raised(self): """ Test that the expected results are returned for the bounds_pairing. """ cube_name = "nonsense" cube_units = Unit("degreesC") msg = "The bounds_pairing_key" with self.assertRaisesRegex(KeyError, msg): get_bounds_of_distribution(cube_name, cube_units) class Test_insert_lower_and_upper_endpoint_to_1d_array(IrisTest): """Test the insert_lower_and_upper_endpoint_to_1d_array.""" def test_basic(self): """Test that the result is a numpy array with the expected contents.""" expected = [0, 20, 50, 80, 100] percentiles = np.array([20, 50, 80]) result = insert_lower_and_upper_endpoint_to_1d_array(percentiles, 0, 100) self.assertIsInstance(result, np.ndarray) self.assertArrayAlmostEqual(result, expected) def test_2d_example(self): """Test 2D input results in expected error""" percentiles = np.array([[-40, 200, 1000], [-40, 200, 1000]]) msg = "Expected 1D input" with self.assertRaisesRegex(ValueError, msg): insert_lower_and_upper_endpoint_to_1d_array(percentiles, -100, 10000) class Test_restore_non_percentile_dimensions(IrisTest): """Test the restore_non_percentile_dimensions.""" def setUp(self): """Set up template cube and temperature data.""" self.cube = set_up_variable_cube(282 * np.ones((3, 3), dtype=np.float32)) # function is designed to reshape an input data array with dimensions of # "percentiles x points" - generate suitable input data self.expected_data = np.sort(ECC_TEMPERATURE_REALIZATIONS.copy(), axis=0) points_data = [self.expected_data[i].flatten() for i in range(3)] self.input_data = np.array(points_data) def test_multiple_percentiles(self): """ Test the result is an array with the expected shape and contents. """ reshaped_array = restore_non_percentile_dimensions( self.input_data, self.cube, 3 ) self.assertIsInstance(reshaped_array, np.ndarray) self.assertArrayAlmostEqual(reshaped_array, self.expected_data) def test_single_percentile(self): """ Test the array size and contents if the percentile coordinate is scalar. """ expected = np.array( [[226.15, 237.4, 248.65], [259.9, 271.15, 282.4], [293.65, 304.9, 316.15]], dtype=np.float32, ) reshaped_array = restore_non_percentile_dimensions( self.input_data[0], self.cube, 1 ) self.assertArrayAlmostEqual(reshaped_array, expected) def test_multiple_timesteps(self): """ Test that the data has been reshaped correctly when there are multiple timesteps. The array contents are also checked. The output cube has only a single percentile, which is therefore demoted to a scalar coordinate. 
""" expected = np.array( [ [[4.0, 4.71428571], [5.42857143, 6.14285714]], [[6.85714286, 7.57142857], [8.28571429, 9.0]], ] ) cubelist = CubeList([]) for i, hour in enumerate([7, 8]): cubelist.append( set_up_percentile_cube( np.array([expected[i, :, :]], dtype=np.float32), np.array([50], dtype=np.float32), units="degC", time=datetime(2015, 11, 23, hour), frt=datetime(2015, 11, 23, 6), ) ) percentile_cube = cubelist.merge_cube() reshaped_array = restore_non_percentile_dimensions( percentile_cube.data.flatten(), next(percentile_cube.slices_over("percentile")), 1, ) self.assertArrayAlmostEqual(reshaped_array, expected) numba_installed = True try: importlib.util.find_spec("numba") from improver.ensemble_copula_coupling.numba_utilities import ( fast_interp_same_x, fast_interp_same_y, ) except ImportError: numba_installed = False class Test_interpolate_multiple_rows_same_y(IrisTest): """Test interpolate_multiple_rows_same_y""" def setUp(self): """Set up arrays.""" np.random.seed(0) self.x = np.arange(0, 1, 0.01) self.xp = np.sort(np.random.random_sample((100, 100)), axis=1) self.fp = np.arange(0, 100, 1).astype(float) def test_slow(self): """Test slow interp against known result.""" xp = np.array([[0, 1, 2, 3, 4], [-4, -3, -2, -1, 0]], dtype=np.float32) fp = np.array([0, 2, 4, 6, 8], dtype=np.float32) x = np.array([-1, 0.5, 2], dtype=np.float32) expected = np.array([[0, 1, 4], [6, 8, 8]], dtype=np.float32) result = slow_interp_same_y(x, xp, fp) np.testing.assert_allclose(result, expected) @patch.dict("sys.modules", numba=None) @patch("improver.ensemble_copula_coupling.utilities.slow_interp_same_y") def test_slow_interp_same_y_called(self, interp_imp): """Test that slow_interp_same_y is called if numba is not installed.""" interpolate_multiple_rows_same_y( mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp ) interp_imp.assert_called_once_with( mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp ) @skipIf(not (numba_installed), "numba not installed") @patch("improver.ensemble_copula_coupling.numba_utilities.fast_interp_same_y") def test_fast_interp_same_y_called(self, interp_imp): """Test that fast_interp_same_y is called if numba is installed.""" interpolate_multiple_rows_same_y( mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp ) interp_imp.assert_called_once_with( mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp ) @skipIf(not (numba_installed), "numba not installed") def test_fast(self): """Test fast interp against known result.""" xp = np.array([[0, 1, 2, 3, 4], [-4, -3, -2, -1, 0]], dtype=np.float32) fp = np.array([0, 2, 4, 6, 8], dtype=np.float32) x = np.array([-1, 0.5, 2], dtype=np.float32) expected = np.array([[0, 1, 4], [6, 8, 8]], dtype=np.float32) result = fast_interp_same_y(x, xp, fp) np.testing.assert_allclose(result, expected) @skipIf(not (numba_installed), "numba not installed") def test_slow_vs_fast(self): """Test that slow and fast versions give same result.""" result_slow = slow_interp_same_y(self.x, self.xp, self.fp) result_fast = fast_interp_same_y(self.x, self.xp, self.fp) np.testing.assert_allclose(result_slow, result_fast) @skipIf(not (numba_installed), "numba not installed") def test_slow_vs_fast_unordered(self): """Test that slow and fast versions give same result when x is not sorted.""" shuffled_x = self.x.copy() np.random.shuffle(shuffled_x) result_slow = slow_interp_same_y(shuffled_x, self.xp, self.fp) result_fast = fast_interp_same_y(shuffled_x, self.xp, self.fp) np.testing.assert_allclose(result_slow, result_fast) @skipIf(not (numba_installed), "numba not 
installed") def test_slow_vs_fast_repeated(self): """Test that slow and fast versions give same result when rows of xp contain repeats.""" xp_repeat = self.xp.copy() xp_repeat[:, 51] = xp_repeat[:, 50] result_slow = slow_interp_same_y(self.x, xp_repeat, self.fp) result_fast = fast_interp_same_y(self.x, xp_repeat, self.fp) np.testing.assert_allclose(result_slow, result_fast) @skipIf(not (numba_installed), "numba not installed") def test_slow_vs_multi(self): """Test that slow interp gives same result as interpolate_multiple_rows_same_y.""" result_slow = slow_interp_same_y(self.x, self.xp, self.fp) result_multiple = interpolate_multiple_rows_same_y(self.x, self.xp, self.fp) np.testing.assert_allclose(result_slow, result_multiple) class TestInterpolateMultipleRowsSameX(IrisTest): """Test interpolate_multiple_rows""" def setUp(self): """Set up arrays.""" np.random.seed(0) self.x = np.arange(0, 1, 0.01) self.xp = np.sort(np.random.random_sample(100)) self.fp = np.random.random((100, 100)) def test_slow(self): """Test slow interp against known result.""" xp = np.array([0, 1, 2, 3, 4], dtype=np.float32) fp = np.array([[0, 0.5, 1, 1.5, 2], [0, 2, 4, 6, 8]], dtype=np.float32) x = np.array([-1, 0.5, 2], dtype=np.float32) expected = np.array([[0, 0.25, 1], [0, 1, 4]], dtype=np.float32) result = slow_interp_same_x(x, xp, fp) np.testing.assert_allclose(result, expected) @skipIf(not (numba_installed), "numba not installed") def test_fast(self): """Test fast interp against known result.""" xp = np.array([0, 1, 2, 3, 4], dtype=np.float32) fp = np.array([[0, 0.5, 1, 1.5, 2], [0, 2, 4, 6, 8]], dtype=np.float32) x = np.array([-1, 0.5, 2], dtype=np.float32) expected = np.array([[0, 0.25, 1], [0, 1, 4]], dtype=np.float32) result = fast_interp_same_x(x, xp, fp) np.testing.assert_allclose(result, expected) @skipIf(not (numba_installed), "numba not installed") def test_slow_vs_fast(self): """Test that slow and fast versions give same result.""" result_slow = slow_interp_same_x(self.x, self.xp, self.fp) result_fast = fast_interp_same_x(self.x, self.xp, self.fp) np.testing.assert_allclose(result_slow, result_fast) @skipIf(not (numba_installed), "numba not installed") def test_slow_vs_fast_unordered(self): """Test that slow and fast versions give same result when x is not sorted.""" shuffled_x = self.x.copy() np.random.shuffle(shuffled_x) result_slow = slow_interp_same_x(shuffled_x, self.xp, self.fp) result_fast = fast_interp_same_x(shuffled_x, self.xp, self.fp) np.testing.assert_allclose(result_slow, result_fast) @skipIf(not (numba_installed), "numba not installed") def test_slow_vs_fast_repeated(self): """Test that slow and fast versions give same result when xp contains repeats.""" repeat_xp = self.xp.copy() repeat_xp[51] = repeat_xp[50] result_slow = slow_interp_same_x(self.x, repeat_xp, self.fp) result_fast = fast_interp_same_x(self.x, repeat_xp, self.fp) np.testing.assert_allclose(result_slow, result_fast) @skipIf(not (numba_installed), "numba not installed") def test_slow_vs_multi(self): """Test that slow interp gives same result as interpolate_multiple_rows_same_x.""" result_slow = slow_interp_same_x(self.x, self.xp, self.fp) result_multiple = interpolate_multiple_rows_same_x(self.x, self.xp, self.fp) np.testing.assert_allclose(result_slow, result_multiple) @patch.dict("sys.modules", numba=None) @patch("improver.ensemble_copula_coupling.utilities.slow_interp_same_x") def test_slow_interp_same_x_called(self, interp_imp): """Test that slow_interp_same_x is called if numba is not installed.""" 
interpolate_multiple_rows_same_x( mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp ) interp_imp.assert_called_once_with( mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp ) @skipIf(not (numba_installed), "numba not installed") @patch("improver.ensemble_copula_coupling.numba_utilities.fast_interp_same_x") def test_fast_interp_same_x_called(self, interp_imp): """Test that fast_interp_same_x is called if numba is installed.""" interpolate_multiple_rows_same_x( mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp ) interp_imp.assert_called_once_with( mock.sentinel.x, mock.sentinel.xp, mock.sentinel.fp ) if __name__ == "__main__": unittest.main()
{"hexsha": "e08301419ba4e3ea152d3f4c509d9025fef13bb4", "size": 24272, "ext": "py", "lang": "Python", "max_stars_repo_path": "improver_tests/ensemble_copula_coupling/test_utilities.py", "max_stars_repo_name": "bjwheltor/improver", "max_stars_repo_head_hexsha": "21b21106f2a7376ee32cd01f47ea81bb770f56a9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "improver_tests/ensemble_copula_coupling/test_utilities.py", "max_issues_repo_name": "bjwheltor/improver", "max_issues_repo_head_hexsha": "21b21106f2a7376ee32cd01f47ea81bb770f56a9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "improver_tests/ensemble_copula_coupling/test_utilities.py", "max_forks_repo_name": "bjwheltor/improver", "max_forks_repo_head_hexsha": "21b21106f2a7376ee32cd01f47ea81bb770f56a9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5886287625, "max_line_length": 91, "alphanum_fraction": 0.6610085695, "include": true, "reason": "import numpy", "num_tokens": 5681}
""" OpenCV Canny Edge Detection : Edge detection is term where identify the boundary of object in image. """ # importing module import cv2 import numpy as np # image path img = cv2.imread("../images/1.jpeg") # canny edge detection edges = cv2.Canny(img, 100, 200) # display the image cv2.imshow("Edge detection image", edges) cv2.imshow("Original image", img) # waits until a key is pressed cv2.waitKey(0) # destroys the window showing image cv2.destroyAllWindows()
{"hexsha": "f7f926ed34066ddacdcd3afe5880a5e521b52bc1", "size": 480, "ext": "py", "lang": "Python", "max_stars_repo_path": "OpenCV2/Image_Processing/Canny_Edge_Detection.py", "max_stars_repo_name": "siddharth-143/Python", "max_stars_repo_head_hexsha": "293f4643a3a13e3b82d23fd8922db54dbb0f12bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OpenCV2/Image_Processing/Canny_Edge_Detection.py", "max_issues_repo_name": "siddharth-143/Python", "max_issues_repo_head_hexsha": "293f4643a3a13e3b82d23fd8922db54dbb0f12bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OpenCV2/Image_Processing/Canny_Edge_Detection.py", "max_forks_repo_name": "siddharth-143/Python", "max_forks_repo_head_hexsha": "293f4643a3a13e3b82d23fd8922db54dbb0f12bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.2, "max_line_length": 74, "alphanum_fraction": 0.725, "include": true, "reason": "import numpy", "num_tokens": 121}
subroutine opandist (gsmObj, event, fisevent, nn) ! ====================================================================== ! ! Accumulate distribution of fission fragments' opening angles. ! nn is total number of produced neutrons. ! bf12 contains the fragment velocity vectors. ! ! Written by K. K. Gudima, December, 2004. ! Edited by A. J. Sierk, LANL T-16, January, 2005. ! Converted to separate SR by A. J. Sierk, LANL T-16, March, 2006. ! Edited by LMK, XCP-3, July 2013 (included error protection) ! ! ====================================================================== use, intrinsic:: iso_fortran_env, only: int32, real64 use gsm_params, only: one, radiantodegree implicit none class(GSM), intent(inout) :: gsmObj class(EventData), intent(in ) :: event logical, intent(in ) :: fisevent integer(int32), intent(in ) :: nn integer(int32) :: it12, jk real(real64) :: b1, b2, ct12, st12, t12, temp ! ====================================================================== real(real64) :: opan, dth12 common /fisopa/ opan(7,185), dth12 ! ====================================================================== if (.not.fisevent) return b1 = sqrt(event%fissFrag(1)%linearMomFrac(1)**2 + & & event%fissFrag(1)%linearMomFrac(2)**2 + & & event%fissFrag(1)%linearMomFrac(3)**2) b2 = sqrt(event%fissFrag(2)%linearMomFrac(1)**2 + & & event%fissFrag(2)%linearMomFrac(2)**2 + & & event%fissFrag(2)%linearMomFrac(3)**2) temp = b1*b2 if ( temp < div0Lim .and.temp > -div0Lim ) then temp = div0Lim write(gsmObj%io%message,1000) "44" call gsmObj%io%print(4, 3, gsmObj%io%message) end if ! Angle between the two fission fragments? ct12 = (event%fissFrag(1)%linearMomFrac(1) * & & event%fissFrag(2)%linearMomFrac(1) + & & event%fissFrag(1)%linearMomFrac(2) * & & event%fissFrag(2)%linearMomFrac(2) + & & event%fissFrag(1)%linearMomFrac(3) * & & event%fissFrag(2)%linearMomFrac(3)) / (temp) st12 = sqrt(abs(one - ct12**2)) t12 = atan2(st12,ct12)*radiantodegree if ( dth12 < div0Lim .and.dth12 > -div0Lim ) then dth12 = div0Lim write(gsmObj%io%message,1000) "52" call gsmObj%io%print(4, 3, gsmObj%io%message) end if it12 = int(t12/dth12) + 1 opan(1,it12) = opan(1,it12) + one opan(1,183) = opan(1,183) + t12 opan(1,184) = opan(1,184) + t12*t12 opan(1,185) = opan(1,185) + one if (nn <= 5) then ! n multiplictiy 0-5 jk = 2 elseif (nn <= 8) then ! n multiplictiy 6-8 jk = 3 elseif (nn <= 12) then ! n multiplictiy 9-12 jk = 4 elseif (nn <= 15) then ! n multiplictiy 13-15 jk = 5 elseif (nn <= 19) then ! n multiplictiy 16-19 jk = 6 elseif (nn >= 20) then ! n multiplicity 20-..... jk = 7 endif opan(jk,it12) = opan(jk,it12) + one opan(jk,183) = opan(jk,183) + t12 opan(jk,184) = opan(jk,184) + t12*t12 opan(jk,185) = opan(jk,185) + one return ! ====================================================================== 1000 format("Divide by zero error prevented in 'opandist.f90', line(s) ", A) ! ====================================================================== end subroutine opandist
{"hexsha": "cf384e0fbdfc2ce34f58196cb3f500de9b953022", "size": 3406, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/GeneralizedSpallation/tally/opandist.f90", "max_stars_repo_name": "lanl/generalized-spallation-model", "max_stars_repo_head_hexsha": "4a2f01a873d2e8f2304b8fd1474d43d1ce8d744d", "max_stars_repo_licenses": ["Intel", "Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-24T18:05:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T18:05:48.000Z", "max_issues_repo_path": "src/GeneralizedSpallation/tally/opandist.f90", "max_issues_repo_name": "lanl/generalized-spallation-model", "max_issues_repo_head_hexsha": "4a2f01a873d2e8f2304b8fd1474d43d1ce8d744d", "max_issues_repo_licenses": ["Intel", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GeneralizedSpallation/tally/opandist.f90", "max_forks_repo_name": "lanl/generalized-spallation-model", "max_forks_repo_head_hexsha": "4a2f01a873d2e8f2304b8fd1474d43d1ce8d744d", "max_forks_repo_licenses": ["Intel", "Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1134020619, "max_line_length": 76, "alphanum_fraction": 0.5126247798, "num_tokens": 1167}
function V = spm_vol(P) % Get header information for images % FORMAT V = spm_vol(P) % P - a char or cell array of filenames % V - a structure array containing image volume information % The elements of the structures are: % V.fname - the filename of the image. % V.dim - the x, y and z dimensions of the volume % V.dt - A 1x2 array. First element is datatype (see spm_type). % The second is 1 or 0 depending on the endian-ness. % V.mat - a 4x4 affine transformation matrix mapping from % voxel coordinates to real world coordinates. % V.pinfo - plane info for each plane of the volume. % V.pinfo(1,:) - scale for each plane % V.pinfo(2,:) - offset for each plane % The true voxel intensities of the jth image are given % by: val*V.pinfo(1,j) + V.pinfo(2,j) % V.pinfo(3,:) - offset into image (in bytes). % If the size of pinfo is 3x1, then the volume is assumed % to be contiguous and each plane has the same scalefactor % and offset. %__________________________________________________________________________ % % The fields listed above are essential for the mex routines, but other % fields can also be incorporated into the structure. % % Note that spm_vol can also be applied to the filename(s) of 4-dim % volumes. In that case, the elements of V will point to a series of 3-dim % images. %__________________________________________________________________________ % Copyright (C) 1999-2014 Wellcome Trust Centre for Neuroimaging % John Ashburner % $Id: spm_vol.m 5958 2014-04-16 17:13:41Z guillaume $ if ~nargin V = struct('fname', {},... 'dim', {},... 'dt', {},... 'pinfo', {},... 'mat', {},... 'n', {},... 'descrip', {},... 'private', {}); elseif isempty(P) V = spm_vol; if iscell(P), V = {V}; end elseif isstruct(P) V = P; elseif iscell(P) V = cellfun(@spm_vol,P, 'UniformOutput',false); else V = spm_vol; cnt = 0; for i=1:size(P,1) v = spm_vol_hdr(deblank(P(i,:))); f = fieldnames(v); for j=1:numel(f) [V(cnt+1:cnt+size(v,2),1).(f{j})] = deal(v.(f{j})); end cnt = cnt + size(v,2); end end %========================================================================== % function V = spm_vol_hdr(p) %========================================================================== function V = spm_vol_hdr(p) [pth,nam,ext,n] = spm_fileparts(p); p = fullfile(pth,[nam ext]); n = str2num(n); if ~spm_existfile(p) error('File "%s" does not exist.', p); end switch ext case {'.nii','.NII'} % Do nothing case {'.img','.IMG'} if ~spm_existfile(fullfile(pth,[nam '.hdr'])) && ... ~spm_existfile(fullfile(pth,[nam '.HDR'])) error('File "%s" does not exist.', fullfile(pth,[nam '.hdr'])); end case {'.hdr','.HDR'} ext = '.img'; p = fullfile(pth,[nam ext]); if ~spm_existfile(p) error('File "%s" does not exist.', p); end case {'.gz','.GZ'} fprintf('Compressed NIfTI files are not supported.\n'); tmpname = tempname; try tmpname = char(gunzip(p,tmpname)); catch error('Cannot uncompress "%s".',p); end try if isempty(n), n = ''; else n = [',' num2str(n)]; end V = spm_vol_hdr([tmpname n]); for i=1:numel(V) V(i).dat = spm_read_vols(V(i)); V(i).private.dat.fname = spm_file(p,'ext',''); V(i).fname = p; V(i).dt(1) = 64; V(i).pinfo = [1 0 0]'; end catch warning('Cannot read uncompressed file "%s".',p); end spm_unlink(tmpname); rmdir(fileparts(tmpname)); return otherwise error('File "%s" is not of a recognised type.', p); end V = spm_vol_nifti(p,n); if isempty(n) && length(V.private.dat.dim) > 3 V0(1) = V; for i = 2:V.private.dat.dim(4) V0(i) = spm_vol_nifti(V.private, i); end V = V0; end
{"author": "fieldtrip", "repo": "fieldtrip", "sha": "c2039be598a02d86b39aae76bfa7aaa720f9801c", "save_path": "github-repos/MATLAB/fieldtrip-fieldtrip", "path": "github-repos/MATLAB/fieldtrip-fieldtrip/fieldtrip-c2039be598a02d86b39aae76bfa7aaa720f9801c/external/spm12/spm_vol.m"}
#!/usr/bin/env python import fvm import fvm.fvmbaseExt as fvmbaseExt import fvm.importers as importers import fvm.fvmparallel as fvmparallel import sys, time from numpy import * from mpi4py import MPI from FluentCase import FluentCase #fvmbaseExt.enableDebug("cdtor") fileBase = None numIterations = 10 fileBase = "/home/yildirim/memosa/src/fvm/test/cav_26_tri" #fileBase = "/home/yildirim/memosa/src/fvm/test/cav_44_tri" #fileBase = "/home/yildirim/memosa/src/fvm/test/tri_894" #fileBase = "/home/yildirim/memosa/src/fvm/test/cav_tri_915K" #fileBase = "/home/yildirim/memosa/src/fvm/test/cav_tri_3_66M" #fileBase = "/home/yildirim/memosa/src/fvm/test/test_tri" def usage(): print "Usage: %s filebase [outfilename]" % sys.argv[0] print "Where filebase.cas is a Fluent case file." print "Output will be in filebase-prism.dat if it is not specified." sys.exit(1) def advance(fmodel,niter): for i in range(0,niter): try: fmodel.advance(1) except KeyboardInterrupt: break def dumpMPITimeProfile(part_mesh_maxtime, part_mesh_mintime, solver_maxtime, solver_mintime): fname = "time_mpi_totalprocs" + str(MPI.COMM_WORLD.Get_size()) + ".dat" f = open(fname,'w') line = " part_mesh_mintime = " + str(part_mesh_mintime[0]) + "\n" + \ " part_mesh_maxtime = " + str(part_mesh_maxtime[0]) + "\n" + \ " solver_mintime = " + str(solver_mintime[0]) + "\n" + \ " solver_maxtime = " + str(solver_maxtime[0]) + "\n" print line f.write(line) f.close() def dumpTecplotFile(nmesh, meshes): #cell sites cellSites = [] for n in range(0,nmesh): cellSites.append( meshes[n].getCells() ) print "cellSites[", n, "].getCount = ", cellSites[n].getCount() #face sites faceSites = [] for n in range(0,nmesh): faceSites.append( meshes[n].getFaces() ) #node sites nodeSites = [] for n in range(0,nmesh): nodeSites.append( meshes[n].getNodes() ) #get connectivity (faceCells) faceCells = [] for n in range(0,nmesh): faceCells.append( meshes[n].getConnectivity( faceSites[n], cellSites[n] ) ) #get connectivity ( cellNodes ) cellNodes = [] for n in range(0,nmesh): cellNodes.append( meshes[n].getCellNodes() ) #get Volume as array volumes = [] for n in range(0,nmesh): volumes.append( geomFields.volume[cellSites[n]].asNumPyArray() ) cellCentroids =[] for n in range(0,nmesh): cellCentroids.append( geomFields.coordinate[cellSites[n]].asNumPyArray() ) velFields = [] for n in range(0,nmesh): velFields.append( thermalFields.temperature[cellSites[n]].asNumPyArray() ) coords = [] for n in range(0,nmesh): coords.append( meshes[n].getNodeCoordinates().asNumPyArray() ) print "shape( coords[", n, "] ) = ", shape( coords[n] ) file_name = "temp_proc" + str(MPI.COMM_WORLD.Get_rank()) + ".dat" f = open(file_name, 'w') f.write("Title = \" tecplot file for 2D Cavity problem \" \n") f.write("variables = \"x\", \"y\", \"z\", \"velX\", \"cellCentroidY\" \n") for n in range(0,nmesh): title_name = "nmesh" + str(n) ncell = cellSites[n].getSelfCount() nnode = nodeSites[n].getCount() zone_name = "Zone T = " + "\"" + title_name + "\"" + \ " N = " + str( nodeSites[n].getCount() ) + \ " E = " + str( ncell ) + \ " DATAPACKING = BLOCK, VARLOCATION = ([4-5]=CELLCENTERED), " + \ " ZONETYPE=FETRIANGLE \n" f.write( zone_name ) #write x for i in range(0,nnode): f.write(str(coords[n][i][0])+" ") if ( i % 5 == 4 ): f.write("\n") f.write("\n") #write y for i in range(0,nnode): f.write(str(coords[n][i][1])+" ") if ( i % 5 == 4 ): f.write("\n") f.write("\n") #write z for i in range(0,nnode): f.write(str(coords[n][i][2])+" ") if ( i % 5 == 4 ): f.write("\n") f.write("\n") #write velX for i in 
range(0,ncell): f.write( str(velFields[n][i]) + " ") if ( i % 5 == 4 ): f.write("\n") f.write("\n") #write velX for i in range(0,ncell): f.write( str(cellCentroids[n][i][1]) + " ") if ( i % 5 == 4 ): f.write("\n") f.write("\n") #connectivity for i in range(0,ncell): nnodes_per_cell = cellNodes[n].getCount(i) for node in range(0,nnodes_per_cell): f.write( str(cellNodes[n](i,node)+1) + " ") f.write("\n") f.write("\n") f.close() # change as needed #import debug reader = FluentCase(sys.argv[1]) reader.read() fluent_meshes = reader.getMeshList() nmesh = 1 import time t0 = time.time() #print "nmesh = ", nmesh #npart = fvmparallel.IntVector(1,nmesh) #total of distributed meshes #etype = fvmparallel.IntVector(1,1) #triangle npart = [MPI.COMM_WORLD.Get_size()] etype = [1] #partMesh constructor and setTypes #time profile for partmesh part_mesh_time = zeros(1,dtype='d') part_mesh_start = zeros(1, dtype='d') part_mesh_end = zeros(1, dtype='d') part_mesh_maxtime = zeros(1,dtype='d') part_mesh_mintime = zeros(1, dtype='d') part_mesh_start[0] = MPI.Wtime() part_mesh = fvmparallel.PartMesh( fluent_meshes, npart, etype ); part_mesh.setWeightType(0); part_mesh.setNumFlag(0); #print "nmesh = ", nmesh, "procID = ", MPI.COMM_WORLD.Get_rank() #actions part_mesh.partition() part_mesh.mesh() #part_mesh.mesh_debug() meshes = part_mesh.meshList() part_mesh_end[0] = MPI.Wtime() part_mesh_time[0] = part_mesh_end[0] - part_mesh_start[0] MPI.COMM_WORLD.Allreduce( [part_mesh_time,MPI.DOUBLE], [part_mesh_maxtime, MPI.DOUBLE], op=MPI.MAX) MPI.COMM_WORLD.Allreduce( [part_mesh_time,MPI.DOUBLE], [part_mesh_mintime, MPI.DOUBLE], op=MPI.MIN) target_id = int(0) group = fvmbaseExt.IntSet() for i in range(0,npart[0]): group.insert( int(i) ) mesh0 = meshes[0] cellSite = mesh0.getCells() cellSiteMerger = fvmbaseExt.StorageSiteMerger( 0, group, cellSite ) cellSiteMerger.debug_print()
{"hexsha": "58e507fa9e2942c5e6c4e824820878ca5baf4b79", "size": 6112, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/fvm/test/PARALLEL_TESTS/testMerger.py", "max_stars_repo_name": "drm42/fvm-drm", "max_stars_repo_head_hexsha": "c9b940e593034f1aa3020d63ff1e09ebef9c182a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/fvm/test/PARALLEL_TESTS/testMerger.py", "max_issues_repo_name": "drm42/fvm-drm", "max_issues_repo_head_hexsha": "c9b940e593034f1aa3020d63ff1e09ebef9c182a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/fvm/test/PARALLEL_TESTS/testMerger.py", "max_forks_repo_name": "drm42/fvm-drm", "max_forks_repo_head_hexsha": "c9b940e593034f1aa3020d63ff1e09ebef9c182a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4080717489, "max_line_length": 100, "alphanum_fraction": 0.6194371728, "include": true, "reason": "from numpy", "num_tokens": 1903}
import cv2
import numpy as np
from imutils.video import FileVideoStream

# start a threaded reader for the video file
vs = FileVideoStream('messi.webm').start()

# load the ball template and pre-process it (grayscale, blur, adaptive threshold)
ball = cv2.imread('ball.png')
Ball = ball.copy()
ball = cv2.cvtColor(ball, cv2.COLOR_BGR2GRAY)
ball = cv2.medianBlur(ball, 5)
ball = cv2.adaptiveThreshold(ball, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                             cv2.THRESH_BINARY, 3, 5)

# the ORB detector, matcher and template descriptors are reused for every frame
orb = cv2.ORB_create()
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
kp1, des1 = orb.detectAndCompute(ball, None)

while vs.more():
    frame = vs.read()
    if frame is None:
        continue
    output = frame.copy()
    # apply the same pre-processing to the current frame
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame = cv2.medianBlur(frame, 5)
    frame = cv2.adaptiveThreshold(frame, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                  cv2.THRESH_BINARY, 3, 5)
    kp2, des2 = orb.detectAndCompute(frame, None)
    if des1 is None or des2 is None:
        continue
    # match descriptors and keep the ten closest matches
    matches = sorted(bf.match(des1, des2), key=lambda x: x.distance)
    img = cv2.drawMatches(Ball, kp1, output, kp2, matches[:10],
                          flags=2, outImg=output)
    cv2.imshow('result', img)
    cv2.waitKey(1)

cv2.destroyAllWindows()
vs.stop()
{"hexsha": "af004acf49fe9dbb6556de71bdf574aed536601f", "size": 1038, "ext": "py", "lang": "Python", "max_stars_repo_path": "OpenCV/5.2.py", "max_stars_repo_name": "Nivedya-27/Autumn-of-Automation", "max_stars_repo_head_hexsha": "2f645b58d035d6277f7ee0ff77814be812815f6d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OpenCV/5.2.py", "max_issues_repo_name": "Nivedya-27/Autumn-of-Automation", "max_issues_repo_head_hexsha": "2f645b58d035d6277f7ee0ff77814be812815f6d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OpenCV/5.2.py", "max_forks_repo_name": "Nivedya-27/Autumn-of-Automation", "max_forks_repo_head_hexsha": "2f645b58d035d6277f7ee0ff77814be812815f6d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4375, "max_line_length": 92, "alphanum_fraction": 0.7870905588, "include": true, "reason": "import numpy", "num_tokens": 314}
#!/usr/bin/python # # Copyright (c) PhaseSpace, Inc 2019 # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # PHASESPACE, INC BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # import owl #import sys import time import matplotlib.pyplot as plt import matplotlib.animation as anim from mpl_toolkits.mplot3d import Axes3D #import numpy as np #import pandas as pd #SERVER = sys.argv[1] SERVER = "192.168.1.230" ''' print("Enter the duration of recording (seconds) ") r_duration = float(input()) print("Enter the recorded file name (ex. output01.txt) ") fn = input() ''' # instantiate context o = owl.Context() # connect to server with timeout of 10000000 microseconds o.open(SERVER, "timeout=10000000") # initialize session o.initialize("streaming=1") # main loop t1 = time.time() count = 0 evt = None def getLatestEvent(): global evt global o #print("getting latest event...") temp = None while o.isOpen() and o.property("initialized") and evt: temp = evt evt = o.nextEvent() evt = temp while not evt: evt = o.nextEvent(1000000) print("searching for event") fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.axes.set_xlim3d(left=-2000, right=2000) ax.axes.set_ylim3d(bottom=-2000, top=2000) ax.axes.set_zlim3d(bottom=-2000, top=2000) ax.set_autoscale_on(False) #while evt or (o.isOpen() and o.property("initialized")): def animate(i): global evt ax.cla() ax.axes.set_xlim3d(left=-2000, right=2000) ax.axes.set_ylim3d(bottom=-2000, top=2000) ax.axes.set_zlim3d(bottom=-2000, top=2000) ax.set_autoscale_on(False) ax.scatter(0,0,0) ax.text(0,0,0,"(0, 0, 0)") # poll for events with a timeout (microseconds) getLatestEvent() # nothing received, keep waiting if not evt: return # process event if evt.type_id == owl.Type.FRAME: # print markers if "markers" in evt: for m in evt.markers: ax.scatter(m.x, m.y, m.z) #print(str(m.x) + ", " + str(m.y) + ", " + str(m.z)) # print rigids ''' if "rigids" in evt: for r in evt.rigids: print(r) if count == 1: b = a else: b = np.vstack((b,a)) ''' elif evt.type_id == owl.Type.ERROR: # handle errors print(evt.name, evt.data) if evt.name == "fatal": return elif evt.name == "done": # done event is sent when master connection stops session print("done") return ani = anim.FuncAnimation(fig, animate, interval=0) #result_available.wait(0.020) # end main loop # end session #o.done() # close socket #o.close()
{"hexsha": "621759eeb90331ee3fc4cb591f7c92bb30c7f722", "size": 3227, "ext": "py", "lang": "Python", "max_stars_repo_path": "plottest.py", "max_stars_repo_name": "jonathanzxu/mocap-roboticarm", "max_stars_repo_head_hexsha": "957f77249766c1b40b8416a9236a2c60c734e3cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-13T23:03:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-13T23:03:19.000Z", "max_issues_repo_path": "plottest.py", "max_issues_repo_name": "jonathanzxu/mocap-roboticarm", "max_issues_repo_head_hexsha": "957f77249766c1b40b8416a9236a2c60c734e3cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plottest.py", "max_forks_repo_name": "jonathanzxu/mocap-roboticarm", "max_forks_repo_head_hexsha": "957f77249766c1b40b8416a9236a2c60c734e3cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8916666667, "max_line_length": 456, "alphanum_fraction": 0.6052060738, "include": true, "reason": "import numpy", "num_tokens": 838}
classdef mme_xfmr3p_opf < mp.mme_xfmr3p % MATPOWER % Copyright (c) 2022, Power Systems Engineering Research Center (PSERC) % by Ray Zimmerman, PSERC Cornell % % This file is part of MATPOWER. % Covered by the 3-clause BSD License (see LICENSE file for details). % See https://matpower.org for more info. % properties % end methods function x0 = interior_x0(obj, mm, nm, dm, x0) end end %% methods end %% classdef
{"author": "MATPOWER", "repo": "matpower", "sha": "7da926d978824bf675a71e0a5cb91f8967f97007", "save_path": "github-repos/MATLAB/MATPOWER-matpower", "path": "github-repos/MATLAB/MATPOWER-matpower/matpower-7da926d978824bf675a71e0a5cb91f8967f97007/lib/+mp/mme_xfmr3p_opf.m"}
""" The :py:mod:`h2_fuel` module contains a class to read the required data and a class to evaluate the power-to-fuel system. """ import os import pandas as pd import numpy as np import pvlib class ReadData: """ This class enables to read data from the data files. Parameters ---------- filename_climate : str The directory of the file with information on the climate data. """ def __init__(self, filename_climate): self.filename_climate = filename_climate self.path = os.path.dirname(os.path.abspath(__file__)) def load_climate(self): """ This method loads the hourly solar irradiance data and ambient temperature data, situated in the 'sol_irr' and 'T_amb' columns of the climate data file. Returns ------- sol_irr : ndarray The hourly solar irradiance data for a Typical Meteorological Year. (8760 elements) t_amb : ndarray The hourly ambient temperature data for a Typical Meteorological Year. (8760 elements) """ data = pd.read_csv(self.filename_climate) sol_irr = data['sol_irr'].to_numpy() t_amb = data['T_amb'].to_numpy() return sol_irr, t_amb def load_parameters(self): """ This method loads the deterministic values of the model parameters, defined in the design_space file. This is useful when the deterministic performance of a specific design needs to be evaluated. Returns ------- param_dict : dict Dictionary with the names of the model parameters and the corresponding deterministic values. """ param_dict = {} design_space = os.path.join(self.path, 'design_space') # read the deterministic values for the parameters in `design_space` with open(design_space, 'r') as file: for line in file: tmp = line.split() if tmp[1] == 'par': param_dict[tmp[0]] = float(tmp[2]) return param_dict class Evaluation: """ This class evaluates the photovoltaic-electrolyzer system. For a given design, the solar irradiance, ambient temperature and the characterization of the model parameters, the levelized cost of hydrogen and the annual hydrogen production are quantified. Parameters ---------- sol_irr : ndarray The hourly solar irradiance for the evaluated year. t_amb : ndarray The hourly ambient temperature for the evaluated year. parameters : dict Dictionary with the model parameters and design variables values. """ def __init__(self, sol_irr, t_amb, par): self.par = par # the solar irradiance and ambient temperature are scaled with the # corresponding uncertainty self.sol_irr = sol_irr * self.par['u_sol_irr'] self.t_amb = t_amb + self.par['u_t_amb'] # the result dictionary self.res = {} # the system lifetime self.par['life_sys'] = 20. # the amount of hydrogen produced self.res['m_h2'] = 0. # initialize the pv power that is consumed by the electrolyzer array self.res['p_pv_consumed'] = 0. # initialize the operating hours of the electrolyzer array self.res['running_hours_pemel'] = 0. # the number of PEM electrolyzer cells, corresponding to the # nominal capacity of the considered PEM cell and the provided # PEM capacity self.n_pemel_array = self.par['n_pemel'] / 0.4 # generate the fitted polynomial on the electrolyzer array # power - current relation self.polyfit_pemel() ############################# # photovoltaic array module # ############################# def quantify_mpp(self, sol_irr, t_amb, pv_system): """ Quantify the maximum power of the photovoltaic array for a given solar irradiance and ambient temperature. Parameters ---------- sol_irr : float The solar irradiance [W/m2]. t_amb : float The ambient temperature [C]. 
pv_system : pandas.core.series.Series The pv system characteristics Returns ------- pmp : float The maximum power. """ # quantify the parameters for the pv system using De Soto method pv_inputs = pvlib.pvsystem.calcparams_desoto(sol_irr, t_amb, pv_system['alpha_sc'], pv_system['a_ref'], pv_system['I_L_ref'], pv_system['I_o_ref'], pv_system['R_sh_ref'], pv_system['R_s'], EgRef=1.121, dEgdT=-0.0002677, irrad_ref=1000., temp_ref=25.) # determine the maximum power for the given pv system pmp = pvlib.pvsystem.max_power_point(pv_inputs[0], pv_inputs[1], pv_inputs[2], pv_inputs[3], pv_inputs[4], method='newton')['p_mp'] return pmp def photovoltaic(self): """ The hourly photovoltaic power is quantified via the PVlib package. Using this package, first the characteristics for a typical photovoltaic panel are defined. Based on these characteristics, the maximum power point is quantified for each hour, based on the corresponding solar irradiance and ambient temperature. Finally, the hourly power production is scaled by the considered photovoltaic array capacity. """ p_pv = np.zeros(len(self.sol_irr)) # get the specific photovoltaic panel characteristics pv_database = pvlib.pvsystem.retrieve_sam('CECmod') pv_system = pv_database.SunPower_SPR_X19_240_BLK p_mpp_ref = self.quantify_mpp(1000., 25., pv_system) # W # maximum power point determination for each hour in the timeframe for i, irr in enumerate(self.sol_irr): if irr > 0.: p_mpp = self.quantify_mpp(irr, self.t_amb[i], pv_system) p_mpp_array = p_mpp / p_mpp_ref * self.par['n_pv'] p_pv[i] = min(p_mpp_array, self.par['n_dcdc_pv']) * 1e3 # W else: p_pv[i] = 0. # store the hourly pv power in the result dictionary self.res['p_pv'] = p_pv ############################# # electrolyzer array module # ############################# def pemel(self, i_pemel): """ The electrolyzer model, based on the work of Saeed et al. [1]. For a given current, the model determines the operating voltage by considering the activation, concentration and ohmic overpotentials. The model quantifies the operating voltage, power, efficiency and hydrogen production. [1] Saeed, E. W., & Warkozek, E. G. (2015). Modeling and Analysis of Renewable PEM Fuel Cell System. Energy Procedia, 74, 87–101. https://doi.org/10.1016/j.egypro.2015.07.527 Parameters ---------- i_pemel : float The electrolyzer input current [A]. Returns ------- res : dict Dictionary with the operating conditions of the electrolyzer for a given current. It contains items on the operating voltage, power, efficiency and hydrogen mass flow rate. """ par_pemel = {'T': 353., 'a': 1., 'p_o2': 1., 'p_h2': 1., 'p_h2o': 1., 'i_L': 2., 'A': 100., 'i_0': 1e-4, 'n': 2., 't_mem': 50e-4, 'alpha': 0.3, 'R': 8.3143, 'F': 96485., 'HHV': 141.7e6, } res = {} i = i_pemel / par_pemel['A'] # minimum operating voltage of electrolyzer e_0 = (1.48 - 0.85e-3 * (par_pemel['T'] - 298.15) + 4.3085e-5 * par_pemel['T'] * np.log(par_pemel['p_h2'] * np.sqrt(par_pemel['p_o2']) / par_pemel['p_h2o'])) # activation overpotential v_act = (np.log(i / par_pemel['i_0']) / (par_pemel['alpha'] * par_pemel['n'] * par_pemel['F']) * par_pemel['R'] * par_pemel['T']) # ohmic overpotential lambda_mem = (0.043 + 17.81 * par_pemel['a'] - 39.85 * par_pemel['a']**2. + 36. * par_pemel['a']**3.) sigma_mem = ((0.005139 * lambda_mem - 0.00326) * np.exp(1268 * (1. / 303. - 1. / par_pemel['T']))) v_ohm = i * par_pemel['t_mem'] / sigma_mem # the concentration overpotential v_con = - (par_pemel['R'] * par_pemel['T'] / (par_pemel['n'] * par_pemel['F']) * np.log(1. 
- i / par_pemel['i_L'])) # model outputs res['v_pemel'] = (e_0 + v_act + v_ohm + v_con) * self.n_pemel_array res['m_pemel'] = self.current_to_mh2(i_pemel) * self.n_pemel_array res['p_pemel'] = i_pemel * res['v_pemel'] res['eff_pemel'] = (res['m_pemel'] * par_pemel['HHV'] / (res['p_pemel'] * 3600.)) return res def current_to_mh2(self, current): """ When current is provided, this function determines the corresponding hydrogen mass flow rate per hour. Parameters ---------- current : float The electrolyzer input current [A]. Returns ------- m_h2 : float The produced hydrogen mass flow rate [kg/h]. """ far_cons = 96485. m_h2 = current / (2. * far_cons) * 2.02e-3 * 3600. return m_h2 def polyfit_pemel(self): """ The electrolyzer stack is evaluated over a range of input currents. Following these evaluations, a polynomial is fitted on the power - current relation of the electrolyzer. This polynomial enables to rapidly determine the input current when a certain amount of power is available. Since this relation is fairly linear, the polynomial should reach good agreement with the actual power - current relation, while maintaining the level of fidelity of the actual model. """ # evaluate the electrolyzer stack for a set of currents i_list = np.arange(start=3, stop=200, step=4) p_pemel = np.zeros(len(i_list)) for index, i in enumerate(i_list): res = self.pemel(i) p_pemel[index] = res['p_pemel'] # generate a polynomial fitted on the power - current points self.p_to_i_pemel = polyfit_func(p_pemel, i_list) def charge_pemel(self, p_pemel): """ For a given power supplied to the electrolyzer, this function determines the actual hydrogen produced. First, the method evaluates if the power supplied lies within the operating bounds of the electrolyzer stack. If the power is situated below the lower limit, the electrolyzer does not run. Instead, when the power is situated above the upper limit, the electrolyzer operates at nominal conditions. At nominal conditions, the current is known and the hydrogen mass flow rate is quantified. Otherwise, the input current is determined through the fitted polynomial on the power - current relation of the electrolyzer stack. As the power is an output from the electrolyzer model, considering this polynomial avoids the use of root finding methods and is therefore more robust in optimization and uncertainty quantification approaches. Finally, when hydrogen is produced, the running hours of the electrolyzer stack is increased by 1 and the hydrogen mass flow rate is returned. Parameters ---------- p_pemel : float The power available for electrolysis [W]. Returns ------- m_h2 : float The produced hydrogen mass flow rate [kg/h]. """ # the operating bounds op_lower_lim = self.par['n_pemel'] * 10. op_upper_lim = self.par['n_pemel'] * 1e3 # check if power is higher than the lowest operating point if p_pemel > op_lower_lim: # if the power exceeds the upper bound, operate at upper bound p_pemel_applied = min(p_pemel, op_upper_lim) # current at this applied power i_pemel = self.p_to_i_pemel(p_pemel_applied) m_h2 = self.current_to_mh2(i_pemel) * self.n_pemel_array self.res['p_pv_consumed'] += p_pemel_applied # increase the operating hours by 1 self.res['running_hours_pemel'] += 1. # no hydrogen production when the power falls outside the operating # bounds else: m_h2 = 0. return m_h2 ##################### # evaluation module # ##################### def evaluation(self): """ This is the main method of the Evaluation class. In this method, the hourly photovoltaic power is quantified first. 
Then, for each hour, the hydrogen is determined. Finally, the electrolyzer lifetime and the system cost are determined. """ # get the hourly photovoltaic array power self.photovoltaic() # evaluate hourly hydrogen production for p_pv in self.res['p_pv']: self.res['m_h2'] += self.charge_pemel(p_pv) # determine the electrolyzer lifetime self.lifetime() # determine the system cost and levelized cost of hydrogen self.cost() def lifetime(self): """ The lifetime method determines the lifetime of the electrolyzer array, based on the number of operating hours during the evaluated year. """ # lifetime of the electrolyzer array if self.res['running_hours_pemel'] == 0.: self.res['life_pemel'] = 1e8 else: self.res['life_pemel'] = (self.par['life_pemel'] / self.res['running_hours_pemel']) def cost(self): """ Based on the capital recovery factor, the CAPEX, OPEX and replacement cost of the system components, the levelized cost of hydrogen is determined. The formula for the annualized system cost is adopted from Zakeri et al. [2]. [2] Zakeri, B., & Syri, S. (2015). Electrical energy storage systems: A comparative life cycle cost analysis. Renewable and Sustainable Energy Reviews, 42, 569–596. https://doi.org/10.1016/j.rser.2014.10.011 """ # the capital recovery factor inv_rate = ((self.par['int_rate'] - self.par['infl_rate']) / (1. + self.par['infl_rate'])) crf = (((1. + inv_rate)**self.par['life_sys'] - 1.) / (inv_rate * (1. + inv_rate)**self.par['life_sys']))**(-1) # annual cost of photovoltaic array and DC-DC converter pv_cost = self.par['n_pv'] * (crf * self.par['capex_pv'] + self.par['opex_pv']) pv_dcdc_cost = self.par['n_dcdc_pv'] * (self.par['capex_dcdc'] * (crf + self.par['opex_dcdc'])) components_cost = pv_cost + pv_dcdc_cost # annual cost of electrolyzer array pemel_cost = self.par['n_pemel'] * (self.par['capex_pemel'] * (crf + self.par['opex_pemel'])) components_cost += pemel_cost # annual replacement cost of the electrolyzer array arc = crf * sum([(1. + inv_rate)**(-(i + 1.) * self.res['life_pemel']) * self.par['n_pemel'] * self.par['repl_pemel'] * self.par['capex_pemel'] for i in range(int(self.par['life_sys'] / self.res['life_pemel']))]) # the levelized cost of hydrogen cost = arc + components_cost if self.res['m_h2'] < 1e-5: self.res['lcoh'] = 1e8 else: self.res['lcoh'] = cost / self.res['m_h2'] def print_results(self): """ This method prints the levelized cost of hydrogen, the hydrogen production, the annual energy produced by the photovoltaic array and the energy consumed by the electrolyzer array. """ print('outputs:') print('LCOH:'.ljust(30) + '%.2f euro/kg' % self.res['lcoh']) print('m_h2:'.ljust(30) + '%.2f kg' % self.res['m_h2']) print('PV electricity generated:'.ljust(30) + '%.2f MWh' % (sum(self.res['p_pv']) / 1e6)) print( 'PV electricity consumed:'.ljust(30) + '%.2f MWh' % (self.res['p_pv_consumed'] / 1e6)) print('self-consumption ratio:'.ljust(30) + '%.2f %%' % (1e2 * self.res['p_pv_consumed'] / sum(self.res['p_pv']))) print('lifetime electrolyzer:'.ljust(30) + '%.2f year' % self.res['life_pemel']) def polyfit_func(x_in, y_in, threshold=0.99999999): """ The function fits a polynomial to the points of x_in and y_in. The polynomial starts with order 1. To evaluate its performance, the R-squared performance indicator is quantified. If the value for R-squared does not reach the defined threshold, the polynomial order is increased and the polynomial is fitted again on the points, until the threshold is satisfied. Once satisfied, the function returns the polynomial. 
Parameters ---------- x_in : ndarray The x-coordinates for the sample points. y_in : ndarray The y-coordinates for the sample points. threshold : float, optional The threshold for the R-squared parameter. The default is 0.99999. Returns ------- poly_func : numpy.poly1d A one-dimensional polynomial. """ order = 0 r_squared = 0. while r_squared < threshold: order += 1 # the polynomial poly_coeff = np.polyfit(x_in, y_in, order) poly_func = np.poly1d(poly_coeff) # r-squared yhat = poly_func(x_in) ybar = np.sum(y_in) / len(y_in) ssreg = np.sum((yhat - ybar)**2.) sstot = np.sum((y_in - ybar)**2.) r_squared = ssreg / sstot return poly_func
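# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): evaluate one
# design for a synthetic year. Every parameter value below is a hypothetical
# placeholder for the entries normally read from the design_space file, and
# running it requires pvlib (8760 MPP evaluations, so it can take a while).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    # synthetic hourly climate data: non-negative irradiance, flat temperature
    sol_irr = np.clip(rng.normal(300., 250., 8760), 0., None)
    t_amb = np.full(8760, 10.)
    par = {'u_sol_irr': 1., 'u_t_amb': 0.,
           'n_pv': 5., 'n_dcdc_pv': 5., 'n_pemel': 4.,
           'int_rate': 0.06, 'infl_rate': 0.02,
           'capex_pv': 400., 'opex_pv': 16.,
           'capex_dcdc': 100., 'opex_dcdc': 0.02,
           'capex_pemel': 1500., 'opex_pemel': 0.04,
           'repl_pemel': 0.2, 'life_pemel': 80000.}
    case = Evaluation(sol_irr, t_amb, par)
    case.evaluation()
    case.print_results()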
{"hexsha": "0a94dee890f7756946beb4979301126f3782f582", "size": 19587, "ext": "py", "lang": "Python", "max_stars_repo_path": "rheia/CASES/H2_FUEL/h2_fuel.py", "max_stars_repo_name": "Tsiri/RHEIA", "max_stars_repo_head_hexsha": "a7bacd72e5515242e78ee413f9e8959ab4f1115d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-29T08:26:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T08:26:49.000Z", "max_issues_repo_path": "rheia/CASES/H2_FUEL/h2_fuel.py", "max_issues_repo_name": "Tsiri/RHEIA", "max_issues_repo_head_hexsha": "a7bacd72e5515242e78ee413f9e8959ab4f1115d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rheia/CASES/H2_FUEL/h2_fuel.py", "max_forks_repo_name": "Tsiri/RHEIA", "max_forks_repo_head_hexsha": "a7bacd72e5515242e78ee413f9e8959ab4f1115d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-25T18:55:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-25T18:55:10.000Z", "avg_line_length": 35.6127272727, "max_line_length": 79, "alphanum_fraction": 0.5519477204, "include": true, "reason": "import numpy", "num_tokens": 4580}
import numpy as np
from numpy.fft import fft2, ifft2, ifftshift
from scipy.sparse import spdiags, eye as speye
from scipy.sparse.linalg import spsolve

from menpofit.math.fft_utils import pad, crop


def mosse(X, y, l=0.01, boundary='constant', crop_filter=True):
    r"""
    Minimum Output Sum of Squared Errors (MOSSE) filter.

    Parameters
    ----------
    X : ``(n_images, n_channels, image_h, image_w)`` `ndarray`
        The training images.
    y : ``(1, response_h, response_w)`` `ndarray`
        The desired response.
    l : `float`, optional
        Regularization parameter.
    boundary : ``{'constant', 'symmetric'}``, optional
        Determines how the image is padded.
    crop_filter : `bool`, optional
        If ``True``, the shape of the MOSSE filter is the same as the shape
        of the desired response. If ``False``, the filter's shape is equal
        to: ``X[0].shape + y.shape - 1``

    Returns
    -------
    f : ``(1, response_h, response_w)`` `ndarray`
        Minimum Output Sum of Squared Errors (MOSSE) filter associated to
        the training images.
    sXY : ``(N,)`` `ndarray`
        The cross-correlation array, where
        ``N = (image_h+response_h-1) * (image_w+response_w-1) * n_channels``.
    sXX : ``(N, N)`` `ndarray`
        The auto-correlation array, where
        ``N = (image_h+response_h-1) * (image_w+response_w-1) * n_channels``.

    References
    ----------
    .. [1] D. S. Bolme, J. R. Beveridge, B. A. Draper, and Y. M. Lui.
        "Visual Object Tracking using Adaptive Correlation Filters",
        IEEE Proceedings of International Conference on Computer Vision and
        Pattern Recognition (CVPR), 2010.
    """
    # number of images, number of channels, height and width
    n, k, hx, wx = X.shape

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # extended shape
    ext_h = hx + hy - 1
    ext_w = wx + wy - 1
    ext_shape = (ext_h, ext_w)

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # auto and cross spectral energy matrices
    sXX = 0
    sXY = 0
    # for each training image and desired response
    for x in X:
        # extend image
        ext_x = pad(x, ext_shape, boundary=boundary)
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # update auto and cross spectral energy matrices
        sXX += fft_ext_x.conj() * fft_ext_x
        sXY += fft_ext_x.conj() * fft_ext_y

    # compute desired correlation filter
    fft_ext_f = sXY / (sXX + l)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute extended filter inverse fft
    f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))
    if crop_filter:
        # crop extended filter to match desired response shape
        f = crop(f, y_shape)

    return f, sXY, sXX


def imosse(A, B, n_ab, X, y, l=0.01, boundary='constant', crop_filter=True,
           f=1.0):
    r"""
    Incremental Minimum Output Sum of Squared Errors (iMOSSE) filter.

    Parameters
    ----------
    A : ``(N,)`` `ndarray`
        The current cross-correlation array, where
        ``N = (patch_h+response_h-1) * (patch_w+response_w-1) * n_channels``.
    B : ``(N, N)`` `ndarray`
        The current auto-correlation array, where
        ``N = (patch_h+response_h-1) * (patch_w+response_w-1) * n_channels``.
    n_ab : `int`
        The current number of images.
    X : ``(n_images, n_channels, image_h, image_w)`` `ndarray`
        The training images (patches).
    y : ``(1, response_h, response_w)`` `ndarray`
        The desired response.
    l : `float`, optional
        Regularization parameter.
    boundary : ``{'constant', 'symmetric'}``, optional
        Determines how the image is padded.
    crop_filter : `bool`, optional
        If ``True``, the shape of the MOSSE filter is the same as the shape
        of the desired response. If ``False``, the filter's shape is equal
        to: ``X[0].shape + y.shape - 1``
    f : ``[0, 1]`` `float`, optional
        Forgetting factor that weights the relative contribution of new
        samples vs old samples. If ``1.0``, all samples are weighted equally.
        If ``<1.0``, more emphasis is put on the new samples.

    Returns
    -------
    f : ``(1, response_h, response_w)`` `ndarray`
        Minimum Output Sum of Squared Errors (MOSSE) filter associated to
        the training images.
    sXY : ``(N,)`` `ndarray`
        The cross-correlation array, where
        ``N = (image_h+response_h-1) * (image_w+response_w-1) * n_channels``.
    sXX : ``(N, N)`` `ndarray`
        The auto-correlation array, where
        ``N = (image_h+response_h-1) * (image_w+response_w-1) * n_channels``.

    References
    ----------
    .. [1] D. S. Bolme, J. R. Beveridge, B. A. Draper, and Y. M. Lui.
        "Visual Object Tracking using Adaptive Correlation Filters",
        IEEE Proceedings of International Conference on Computer Vision and
        Pattern Recognition (CVPR), 2010.
    """
    # number of images; number of channels, height and width
    n_x, k, hz, wz = X.shape

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # multiply the number of samples used to produce the auto and cross
    # spectral energy matrices A and B by the forgetting factor
    n_ab *= f
    # total number of samples
    n = n_ab + n_x
    # compute weighting factors
    nu_ab = n_ab / n
    nu_x = n_x / n

    # extended shape
    ext_h = hz + hy - 1
    ext_w = wz + wy - 1
    ext_shape = (ext_h, ext_w)

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # extend images
    ext_X = pad(X, ext_shape, boundary=boundary)

    # auto and cross spectral energy matrices
    sXX = 0
    sXY = 0
    # for each training image and desired response
    for ext_x in ext_X:
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # update auto and cross spectral energy matrices
        sXX += fft_ext_x.conj() * fft_ext_x
        sXY += fft_ext_x.conj() * fft_ext_y

    # combine old and new auto and cross spectral energy matrices
    sXY = nu_ab * A + nu_x * sXY
    sXX = nu_ab * B + nu_x * sXX

    # compute desired correlation filter
    fft_ext_f = sXY / (sXX + l)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute filter inverse fft
    f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))
    if crop_filter:
        # crop extended filter to match desired response shape
        f = crop(f, y_shape)

    return f, sXY, sXX


def mccf(X, y, l=0.01, boundary='constant', crop_filter=True):
    r"""
    Multi-Channel Correlation Filter (MCCF).

    Parameters
    ----------
    X : ``(n_images, n_channels, image_h, image_w)`` `ndarray`
        The training images.
    y : ``(1, response_h, response_w)`` `ndarray`
        The desired response.
    l : `float`, optional
        Regularization parameter.
    boundary : ``{'constant', 'symmetric'}``, optional
        Determines how the image is padded.
    crop_filter : `bool`, optional
        If ``True``, the shape of the MCCF filter is the same as the shape
        of the desired response. If ``False``, the filter's shape is equal
        to: ``X[0].shape + y.shape - 1``

    Returns
    -------
    f : ``(1, response_h, response_w)`` `ndarray`
        Multi-Channel Correlation Filter (MCCF) filter associated to the
        training images.
    sXY : ``(N,)`` `ndarray`
        The cross-correlation array, where
        ``N = (image_h+response_h-1) * (image_w+response_w-1) * n_channels``.
    sXX : ``(N, N)`` `ndarray`
        The auto-correlation array, where
        ``N = (image_h+response_h-1) * (image_w+response_w-1) * n_channels``.

    References
    ----------
    .. [1] H. K. Galoogahi, T. Sim, and Simon Lucey. "Multi-Channel
        Correlation Filters". IEEE Proceedings of International Conference on
        Computer Vision (ICCV), 2013.
    """
    # number of images; number of channels, height and width
    n, k, hx, wx = X.shape

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # extended shape
    ext_h = hx + hy - 1
    ext_w = wx + wy - 1
    ext_shape = (ext_h, ext_w)
    # extended dimensionality
    ext_d = ext_h * ext_w

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # extend images
    ext_X = pad(X, ext_shape, boundary=boundary)

    # auto and cross spectral energy matrices
    sXX = 0
    sXY = 0
    # for each training image and desired response
    for ext_x in ext_X:
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # store extended image fft as sparse diagonal matrix
        diag_fft_x = spdiags(fft_ext_x.reshape((k, -1)),
                             -np.arange(0, k) * ext_d, ext_d * k, ext_d).T
        # vectorize extended desired response fft
        diag_fft_y = fft_ext_y.ravel()

        # update auto and cross spectral energy matrices
        sXX += diag_fft_x.conj().T.dot(diag_fft_x)
        sXY += diag_fft_x.conj().T.dot(diag_fft_y)

    # solve ext_d independent k x k linear systems (with regularization)
    # to obtain desired extended multi-channel correlation filter
    fft_ext_f = spsolve(sXX + l * speye(sXX.shape[-1]), sXY)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute filter inverse fft
    f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))
    if crop_filter:
        # crop extended filter to match desired response shape
        f = crop(f, y_shape)

    return f, sXY, sXX


def imccf(A, B, n_ab, X, y, l=0.01, boundary='constant', crop_filter=True,
          f=1.0):
    r"""
    Incremental Multi-Channel Correlation Filter (iMCCF).

    Parameters
    ----------
    A : ``(N,)`` `ndarray`
        The current cross-correlation array, where
        ``N = (patch_h+response_h-1) * (patch_w+response_w-1) * n_channels``.
    B : ``(N, N)`` `ndarray`
        The current auto-correlation array, where
        ``N = (patch_h+response_h-1) * (patch_w+response_w-1) * n_channels``.
    n_ab : `int`
        The current number of images.
    X : ``(n_images, n_channels, image_h, image_w)`` `ndarray`
        The training images (patches).
    y : ``(1, response_h, response_w)`` `ndarray`
        The desired response.
    l : `float`, optional
        Regularization parameter.
    boundary : ``{'constant', 'symmetric'}``, optional
        Determines how the image is padded.
    crop_filter : `bool`, optional
        If ``True``, the shape of the MCCF filter is the same as the shape
        of the desired response. If ``False``, the filter's shape is equal
        to: ``X[0].shape + y.shape - 1``
    f : ``[0, 1]`` `float`, optional
        Forgetting factor that weights the relative contribution of new
        samples vs old samples. If ``1.0``, all samples are weighted equally.
        If ``<1.0``, more emphasis is put on the new samples.

    Returns
    -------
    f : ``(1, response_h, response_w)`` `ndarray`
        Multi-Channel Correlation Filter (MCCF) filter associated to the
        training images.
    sXY : ``(N,)`` `ndarray`
        The cross-correlation array, where
        ``N = (image_h+response_h-1) * (image_w+response_w-1) * n_channels``.
    sXX : ``(N, N)`` `ndarray`
        The auto-correlation array, where
        ``N = (image_h+response_h-1) * (image_w+response_w-1) * n_channels``.

    References
    ----------
    .. [1] D. S. Bolme, J. R. Beveridge, B. A. Draper, and Y. M. Lui.
        "Visual Object Tracking using Adaptive Correlation Filters",
        IEEE Proceedings of International Conference on Computer Vision and
        Pattern Recognition (CVPR), 2010.
    .. [2] H. K. Galoogahi, T. Sim, and Simon Lucey. "Multi-Channel
        Correlation Filters". IEEE Proceedings of International Conference on
        Computer Vision (ICCV), 2013.
    """
    # number of images; number of channels, height and width
    n_x, k, hz, wz = X.shape

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # multiply the number of samples used to produce the auto and cross
    # spectral energy matrices A and B by the forgetting factor
    n_ab *= f
    # total number of samples
    n = n_ab + n_x
    # compute weighting factors
    nu_ab = n_ab / n
    nu_x = n_x / n

    # extended shape
    ext_h = hz + hy - 1
    ext_w = wz + wy - 1
    ext_shape = (ext_h, ext_w)
    # extended dimensionality
    ext_d = ext_h * ext_w

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # extend images
    ext_X = pad(X, ext_shape, boundary=boundary)

    # auto and cross spectral energy matrices
    sXX = 0
    sXY = 0
    # for each training image and desired response
    for ext_x in ext_X:
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # store extended image fft as sparse diagonal matrix
        diag_fft_x = spdiags(fft_ext_x.reshape((k, -1)),
                             -np.arange(0, k) * ext_d, ext_d * k, ext_d).T
        # vectorize extended desired response fft
        diag_fft_y = fft_ext_y.ravel()

        # update auto and cross spectral energy matrices
        sXX += diag_fft_x.conj().T.dot(diag_fft_x)
        sXY += diag_fft_x.conj().T.dot(diag_fft_y)

    # combine old and new auto and cross spectral energy matrices
    sXY = nu_ab * A + nu_x * sXY
    sXX = nu_ab * B + nu_x * sXX

    # solve ext_d independent k x k linear systems (with regularization)
    # to obtain desired extended multi-channel correlation filter
    fft_ext_f = spsolve(sXX + l * speye(sXX.shape[-1]), sXY)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute filter inverse fft
    f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))
    if crop_filter:
        # crop extended filter to match desired response shape
        f = crop(f, y_shape)

    return f, sXY, sXX
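# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): train a MOSSE
# filter on synthetic data, then refine it incrementally with imosse. Shapes
# follow the docstrings above; the data itself is random, and menpofit must
# be importable for pad/crop.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_old = rng.randn(4, 1, 16, 16)   # (n_images, n_channels, h, w)
    X_new = rng.randn(4, 1, 16, 16)
    y = np.zeros((1, 5, 5))
    y[0, 2, 2] = 1.                   # desired peaked response
    filt, A, B = mosse(X_old, y)
    # incremental update: old statistics are down-weighted by the
    # forgetting factor f
    filt, A, B = imosse(A, B, n_ab=4, X=X_new, y=y, f=0.95)
    print(filt.shape)                 # (1, 5, 5)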
{"hexsha": "2b80d9e1b419f80ff46311e072543bc34e8aa2a5", "size": 14354, "ext": "py", "lang": "Python", "max_stars_repo_path": "DeepAlignmentNetwork/menpofit/math/correlationfilter.py", "max_stars_repo_name": "chiawei-liu/DeepAlignmentNetwork", "max_stars_repo_head_hexsha": "52621cd2f697abe372b88c9ea0ee08f0d93b43d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 220, "max_stars_repo_stars_event_min_datetime": "2019-09-01T01:52:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T12:52:07.000Z", "max_issues_repo_path": "DeepAlignmentNetwork/menpofit/math/correlationfilter.py", "max_issues_repo_name": "chiawei-liu/DeepAlignmentNetwork", "max_issues_repo_head_hexsha": "52621cd2f697abe372b88c9ea0ee08f0d93b43d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 80, "max_issues_repo_issues_event_min_datetime": "2015-01-05T16:17:39.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-22T13:42:00.000Z", "max_forks_repo_path": "DeepAlignmentNetwork/menpofit/math/correlationfilter.py", "max_forks_repo_name": "chiawei-liu/DeepAlignmentNetwork", "max_forks_repo_head_hexsha": "52621cd2f697abe372b88c9ea0ee08f0d93b43d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 64, "max_forks_repo_forks_event_min_datetime": "2015-02-02T15:11:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T06:19:31.000Z", "avg_line_length": 34.5879518072, "max_line_length": 78, "alphanum_fraction": 0.6179462171, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 4009}
import numpy as np
import torch
from torch import nn


class GaussianHeadWithStateIndependentCovariance(nn.Module):
    """Gaussian head with state-independent learned covariance.

    This link is intended to be attached to a neural network that outputs
    the mean of a Gaussian policy. The only learnable parameter this link
    has determines the variance in a state-independent way.

    State-independent parameterization of the variance of a Gaussian policy
    is often used with PPO and TRPO, e.g., in
    https://arxiv.org/abs/1709.06560.

    Args:
        action_size (int): Number of dimensions of the action space.
        var_type (str): Type of parameterization of variance. It must be
            'spherical' or 'diagonal'.
        var_func (callable): Callable that computes the variance from the var
            parameter. It should always return positive values.
        var_param_init (float): Initial value of the var parameter.
    """

    def __init__(
        self,
        action_size,
        var_type="spherical",
        var_func=nn.functional.softplus,
        var_param_init=0,
    ):
        super().__init__()

        self.var_func = var_func
        var_size = {"spherical": 1, "diagonal": action_size}[var_type]

        self.var_param = nn.Parameter(
            torch.tensor(
                np.broadcast_to(var_param_init, var_size),
                dtype=torch.float,
            )
        )

    def forward(self, mean):
        """Return a Gaussian with given mean.

        Args:
            mean (torch.Tensor or ndarray): Mean of Gaussian.

        Returns:
            torch.distributions.Distribution: Gaussian whose mean is the
                mean argument and whose variance is computed from the
                parameter of this link.
        """
        var = self.var_func(self.var_param)
        return torch.distributions.Independent(
            torch.distributions.Normal(loc=mean, scale=torch.sqrt(var)), 1
        )


class GaussianHeadWithDiagonalCovariance(nn.Module):
    """Gaussian head with diagonal covariance.

    This module is intended to be attached to a neural network that outputs
    a vector that is twice the size of an action vector. The vector is split
    and interpreted as the mean and diagonal covariance of a Gaussian policy.

    Args:
        var_func (callable): Callable that computes the variance from the
            second input. It should always return positive values.
    """

    def __init__(self, var_func=nn.functional.softplus):
        super().__init__()
        self.var_func = var_func

    def forward(self, mean_and_var):
        """Return a Gaussian with given mean and diagonal covariance.

        Args:
            mean_and_var (torch.Tensor): Vector that is twice the size of an
                action vector.

        Returns:
            torch.distributions.Distribution: Gaussian distribution with
                given mean and diagonal covariance.
        """
        assert mean_and_var.ndim == 2
        mean, pre_var = mean_and_var.chunk(2, dim=1)
        scale = self.var_func(pre_var).sqrt()
        return torch.distributions.Independent(
            torch.distributions.Normal(loc=mean, scale=scale), 1
        )


class GaussianHeadWithFixedCovariance(nn.Module):
    """Gaussian head with fixed covariance.

    This module is intended to be attached to a neural network that outputs
    the mean of a Gaussian policy. Its covariance is fixed to a diagonal
    matrix with a given scale.

    Args:
        scale (float): Scale parameter.
    """

    def __init__(self, scale=1):
        super().__init__()
        self.scale = scale

    def forward(self, mean):
        """Return a Gaussian with given mean.

        Args:
            mean (torch.Tensor): Batch of mean vectors.

        Returns:
            torch.distributions.Distribution: Multivariate Gaussian whose
                mean is the mean argument and whose scale is fixed.
        """
        return torch.distributions.Independent(
            torch.distributions.Normal(loc=mean, scale=self.scale), 1
        )
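# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): attach a head
# to a small torch network and sample actions. The observation/action sizes
# below are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    obs_size, action_size = 4, 2
    policy = nn.Sequential(
        nn.Linear(obs_size, 64),
        nn.Tanh(),
        nn.Linear(64, action_size),
        GaussianHeadWithStateIndependentCovariance(action_size),
    )
    obs = torch.randn(8, obs_size)
    dist = policy(obs)                   # torch.distributions.Independent
    actions = dist.sample()              # shape: (8, action_size)
    log_probs = dist.log_prob(actions)   # shape: (8,)
    print(actions.shape, log_probs.shape)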
{"hexsha": "3faa32f5d73355e7535d488702b6391a6c98ea77", "size": 4099, "ext": "py", "lang": "Python", "max_stars_repo_path": "pfrl/policies/gaussian_policy.py", "max_stars_repo_name": "ummavi/pfrl-1", "max_stars_repo_head_hexsha": "e856a7cca30fcc3871024cdf7522d066006a5f0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 824, "max_stars_repo_stars_event_min_datetime": "2020-07-29T00:30:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T13:12:34.000Z", "max_issues_repo_path": "pfrl/policies/gaussian_policy.py", "max_issues_repo_name": "ummavi/pfrl-1", "max_issues_repo_head_hexsha": "e856a7cca30fcc3871024cdf7522d066006a5f0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 124, "max_issues_repo_issues_event_min_datetime": "2020-07-30T01:53:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T07:05:38.000Z", "max_forks_repo_path": "pfrl/policies/gaussian_policy.py", "max_forks_repo_name": "ummavi/pfrl-1", "max_forks_repo_head_hexsha": "e856a7cca30fcc3871024cdf7522d066006a5f0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 122, "max_forks_repo_forks_event_min_datetime": "2020-07-29T04:33:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T13:29:41.000Z", "avg_line_length": 33.0564516129, "max_line_length": 79, "alphanum_fraction": 0.6460112222, "include": true, "reason": "import numpy", "num_tokens": 846}
"""Logic for alerting the user on possibly problematic patterns in the data (e.g. high number of zeros , constant values, high correlations).""" from enum import Enum, unique from typing import List, Union import warnings from contextlib import suppress import re from dateutil.parser import parse import numpy as np from ..model.correlations import perform_check_correlation from ..config import config from ..model.base import Variable @unique class MessageType(Enum): """Message Types""" CONSTANT = 1 """This variable has a constant value.""" ZEROS = 2 """This variable contains zeros.""" HIGH_CORRELATION = 3 """This variable is highly correlated.""" RECODED = 4 """This variable is correlated (categorical).""" HIGH_CARDINALITY = 5 """This variable has a high cardinality.""" UNSUPPORTED = 6 """This variable is unsupported.""" DUPLICATES = 7 """This variable contains duplicates.""" SKEWED = 8 """This variable is highly skewed.""" MISSING = 9 """This variable contains missing values.""" INFINITE = 10 """This variable contains infinite values.""" TYPE_DATE = 11 """This variable is likely a datetime, but treated as categorical.""" UNIQUE = 12 """This variable has unique values.""" CONSTANT_LENGTH = 13 """This variable has a constant length""" REJECTED = 15 """Variables are rejected if we do not want to consider them for further analysis.""" UNIFORM = 14 """The variable is uniformly distributed""" class Message(object): """A message object (type, values, column).""" def __init__( self, message_type: MessageType, values: dict, column_name: Union[str, None] = None, fields=None, ): if fields is None: fields = set() self.fields = fields self.message_type = message_type self.values = values self.column_name = column_name self.anchor_id = hash(column_name) def fmt(self): # TODO: render in template name = self.message_type.name.replace("_", " ") if name == "HIGH CORRELATION": num = len(self.values["fields"]) title = ", ".join(self.values["fields"]) name = f'<abbr title="This variable has a high correlation with {num} fields: {title}">HIGH CORRELATION</abbr>' return name def __repr__(self): message_type = self.message_type.name column = self.column_name return f"[{message_type}] warning on column {column}" def check_table_messages(table: dict) -> List[Message]: """Checks the overall dataset for warnings. Args: table: Overall dataset statistics. Returns: A list of messages. """ messages = [] if warning_value(table["n_duplicates"]): messages.append( Message( message_type=MessageType.DUPLICATES, values=table, fields={"n_duplicates"}, ) ) return messages def check_variable_messages(col: str, description: dict) -> List[Message]: """Checks individual variables for warnings. Args: col: The column name that is checked. description: The series description. Returns: A list of messages. 
""" messages = [] # Missing if warning_value(description["p_missing"]): messages.append( Message( column_name=col, message_type=MessageType.MISSING, values=description, fields={"p_missing", "n_missing"}, ) ) if description["type"] == Variable.S_TYPE_UNSUPPORTED: messages.append( Message( column_name=col, message_type=MessageType.UNSUPPORTED, values=description, fields={}, ) ) elif description["distinct_count_with_nan"] <= 1: messages.append( Message( column_name=col, message_type=MessageType.CONSTANT, values=description, fields={"n_unique"}, ) ) if ( description["type"] == Variable.S_TYPE_UNSUPPORTED or description["distinct_count_with_nan"] <= 1 ): messages.append( Message( column_name=col, message_type=MessageType.REJECTED, values=description, fields={}, ) ) elif description["distinct_count_without_nan"] == description["n"]: messages.append( Message( column_name=col, message_type=MessageType.UNIQUE, values=description, fields={"n_unique", "p_unique"}, ) ) # Infinite values if warning_value(description["p_infinite"]): messages.append( Message( column_name=col, message_type=MessageType.INFINITE, values=description, fields={"p_infinite", "n_infinite"}, ) ) # Date if description["type"] == Variable.TYPE_DATE: # Uniformity chi_squared_threshold = config["vars"]["num"]["chi_squared_threshold"].get( float ) # chi_squared_threshold = 0.5 if 0.0 < chi_squared_threshold < description["chi_squared"][1]: messages.append( Message(column_name=col, message_type=MessageType.UNIFORM, values={}) ) # Categorical if description["type"] == Variable.TYPE_CAT: if description["date_warning"]: messages.append( Message(column_name=col, message_type=MessageType.TYPE_DATE, values={}) ) # Uniformity chi_squared_threshold = config["vars"]["cat"]["chi_squared_threshold"].get( float ) if 0.0 < chi_squared_threshold < description["chi_squared"][1]: messages.append( Message(column_name=col, message_type=MessageType.UNIFORM, values={}) ) # High cardinality if description["distinct_count"] > config["vars"]["cat"][ "cardinality_threshold" ].get(int): messages.append( Message( column_name=col, message_type=MessageType.HIGH_CARDINALITY, values=description, fields={"n_unique"}, ) ) # Constant length if ( "composition" in description and description["min_length"] == description["max_length"] ): messages.append( Message( column_name=col, message_type=MessageType.CONSTANT_LENGTH, values=description, fields={"composition_min_length", "composition_max_length"}, ) ) # Numerical if description["type"] in {Variable.TYPE_NUM, Variable.TYPE_INT, Variable.TYPE_FLOAT}: # Skewness if warning_skewness(description["skewness"]): messages.append( Message( column_name=col, message_type=MessageType.SKEWED, values=description, fields={"skewness"}, ) ) # Uniformity chi_squared_threshold = config["vars"]["num"]["chi_squared_threshold"].get( float ) if 0.0 < chi_squared_threshold < description["chi_squared"][1]: messages.append( Message(column_name=col, message_type=MessageType.UNIFORM, values={}) ) # Zeros if warning_value(description["p_zeros"]): messages.append( Message( column_name=col, message_type=MessageType.ZEROS, values=description, fields={"n_zeros", "p_zeros"}, ) ) return messages def check_correlation_messages(correlations): messages = [] for corr, matrix in correlations.items(): if config["correlations"][corr]["warn_high_correlations"].get(bool): threshold = config["correlations"][corr]["threshold"].get(float) correlated_mapping = perform_check_correlation(matrix, threshold) if len(correlated_mapping) > 0: for k, v in 
correlated_mapping.items(): messages.append( Message( column_name=k, message_type=MessageType.HIGH_CORRELATION, values={"corr": corr, "fields": v}, ) ) return messages def warning_value(value: float) -> bool: return not np.isnan(value) and value > 0.01 def warning_skewness(v: float) -> bool: return not np.isnan(v) and ( v < -config["vars"]["num"]["skewness_threshold"].get(int) or v > config["vars"]["num"]["skewness_threshold"].get(int) ) def _date_parser(date_string): pattern = re.compile(r"[.\-:]") pieces = re.split(pattern, date_string) if len(pieces) < 3: raise ValueError("Must have at least year, month and date passed") return parse(date_string) def warning_type_date(series): with suppress(ValueError, TypeError): series.apply(_date_parser) return True return False
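# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the
# table-level check only needs a summary dict, so a toy input is enough to
# trigger the duplicates warning. The variable- and correlation-level checks
# additionally rely on the pandas-profiling config object, and the relative
# imports above mean this demo must be run with `python -m` from the package.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    toy_table = {"n_duplicates": 12}
    for message in check_table_messages(toy_table):
        print(repr(message))  # [DUPLICATES] warning on column None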
{"hexsha": "3be7ad25c4cdf519aad86fc074b3f9340f48588a", "size": 9688, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pandas_profiling_study/model/messages.py", "max_stars_repo_name": "lucasiscoviciMoon/pandas-profiling-study", "max_stars_repo_head_hexsha": "142d3b0f5e3139cdb531819f637a407682fa5684", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pandas_profiling_study/model/messages.py", "max_issues_repo_name": "lucasiscoviciMoon/pandas-profiling-study", "max_issues_repo_head_hexsha": "142d3b0f5e3139cdb531819f637a407682fa5684", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pandas_profiling_study/model/messages.py", "max_forks_repo_name": "lucasiscoviciMoon/pandas-profiling-study", "max_forks_repo_head_hexsha": "142d3b0f5e3139cdb531819f637a407682fa5684", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-25T15:20:39.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-25T15:20:39.000Z", "avg_line_length": 29.005988024, "max_line_length": 123, "alphanum_fraction": 0.5603839802, "include": true, "reason": "import numpy", "num_tokens": 1930}
import os
import sys

import numpy as np
from google.protobuf import text_format

from .layers import *


def print_stderr(msg):
    # status messages go to stderr so that they do not pollute stdout
    sys.stderr.write('%s\n' % msg)


try:
    import caffe
    PYCAFFE_AVAILABLE = True
except ImportError:
    import caffepb
    PYCAFFE_AVAILABLE = False
    print_stderr('WARNING: PyCaffe not found!')
    print_stderr('Falling back to protocol buffer implementation.')
    print_stderr('* Conversions will be drastically slower.')
    print_stderr('* This backend is UNTESTED!')

if PYCAFFE_AVAILABLE:
    # Use the protobuf code from the imported distribution.
    # This way, Caffe variants with custom layers will work.
    try:
        sys.path.append(os.path.join(os.path.dirname(caffe.__file__), 'proto/'))
        import caffe_pb2 as caffepb
    except ImportError:
        import caffepb
        print_stderr('Failed to import dist protobuf code. Using failsafe.')
        print_stderr('Custom layers might not work.')


class Node(object):
    def __init__(self, name, kind, layer=None):
        self.name = name
        self.kind = kind
        self.layer = LayerAdapter(layer, kind) if layer else None
        self.parents = []
        self.children = []
        self.data = None
        self.output_shape = None
        self.metadata = {}

    def add_parent(self, parent_node):
        assert parent_node not in self.parents
        self.parents.append(parent_node)
        if self not in parent_node.children:
            parent_node.children.append(self)

    def add_child(self, child_node):
        assert child_node not in self.children
        self.children.append(child_node)
        if self not in child_node.parents:
            child_node.parents.append(self)

    def get_only_parent(self):
        if len(self.parents) != 1:
            raise KaffeError('Node (%s) expected to have 1 parent. Found %s.' %
                             (self, len(self.parents)))
        return self.parents[0]

    @property
    def parameters(self):
        if self.layer is not None:
            return self.layer.parameters
        return None

    @property
    def data_shape(self):
        assert self.data
        return self.data[IDX_WEIGHTS].shape

    def __str__(self):
        return '[%s] %s' % (self.kind, self.name)

    def __repr__(self):
        return '%s (0x%x)' % (self.name, id(self))


class Graph(object):
    def __init__(self, nodes=None, name=None):
        self.nodes = nodes or []
        self.node_lut = {node.name: node for node in self.nodes}
        self.name = name

    def add_node(self, node):
        self.nodes.append(node)
        self.node_lut[node.name] = node

    def get_node(self, name):
        try:
            return self.node_lut[name]
        except KeyError:
            raise KaffeError('Layer not found: %s' % name)

    def get_input_nodes(self):
        return [node for node in self.nodes if len(node.parents) == 0]

    def get_output_nodes(self):
        return [node for node in self.nodes if len(node.children) == 0]

    def topologically_sorted(self):
        sorted_nodes = []
        unsorted_nodes = list(self.nodes)
        temp_marked = set()
        perm_marked = set()

        def visit(node):
            if node in temp_marked:
                raise KaffeError('Graph is not a DAG.')
            if node in perm_marked:
                return
            temp_marked.add(node)
            for child in node.children:
                visit(child)
            perm_marked.add(node)
            temp_marked.remove(node)
            sorted_nodes.insert(0, node)

        while len(unsorted_nodes):
            visit(unsorted_nodes.pop())
        return sorted_nodes

    def compute_output_shapes(self):
        sorted_nodes = self.topologically_sorted()
        for node in sorted_nodes:
            node.output_shape = NodeKind.compute_output_shape(node)

    def __contains__(self, key):
        return key in self.node_lut

    def __str__(self):
        hdr = '{:<20} {:<30} {:>20} {:>20}'.format('Type', 'Name', 'Param',
                                                   'Output')
        s = [hdr, '-' * 94]
        for node in self.topologically_sorted():
            data_shape = node.data[IDX_WEIGHTS].shape if node.data else '--'
            out_shape = node.output_shape or '--'
            s.append('{:<20} {:<30} {:>20} {:>20}'.format(node.kind, node.name,
                                                          str(data_shape),
                                                          str(out_shape)))
        return '\n'.join(s)


class DataInjector(object):
    def __init__(self, def_path, data_path):
        self.def_path = def_path
        self.data_path = data_path
        self.did_use_pb = False
        self.load()

    def load(self):
        if PYCAFFE_AVAILABLE:
            self.load_using_caffe()
        else:
            self.load_using_pb()

    def load_using_caffe(self):
        net = caffe.Net(self.def_path, self.data_path, caffe.TEST)
        data = lambda blob: blob.data
        self.params = [(k, list(map(data, v))) for k, v in net.params.items()]

    def load_using_pb(self):
        data = caffepb.NetParameter()
        data.MergeFromString(open(self.data_path, 'rb').read())
        pair = lambda layer: (layer.name, self.transform_data(layer))
        layers = data.layers or data.layer
        self.params = [pair(layer) for layer in layers if layer.blobs]
        self.did_use_pb = True

    def transform_data(self, layer):
        transformed = []
        for idx, blob in enumerate(layer.blobs):
            if len(blob.shape.dim):
                dims = blob.shape.dim
                c_o, c_i, h, w = map(int, [1] * (4 - len(dims)) + list(dims))
            else:
                c_o = blob.num
                c_i = blob.channels
                h = blob.height
                w = blob.width
            data = np.array(blob.data, dtype=np.float32).reshape(c_o, c_i, h, w)
            transformed.append(data)
        return transformed

    def adjust_parameters(self, node, data):
        if not self.did_use_pb:
            return data
        # When using the protobuf-backend, each parameter initially has four
        # dimensions. In certain cases (like FC layers), we want to eliminate
        # the singleton dimensions. This implementation takes care of the
        # common cases. However, it does leave the potential for future
        # issues. The Caffe-backend does not suffer from this problem.
        data = list(data)
        squeeze_indices = [1]  # Squeeze biases.
        if node.kind == NodeKind.InnerProduct:
            squeeze_indices.append(0)  # Squeeze FC.
        for idx in squeeze_indices:
            data[idx] = np.squeeze(data[idx])
        return data

    def inject(self, graph):
        for layer_name, data in self.params:
            if layer_name in graph:
                node = graph.get_node(layer_name)
                node.data = self.adjust_parameters(node, data)
            else:
                print_stderr('Ignoring parameters for non-existent layer: %s' %
                             layer_name)


class DataReshaper(object):
    def __init__(self, mapping):
        self.mapping = mapping

    def map(self, ndim):
        try:
            return self.mapping[ndim]
        except KeyError:
            raise KaffeError('Ordering not found for %d dimensional tensor.' %
                             ndim)

    def transpose(self, data):
        return data.transpose(self.map(data.ndim))

    def has_spatial_parent(self, node):
        try:
            parent = node.get_only_parent()
            s = parent.output_shape
            return (s[IDX_H] > 1 or s[IDX_W] > 1)
        except KaffeError:
            return False

    def reshape(self, graph, replace=True):
        for node in graph.nodes:
            if node.data is None:
                continue
            data = node.data[IDX_WEIGHTS]
            if (node.kind == NodeKind.InnerProduct) and \
                    self.has_spatial_parent(node):
                # The FC layer connected to the spatial layer needs to be
                # re-wired to match the new spatial ordering.
                in_shape = node.get_only_parent().output_shape
                fc_shape = data.shape
                fc_order = self.map(2)
                data = data.reshape((fc_shape[IDX_C_OUT], in_shape[IDX_C],
                                     in_shape[IDX_H], in_shape[IDX_W]))
                data = self.transpose(data)
                node.reshaped_data = data.reshape(fc_shape[fc_order[0]],
                                                  fc_shape[fc_order[1]])
            else:
                node.reshaped_data = self.transpose(data)

        if replace:
            for node in graph.nodes:
                if node.data is not None:
                    node.data[IDX_WEIGHTS] = node.reshaped_data
                    del node.reshaped_data


class GraphBuilder(object):
    def __init__(self, def_path, data_path=None, phase='test'):
        self.def_path = def_path
        self.data_path = data_path
        self.phase = phase
        self.load()

    def load(self):
        self.params = caffepb.NetParameter()
        with open(self.def_path, 'r') as def_file:
            text_format.Merge(def_file.read(), self.params)

    def filter_layers(self, layers):
        phase_map = {0: 'train', 1: 'test'}
        filtered_layer_names = set()
        filtered_layers = []
        for layer in layers:
            phase = self.phase
            if len(layer.include):
                phase = phase_map[layer.include[0].phase]
            if len(layer.exclude):
                phase = phase_map[1 - layer.exclude[0].phase]
            exclude = (phase != self.phase)
            # Dropout layers appear in a fair number of Caffe
            # test-time networks. These are just ignored. We'll
            # filter them out here.
            if (not exclude) and (phase == 'test'):
                exclude = (layer.type == LayerType.Dropout)
            if not exclude:
                filtered_layers.append(layer)
                # Guard against dupes.
                assert layer.name not in filtered_layer_names
                filtered_layer_names.add(layer.name)
        return filtered_layers

    def make_node(self, layer):
        kind = NodeKind.map_raw_kind(layer.type)
        if kind is None:
            raise KaffeError('Unknown layer type encountered: %s' % layer.type)
        return Node(layer.name, kind, layer=layer)

    def make_input_nodes(self):
        # This method is for old-style inputs, where the input specification
        # was not treated as a first-class layer in the prototxt.
        # Newer models use the "Input layer" type.
        nodes = [Node(name, NodeKind.Data) for name in self.params.input]
        if len(nodes):
            input_dim = list(map(int, self.params.input_dim))
            if not input_dim:
                if len(self.params.input_shape) > 0:
                    input_dim = list(map(int, self.params.input_shape[0].dim))
                else:
                    raise KaffeError('Dimensions for input not specified.')
            for node in nodes:
                node.output_shape = tuple(input_dim)
        return nodes

    def fuse_relus(self, nodes):
        fused_nodes = []
        for node in nodes:
            if node.kind != NodeKind.ReLU:
                continue
            parent = node.get_only_parent()
            if len(parent.children) != 1:
                # We can only fuse this ReLU if its parent's
                # value isn't used by any other node.
                continue
            # Rewrite the ReLU's children to its parent.
            for child in node.children:
                child.parents.remove(node)
                parent.add_child(child)
            # Disconnect the ReLU from the graph.
            parent.children.remove(node)
            fused_nodes.append(node)
            # Annotate the fused node.
            parent.metadata['relu'] = True
        return [node for node in nodes if node not in fused_nodes]

    def build(self, fuse_relus=True):
        layers = self.params.layers or self.params.layer
        layers = self.filter_layers(layers)
        nodes = self.make_input_nodes()
        nodes += [self.make_node(layer) for layer in layers]
        graph = Graph(nodes=nodes, name=self.params.name)
        node_outputs = {}
        for layer in layers:
            node = graph.get_node(layer.name)
            for parent_name in layer.bottom:
                assert parent_name != layer.name
                parent_node = node_outputs.get(parent_name)
                if (parent_node is None) or (parent_node == node):
                    parent_node = graph.get_node(parent_name)
                node.add_parent(parent_node)
            for child_name in layer.top:
                if child_name == layer.name:
                    continue
                if child_name in graph:
                    # This is an "in-place operation" that overwrites an
                    # existing node. This would create a cycle in the graph.
                    # We'll undo the in-placing by substituting this node
                    # wherever the overwritten node is referenced.
                    node_outputs[child_name] = node
                else:
                    # This is an "implicit" child node: not explicitly
                    # defined in the prototxt, but as a top (output) for
                    # some layer.
                    graph.add_node(Node(child_name, NodeKind.Implicit))
                    node.add_child(graph.get_node(child_name))
        if fuse_relus:
            graph = Graph(nodes=self.fuse_relus(graph.nodes), name=graph.name)
        graph.compute_output_shapes()
        if self.data_path is not None:
            DataInjector(self.def_path, self.data_path).inject(graph)
        return graph


class NodeMapper(NodeDispatch):
    def __init__(self, graph):
        self.graph = graph

    def attach_node(self, node):
        return True

    def map(self):
        nodes = self.graph.topologically_sorted()
        # Remove input nodes - we'll handle them separately.
        input_nodes = self.graph.get_input_nodes()
        nodes = [t for t in nodes if t not in input_nodes]
        # Remove implicit nodes.
        nodes = [t for t in nodes if t.kind != NodeKind.Implicit]
        # Decompose DAG into chains.
        chains = []
        for node in nodes:
            attach_to_chain = None
            if len(node.parents) == 1:
                parent = node.get_only_parent()
                for chain in chains:
                    if chain[-1] == parent:
                        # Node is part of an existing chain.
                        attach_to_chain = chain
                        break
            if attach_to_chain is None:
                # Start a new chain for this node.
                attach_to_chain = []
                chains.append(attach_to_chain)
            attach_to_chain.append(node)
        # Map each chain.
        mapped_chains = []
        for chain in chains:
            mapped_chains.append(self.map_chain(chain))
        return self.commit(mapped_chains)

    def map_chain(self, chain):
        return [self.map_node(node) for node in chain]

    def map_node(self, node):
        map_func = self.get_handler(node.kind, 'map')
        mapped_node = map_func(node)
        assert mapped_node is not None
        if self.attach_node(node):
            mapped_node.node = node
        return mapped_node

    def commit(self, mapped_chains):
        raise NotImplementedError('Must be implemented by subclass.')
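# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the Node/Graph
# machinery can be exercised without Caffe installed. The kind labels below
# are plain strings standing in for NodeKind values, and the relative imports
# above mean this demo must be run with `python -m` from the package.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    a = Node('data', 'Data')
    b = Node('conv1', 'Convolution')
    c = Node('fc1', 'InnerProduct')
    b.add_parent(a)
    c.add_parent(b)
    g = Graph(nodes=[a, b, c], name='toy')
    # prints the nodes in dependency order: ['data', 'conv1', 'fc1']
    print([n.name for n in g.topologically_sorted()])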
{"hexsha": "7d01c64ee813bbbabc23d0c1c33c862e58182544", "size": 15245, "ext": "py", "lang": "Python", "max_stars_repo_path": "nn/kaffe/core.py", "max_stars_repo_name": "polltooh/FineGrainedAction", "max_stars_repo_head_hexsha": "4582b4179e643119448c7c20ab06044fb211163e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nn/kaffe/core.py", "max_issues_repo_name": "polltooh/FineGrainedAction", "max_issues_repo_head_hexsha": "4582b4179e643119448c7c20ab06044fb211163e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nn/kaffe/core.py", "max_forks_repo_name": "polltooh/FineGrainedAction", "max_forks_repo_head_hexsha": "4582b4179e643119448c7c20ab06044fb211163e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.457002457, "max_line_length": 109, "alphanum_fraction": 0.5876680879, "include": true, "reason": "import numpy", "num_tokens": 3252}
# -*- coding: utf-8 -*-
"""
comments
author: diqiuzhuanzhuan
email: diqiuzhuanzhuan@gmail.com

"""

import os
import unittest

import numpy as np

from .dataman import Sample
from .dataman import TrainingInstance
from .dataman import create_attention_mask
from .dataman import PreTrainingDataMan


class SampleTest(unittest.TestCase):

    def setUp(self) -> None:
        self.sample = Sample()

    def test_block_wise_masking(self):
        x = ["[SOS]", "你", "好", "我", "是", "20", "##19", "一", "个", "好",
             "的", "人", "[EOS]", "你", "呢", "你", "在", "干", "嘛", "呢",
             "[EOS]"]
        for i in range(1000):
            vocab_words = list(set(x).difference(set({"[SOS]", "[EOS]"})))
            M = self.sample.block_wise_masking(
                x, vocab_words, max_predictions_per_seq=0, mask_ratio=0.15)
            self.assertFalse("[MASK]" in M[0])
            self.assertLessEqual(len(M[4]), 0)

        (output_tokens, output_tokens_positions, masked_lm_positions,
         masked_lm_labels, pseudo_masked_lm_positions,
         pseudo_masked_lm_labels, pseudo_index, masked_index) = \
            self.sample.block_wise_masking(
                x, vocab_words, max_predictions_per_seq=4, mask_ratio=0.30)
        print(output_tokens)
        # for i, j in zip(masked_index, masked_lm_positions):
        #     self.assertTrue(output_tokens[i] == '[MASK]' or output_tokens[i] == x[j] or output_tok)
        for i in pseudo_index:
            for j in i:
                self.assertTrue(output_tokens[j] == '[Pseudo]',
                                "index is {}, tokens is {}".format(
                                    j, output_tokens[j]))
        for i, label in zip(masked_lm_positions, masked_lm_labels):
            self.assertTrue(x[i] == label)
        for sublist_index, sublist_labels in zip(pseudo_index,
                                                 pseudo_masked_lm_labels):
            for i, j in zip(sublist_index, sublist_labels):
                self.assertTrue(output_tokens[i - len(sublist_index)] == j,
                                "output_tokens[i] is {}, j is {}".format(
                                    output_tokens[i], j))


class DatamanTest(unittest.TestCase):

    def test_create_mask_matrix(self):
        tokens = ['x1', 'x2', '[Pesudo]', '[MASK]', 'x3', 'x4', 'x5',
                  '[Pesudo]', '[Pesudo]', '[MASK]', '[MASK]', 'x6',
                  '[PAD]', '[PAD]', '[PAD]']
        output_token_positions = [0, 1, 1, 1, 2, 3, 4, 3, 4, 3, 4, 5, 0, 0, 0]
        input_mask = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
        segment_ids = [1] * len(output_token_positions)
        is_random_next = False
        pseudo_index = [[7, 8], [2], [], []]
        pseudo_masked_sub_list_len = [2, 1, 0, 0]
        mask_index = [3, 9, 10, 0, 0]
        pseudo_masked_lm_positions = [[3, 4], [1]]
        masked_lm_positions = [2, 5, 6, 0, 0]
        pseudo_masked_lm_labels = [['x2'], ['x4', 'x5']]
        masked_lm_labels = ['x2', 'x4', 'x5']
        instance = TrainingInstance(
            tokens=tokens,
            output_tokens_positions=output_token_positions,
            segment_ids=segment_ids,
            is_random_next=is_random_next,
            pseudo_index=pseudo_index,
            pseudo_masked_lm_positions=pseudo_masked_lm_positions,
            masked_lm_positions=masked_lm_positions,
            pseudo_masked_lm_labels=pseudo_masked_lm_labels,
            masked_lm_labels=masked_lm_labels,
            masked_index=mask_index
        )
        flatten_index = [x for _ in instance.pseudo_index for x in _]
        masked_matrix = create_attention_mask(instance.tokens, input_mask,
                                              flatten_index,
                                              pseudo_masked_sub_list_len)
        expected_matrix = [
            [1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
            [1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0],
            [1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0],
            [1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
            [1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
            [1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0],
            [1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0],
            [1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
            [1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
            [1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
            [1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
            [1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ]
        # np.cast and np.int are deprecated in recent numpy releases
        masked_matrix = np.asarray(masked_matrix).astype(int)
        print(masked_matrix)
        self.assertListEqual(masked_matrix.tolist(), expected_matrix)

    def test_create_pretrain_data(self):
        vocab_file = os.path.join(os.path.dirname(__file__), "test_data",
                                  "vocab.txt")
        input_file = os.path.join(os.path.dirname(__file__),
                                  "../bert/sample_text.txt")
        output_file = os.path.join(os.path.dirname(__file__), "test_data",
                                   "pretraining_data")
        ptdm = PreTrainingDataMan(vocab_file=vocab_file, max_seq_length=128,
                                  max_predictions_per_seq=20)
        ptdm.create_pretraining_data(input_file, output_file)
        dataset = ptdm.read_data_from_tfrecord(output_file, is_training=True,
                                               batch_size=1)
        dataset = dataset.repeat(1)
        for data in dataset:
            input_ids = data["input_ids"].numpy()[0]
            input_mask = data["input_mask"].numpy()[0]
            masked_index = data["masked_index"].numpy()[0]
            masked_lm_ids = data["masked_lm_ids"].numpy()[0]
            pseudo_masked_index = data["pseudo_masked_index"].numpy()[0]
            pseudo_masked_lm_ids = data["pseudo_masked_lm_ids"].numpy()[0]
            attention_mask = data["attention_mask"].numpy()[0]
            pseudo_masked_sub_list_len = \
                data["pseudo_masked_sub_list_len"].numpy()[0]
            start_offset = 0
            pseudo_masked_sub_list_len = pseudo_masked_sub_list_len[
                0:np.count_nonzero(pseudo_masked_sub_list_len)]
            pseudo_masked_index = pseudo_masked_index[
                0:np.sum(pseudo_masked_sub_list_len)]
            for sublist_len in reversed(pseudo_masked_sub_list_len):
                reversed_pseudo_masked_index = pseudo_masked_index[::-1]
                sublist = reversed_pseudo_masked_index[
                    start_offset:start_offset + sublist_len]
                for i in sublist:
                    for j in sublist:
                        self.assertTrue(
                            attention_mask[i][j] == 1,
                            "i: {}, j: {}, value: {}, {}, {}".format(
                                i, j, attention_mask[i, j],
                                pseudo_masked_index,
                                pseudo_masked_sub_list_len))
                        self.assertTrue(
                            attention_mask[i - sublist_len][j - sublist_len] == 1,
                            "i: {}, j: {}, value: {}, {}, {}".format(
                                i, j, attention_mask[i, j],
                                pseudo_masked_index,
                                pseudo_masked_sub_list_len))
                start_offset += sublist_len
            for i, value in enumerate(input_mask):
                if value == 0:
                    # padded positions must neither attend nor be attended
                    self.assertTrue(np.sum(attention_mask[i, :]) == 0)
                    self.assertTrue(np.sum(attention_mask[:, i]) == 0)
                    continue


if __name__ == "__main__":
    unittest.main()
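# ---------------------------------------------------------------------------
# Running the suite (illustrative, assuming the package layout implied by the
# relative imports above):
#     python -m unittest poros.unilmv2.dataman_test
# or a single case:
#     python -m unittest poros.unilmv2.dataman_test.DatamanTest.test_create_mask_matrix
# ---------------------------------------------------------------------------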
{"hexsha": "5642196fbbc7407bd904daee56db7bf6c60c9bb1", "size": 7231, "ext": "py", "lang": "Python", "max_stars_repo_path": "poros/unilmv2/dataman_test.py", "max_stars_repo_name": "diqiuzhuanzhuan/poros", "max_stars_repo_head_hexsha": "b4f0ff4c5094aa9df12a9195e5b3edd85c3460f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-12-11T06:14:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-24T01:51:26.000Z", "max_issues_repo_path": "poros/unilmv2/dataman_test.py", "max_issues_repo_name": "diqiuzhuanzhuan/poros", "max_issues_repo_head_hexsha": "b4f0ff4c5094aa9df12a9195e5b3edd85c3460f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-06-04T09:05:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-17T12:07:07.000Z", "max_forks_repo_path": "poros/unilmv2/dataman_test.py", "max_forks_repo_name": "diqiuzhuanzhuan/poros", "max_forks_repo_head_hexsha": "b4f0ff4c5094aa9df12a9195e5b3edd85c3460f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-01-14T08:06:26.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-10T02:36:18.000Z", "avg_line_length": 53.1691176471, "max_line_length": 159, "alphanum_fraction": 0.5574609321, "include": true, "reason": "import numpy", "num_tokens": 2260}
import os

import numpy as np

# split every recording in DATA/ into two halves and save each half as a
# new .npy file, continuing the numbering after the existing files
datalist = sorted(os.listdir('DATA/'))
datacounter = len(datalist) + 1
for filename in datalist:
    data = np.load(os.path.join('DATA', filename), allow_pickle=True)
    print("dividing this", filename)
    half = len(data) // 2
    np.save("DATA/data85_" + str(datacounter) + ".npy",
            np.array(data[:half], dtype=object))
    datacounter += 1
    np.save("DATA/data85_" + str(datacounter) + ".npy",
            np.array(data[half:], dtype=object))
    datacounter += 1
    print("divided as", datacounter - 1, datacounter - 2)
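# ---------------------------------------------------------------------------
# Illustrative sanity check (not part of the original script): the two halves
# of the last split should add up to the length of the source recording.
# Assumes DATA/ was not empty, so `data` and `datacounter` are still bound.
# ---------------------------------------------------------------------------
first = np.load("DATA/data85_{}.npy".format(datacounter - 2), allow_pickle=True)
second = np.load("DATA/data85_{}.npy".format(datacounter - 1), allow_pickle=True)
assert len(first) + len(second) == len(data)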
{"hexsha": "d51679e901395e74c5420aa27ae0c129d1bfde8e", "size": 622, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/collecting_data/datadivider.py", "max_stars_repo_name": "gururajo/Self-Driving-Car", "max_stars_repo_head_hexsha": "fecde52396df6c17e6948cc77ab9d17a524150d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/collecting_data/datadivider.py", "max_issues_repo_name": "gururajo/Self-Driving-Car", "max_issues_repo_head_hexsha": "fecde52396df6c17e6948cc77ab9d17a524150d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/collecting_data/datadivider.py", "max_forks_repo_name": "gururajo/Self-Driving-Car", "max_forks_repo_head_hexsha": "fecde52396df6c17e6948cc77ab9d17a524150d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5882352941, "max_line_length": 99, "alphanum_fraction": 0.7025723473, "include": true, "reason": "import numpy", "num_tokens": 185}