(* NOTE(review): the original file began with stray dataset-export metadata
   (a markdown table fragment: "text stringlengths 0 1.25M | meta ...").
   It is not Coq code and has been neutralized into this comment so the
   file remains parseable. *)
Set Implicit Arguments.
Require Import Shared.
Require Import LibFix.
Require Import JsSemanticsAux JsWf JsWfAux JsSafety JsScopes JsInterpreter.
(**************************************************************)
(** ** Correctness of proto_comp. *)
Section Proto.
(** Termination of [proto_comp] *)
(* Well-founded relation used to justify termination of [proto_comp]:
   [(h, f, l')] is "closer" than [(h, f, l)] when [l] does not itself
   carry field [f] and [l'] is the prototype of [l] in heap [h] — i.e.
   one step further along the prototype chain. *)
Inductive proto_closer : binary (heap * field * loc) :=
| proto_closer_next : forall h f (l l':loc),
ok_heap h ->
~ indom h l f ->
binds h l field_proto l' ->
proto_closer (h, f, l') (h, f, l).
(* [proto_closer] is well-founded: in a well-formed heap every prototype
   chain terminates ([ok_heap_protochain]), so there is no infinite
   descending sequence of [proto_closer] steps.  The proof proceeds by
   induction on the protochain of the starting location. *)
Lemma proto_closer_wf : wf proto_closer.
Proof.
intros [[h f] l]. constructor.
intros [[h' f'] l'] H. inverts H as O D B1.
lets~ N: ok_heap_protochain B1. inverts N as B2 P.
(* The null case is contradictory: loc_null cannot bind field_proto. *)
false. forwards*: ok_heap_null B1.
forwards E: binds_func_loc B2 B1. subst.
clears O B1 B2 D.
induction P; constructor; intros [[h2 f2] l2] M; inverts M.
false. forwards*: ok_heap_null.
forwards E: binds_func_loc H H8. subst*.
Qed.
(* Unfolding (fixed-point) equation for [proto_comp], valid on any
   well-formed heap.  Obtained from the partial-fixpoint combinator
   [FixFun3_fix_partial] of LibFix, using [proto_closer] as the
   termination measure. *)
Lemma proto_comp_fix : forall h f l,
ok_heap h -> proto_comp h f l = proto_comp_body proto_comp h f l.
Proof.
applys~ (FixFun3_fix_partial proto_closer). apply proto_closer_wf.
(* Contraction condition: the body calls its recursive argument only on
   strictly [proto_closer]-smaller triples. *)
intros h1 f1 l1 proto_comp1 proto_comp2 O Cont. unfolds.
repeat case_if*.
sets_eq v: (read h1 l1 field_proto). destruct~ v.
applys~ Cont. constructor~. rewrite* binds_equiv_read.
Qed.
(** Correctness and completeness of [proto_comp] *)
Hint Constructors proto.
(* Correctness of the computable prototype lookup: whenever [proto_comp]
   returns [l'] on a well-formed heap for a bound location [l], the
   inductive specification [proto h f l l'] holds.  By induction on the
   protochain of [l], unfolding [proto_comp] with [proto_comp_fix]. *)
Lemma proto_comp_correct : forall h f l l',
ok_heap h ->
bound h l ->
proto_comp h f l = l' ->
proto h f l l'.
Proof.
introv OK B E. forwards~ PC: ok_heap_protochain_bound B.
induction PC.
(* Base case: a bound location cannot be loc_null. *)
lets (f'&B1): B. rewrite indom_equiv_binds in B1.
lets (v&B2): B1. forwards*: ok_heap_null B2.
rewrite~ proto_comp_fix in E.
unfold proto_comp_body in E.
case_if*. subst~.
case_if*. subst~.
case_if* in E.
rewrite~ binds_equiv_read in H.
rewrite H in E. rewrite* <- binds_equiv_read in H.
apply* proto_next.
(* Split on whether the prototype is loc_null. *)
tests: (l'0 = loc_null).
rewrite~ proto_comp_fix in E.
unfolds in E. case_if. subst~.
apply* IHPC. inverts* PC. apply* binds_bound.
false n1.
lets (v&B1): B. rewrite indom_equiv_binds in B1.
lets (v'&B2): B1. forwards~ B3: ok_heap_protochain B2.
inverts* B3. rewrite* indom_equiv_binds.
Qed.
(* Completeness of the computable prototype lookup: every derivation of
   the inductive [proto] judgment is reproduced by [proto_comp].
   By induction on the [proto] derivation. *)
Lemma proto_comp_complete : forall h f l l',
ok_heap h ->
bound h l ->
proto h f l l' ->
proto_comp h f l = l'.
Proof.
introv OK B P. induction P;
rewrite* proto_comp_fix; unfold proto_comp_body; case_if*.
case_if*.
(* loc_null cannot be bound in a well-formed heap. *)
subst. lets (f'&B'): B. rewrite~ indom_equiv_binds in B'.
lets (v&B''): B'. forwards*: ok_heap_null B''.
case_if*. case_if*.
rewrite (binds_read H0).
tests: (l' = loc_null).
asserts: (l'' = loc_null).
apply* proto_func.
subst. rewrite* proto_comp_fix. unfold proto_comp_body. case_if*.
apply* IHP.
forwards*: ok_heap_ok_value H0.
inverts* H1. inverts* H2. apply* indom_bound.
false n1. rewrite* indom_equiv_binds.
Qed.
End Proto.
(**************************************************************)
(** ** Correctness of scope_comp. *)
Section Scopes.
(** Correctness and completeness of [scope_comp] *)
(* Correctness of the computable scope lookup: if [scope_comp] returns
   [l'] for field [f] in scope chain [L] over a well-formed heap, then
   the inductive [scopes] judgment holds.  By induction on [L], using
   [proto_comp_correct] on the head of the chain at each step. *)
Lemma scope_comp_correct : forall h f L l',
scope_comp h f L = l' ->
ok_heap h ->
ok_scope h L ->
scopes h f L l'.
Proof.
introv E OK OKL.
lets~ FOK: ok_scope_all_bound (rm OKL) OK.
gen h f L l'. induction L; introv OKL E.
inverts E. constructor*.
simpls. inverts OKL as (Ba & BL).
lets* (l&Hl): proto_defined h f a. apply* indom_bound.
(* proto_comp is deterministic, so its result must be the [l] above. *)
assert (forall l', proto_comp h f a = l' -> l = l').
introv E'. lets*: proto_comp_correct E'.
apply* indom_bound.
apply* proto_func.
forwards: (rm H); [ reflexivity | ]. subst. case_if*.
constructor*.
apply* proto_comp_correct.
apply* indom_bound.
apply* scopes_here.
Qed.
(* Completeness of the computable scope lookup: every derivation of the
   inductive [scopes] judgment is reproduced by [scope_comp].  By
   induction on the [scopes] derivation, using [proto_comp_complete]. *)
Lemma scope_comp_complete : forall h f L l',
scopes h f L l' ->
ok_heap h ->
ok_scope h L ->
scope_comp h f L = l'.
Proof.
introv Sc OK OKL. forwards~ FOK: ok_scope_all_bound (rm OKL).
induction Sc; simpls*.
asserts Eq: (proto_comp h f l = l').
forwards*: proto_comp_complete H. inverts* H.
apply* indom_bound.
apply* binds_bound.
inverts FOK. rewrite Eq. forwards*: proto_comp_correct Eq. case_if*.
inverts FOK. case_if*. false. lets*: proto_comp_complete H.
(* LATER: use [case_if* as C] *)
Qed.
End Scopes.
(**************************************************************)
(** ** Correctness of getvalue_comp. *)
Section Getvalue.
(** Correctness and completness of [getvalue_comp] *)
(* Correctness of [getvalue_comp] specialized to a normal-field
   reference [Ref l (field_normal f)]: a successful computation yields a
   value related by the inductive [getvalue] judgment.  Splits on whether
   the prototype lookup returns loc_null. *)
Lemma getvalue_comp_correct_ref : forall h l f v,
getvalue_comp h (Ref l (field_normal f)) = Some v ->
ok_heap h ->
bound h l ->
getvalue h (Ref l (field_normal f)) v.
Proof.
introv E OK B. unfolds in E. case_if*.
(* Name the result of the prototype lookup so we can case on it. *)
asserts [l' Hl']: (exists l', proto_comp h (field_normal f) l = l').
destruct* proto_comp.
rewrite Hl' in E. case_if*; inverts~ E.
apply* getvalue_ref_null. subst. apply* proto_comp_correct.
lets~ M: proto_comp_correct Hl'. applys* getvalue_ref_not_null.
applys~ read_binds. apply* proto_indom.
Qed.
(* Correctness of [getvalue_comp] on an arbitrary result: values are
   returned unchanged, and references are handled by
   [getvalue_comp_correct_ref] after showing the field is a normal one. *)
Lemma getvalue_comp_correct : forall h r v,
getvalue_comp h r = Some v ->
ok_result h r ->
ok_heap h ->
getvalue h r v.
Proof.
introv E R OK. unfolds getvalue_comp.
destruct r as [|[l f]].
inverts E. constructor.
(* A successful lookup forces the field to be of the form field_normal. *)
asserts [f' Hf]: (exists f', f = field_normal f').
destruct* f; false.
subst. apply* getvalue_comp_correct_ref. case_if*.
inverts R as R. inverts* R.
apply* indom_bound.
Qed.
(* Completeness of [getvalue_comp]: every derivation of the inductive
   [getvalue] judgment is reproduced by the computation.  By induction
   on the derivation, using [proto_comp_complete] for the reference
   cases. *)
Lemma getvalue_comp_complete : forall h r v,
getvalue h r v ->
ok_heap h ->
getvalue_comp h r = Some v.
Proof.
introv Gv OK. unfold getvalue_comp. induction Gv.
fequals.
case_if*. forwards* M: proto_comp_complete H.
inverts H; tryfalse. apply* binds_bound. applys* binds_bound.
rewrite M. case_if*. fequals. applys* binds_read.
case_if*. forwards*: proto_comp_complete H. (* ARTHUR: can you factorize the pattern with the other case? *)
inverts H; tryfalse. apply* binds_bound.
case_if*.
Qed.
End Getvalue.
(**************************************************************)
(** ** Lemmas for the correctness of the interpreter *)
Section Correctness.
(* Decidable equality on interpreter outcomes [out].  The decision
   procedure is the classical [If o1 = o2 then true else false], so this
   instance is proof-only and must not be extracted. *)
Global Instance out_comparable : Comparable out.
Proof.
(* Warning: This proof is classical, and is only there for the proofs.
It shouldn't be extracted. *)
(* TODO: do we want/need a version that can be extracted? *)
(* Martin: I don't thing so for this case: I'm just using it to apply the lemmas `elim_*'. *)
applys (@comparable_beq out) (fun (o1 o2 : out) =>
If o1 = o2 then true else false). (* todo: remove type annot *)
split; introv E.
case_if*.
subst; case_if*.
Qed.
(* The error outcome [wrong h] is never a normal return.  Holds by case
   analysis on the [Mnostuck] flag that defines [wrong]. *)
Lemma wrong_not_ret : forall h h' r,
wrong h <> out_return h' (ret_result r).
Proof.
introv. unfold wrong.
destruct Mnostuck; discriminate.
Qed.
(* Symmetric form of [wrong_not_ret], convenient when the equality
   appears in the opposite orientation. *)
Lemma ret_not_wrong : forall h h' r,
out_return h' (ret_result r) <> wrong h.
Proof. introv E. symmetry in E. forwards*: wrong_not_ret E. Qed.
(* Inversion principle for the [if_success] monadic combinator: either
   the scrutinee [r0] itself is the final (non-result) outcome, or it
   returned a proper result that was passed to the continuation. *)
Lemma elim_if_success : forall r0 k h r,
if_success r0 k = out_return h r ->
(r0 = out_return h r /\ forall v, r <> ret_result v) \/
exists r1 h0, r0 = out_return h0 (ret_result r1).
Proof.
introv E. destruct r0.
destruct* r0. inverts E. left. split*. introv. discriminate.
simpls. inverts* E.
simpls. inverts* E.
Qed.
(* Inversion principle for [if_defined]: the option argument is either
   [None] or [Some b] for some [b]. *)
Lemma elim_if_defined : forall A h f r (a : option A),
if_defined h a f = r ->
a = None \/ exists b, a = Some b.
Proof. introv E. destruct* a. Qed.
(* Inversion principle for [if_success_value]: either the computation
   did not return a result, or it returned a result whose value lookup
   via [getvalue_comp] failed ([None]), or both succeeded.  Built on
   top of [elim_if_success] and [elim_if_defined]. *)
Lemma elim_if_success_value : forall r0 k h r,
if_success_value r0 k = out_return h r ->
(r0 = out_return h r /\ forall v, r <> ret_result v) \/
(exists v h, r0 = out_return h (ret_result v) /\ getvalue_comp h v = None) \/
exists v h b, r0 = out_return h (ret_result v) /\ getvalue_comp h v = Some b.
Proof.
introv E.
unfolds in E.
forwards~ [OK | (v&h'&E')]: elim_if_success E.
right. subst. simpls.
forwards~ [? | ?]: elim_if_defined E.
rewrite H in E. simpls.
left*.
lets (b&E'): H. right*.
Qed.
(* Inversion principle for [if_is_ref]: the scrutinee is either a plain
   value (in which case the combinator produced [wrong]) or a reference
   [Ref l f]. *)
Lemma elim_if_is_ref : forall h o k r,
if_is_ref h o k = r ->
((exists h', wrong h' = r) /\ exists v, o = result_value v)
\/ exists l f, o = result_ref (Ref l f).
Proof.
introv E. destruct* o.
inverts E. right. destruct* r0.
Qed.
(* Inversion principle for [if_is_null_ref]: the scrutinee is a plain
   value, a reference with a non-null base (handled by [k2]), or a
   reference based at loc_null (handled by [k1]). *)
Lemma elim_if_is_null_ref : forall r k1 k2 rf,
if_is_null_ref r k1 k2 = rf ->
(exists v, r = result_value v) \/
(exists l f, l <> loc_null /\ r = Ref l f /\ rf = k2 r) \/
exists f, r = Ref loc_null f /\ rf = k1 f.
Proof.
introv E. destruct r.
left*.
right. destruct r. simpl in E.
case_if.
subst*.
left*.
Qed.
(* Inversion principle for [if_is_field_normal]: either the field is not
   normal and the combinator produced [wrong], or it is [field_normal f']. *)
Lemma elim_if_is_field_normal : forall h f k r,
if_is_field_normal h f k = r ->
(r = wrong h) \/ exists f', f = field_normal f'.
Proof. introv E. destruct f; simpls*. Qed.
(* Inversion principle for [if_eq l0 h o k1 k2]: the option is [None];
   or holds a non-location value (producing [wrong]); or holds exactly
   location [l0] (continuation [k1]); or holds some other location
   (continuation [k2]). *)
Lemma elim_if_eq : forall l0 h o k1 k2 r,
if_eq l0 h o k1 k2 = r ->
o = None \/
(exists v, o = Some v /\ r = wrong h) \/
(o = Some (value_loc l0) /\ r = k1 I) \/
exists l, o = Some (value_loc l) /\ l <> l0 /\ r = k2 l.
Proof.
introv E. destruct* o.
right. destruct v; inverts* E.
right. tests: (l0 = l).
left. split~. simpl. case_if*.
right. exists l. splits~. simpl. case_if*.
Qed.
(* Inversion principle for [if_not_eq], derived from [elim_if_eq]:
   the "equal to l0" case collapses into the [wrong] branch. *)
Lemma elim_if_not_eq : forall l0 h o k r,
if_not_eq l0 h o k = r ->
o = None \/
((exists h', wrong h' = r) /\ exists v, o = Some v) \/
exists l, o = Some (value_loc l) /\ l <> l0.
Proof.
introv E.
forwards* [eqr | [(v&eqo&eqr) | [(eqo&eqr) | (l&eqo&_&eqr)]]]: elim_if_eq E.
substs. simpls.
case_if.
branch 2. splits*.
branch 3. exists l. split~.
Qed.
(* Inversion principle for [if_is_string]: the option is [None], holds a
   non-string value (producing [wrong]), or holds a string. *)
Lemma elim_if_is_string : forall h o k r,
if_is_string h o k = r ->
o = None \/
((exists h', wrong h' = r) /\ exists v, o = Some v) \/
exists s, o = Some (value_string s).
Proof. introv E. destruct* o. right. destruct v; inverts* E. Qed.
(* Inversion principle for [if_binds_field]: either field [f] is absent
   at [l] (producing [wrong]), or it is bound to some value passed to
   the continuation. *)
Lemma elim_if_binds_field : forall f h l k r,
if_binds_field f h l k = r ->
(r = wrong h /\ ~indom h l f) \/
(exists v, r = k v /\ binds h l f v).
Proof.
introv E.
unfolds in E. case_if* in E.
right. eexists. split*.
rewrite* binds_equiv_read.
Qed.
(* Inversion principle for [if_binds_field_loc]: strengthens
   [elim_if_binds_field] with the requirement that the bound value be a
   location; any non-location binding falls into the [wrong] branch. *)
Lemma elim_if_binds_field_loc : forall f h l k r,
if_binds_field_loc f h l k = r ->
(r = wrong h /\ forall l', ~binds h l f (value_loc l')) \/
(exists l', r = k l' /\ binds h l f (value_loc l')).
Proof.
introv E. unfolds in E.
lets* [C1 | C2]: elim_if_binds_field E.
lets (H&H0): C1. left. split~. introv B.
false H0. rewrite* indom_equiv_binds.
lets (v&R&B): C2.
(* Every non-location constructor of [value] yields the wrong branch. *)
destruct v; try (
left; split~; introv B';
forwards~ H: binds_func B B'; discriminate H).
right. exists l0. split~.
Qed.
(* Inversion principle for [if_boolean]: the value is not a boolean
   (producing [wrong]), or it is [true] (continuation [k1]) or [false]
   (continuation [k2]). *)
Lemma elim_if_boolean : forall h v k1 k2 r,
if_boolean h v k1 k2 = r ->
(r = wrong h /\ forall b, v <> value_bool b) \/
(r = k1 I /\ v = value_bool true) \/
(r = k2 I /\ v = value_bool false).
Proof.
introv E. destruct v; simpls;
try (left; subst; split; [reflexivity | discriminate]).
right. destruct b; [left* | right*].
Qed.
(* Inversion principle for [if_binds_scope_body]: either the combinator
   produced [wrong], or location [l] carries both a [field_scope] and a
   [field_body] binding of the right shape, and the continuation was
   applied to the stored scope, formal parameter and body. *)
Lemma elim_if_binds_scope_body : forall h l k r,
if_binds_scope_body h l k = r ->
r = wrong h \/
(indom h l field_body /\
indom h l field_scope /\
exists s f e, read h l field_scope = value_scope s /\
read h l field_body = value_body f e /\ k s f e = r).
Proof.
introv E. unfold if_binds_scope_body in E.
lets* [C1 | C2]: elim_if_binds_field E.
lets (v&R&B): C2. clear C2.
destruct v; try (left~; fail).
(* Peel the second (inner) if_binds_field for field_body. *)
symmetry in R. lets* [C1 | C2]: elim_if_binds_field R.
lets (v&R'&B'): C2. clear C2.
destruct v; try (left~; fail).
right. splits; try rewrite* indom_equiv_binds.
repeat eexists; eauto; rewrite* <- binds_equiv_read; rewrite* indom_equiv_binds.
Qed.
(* Convenience corollary of the [safety] theorem: a reduction step from
   a well-formed heap and scope preserves heap well-formedness, scope
   well-formedness, and yields a well-formed result. *)
Lemma sub_safety : forall h h' s e r,
red h s e h' r -> ok_heap h -> ok_scope h s ->
ok_heap h' /\ ok_scope h' s /\ ok_result h' r.
Proof. intros. splits; apply* safety. Qed.
(* Correctness of the computable argument-binding function: the
   field/value association list it produces satisfies the inductive
   [arguments] judgment.  By induction on the list of formals [xs],
   with a case split on whether actuals [vs] remain. *)
Lemma arguments_comp_correct : forall xs vs lfv,
arguments_comp xs vs = lfv ->
arguments xs vs lfv.
Proof.
induction xs; introv E.
simpls. subst. constructors.
destruct vs.
(* Missing actuals: remaining formals are bound to undefined. *)
simpls. rewrite <- E. apply* arguments_nil_values.
simpls. rewrite <- E. apply* arguments_cons.
Qed.
(**************************************************************)
(** ** Tactics for the correctness of the interpreter *)
(* Naming tactics for intermediate heaps.  Each tactic abstracts a heap
   construction ([write], [write_fields], [reserve_local_vars],
   [alloc_obj]) occurring in the goal — or, for the [_in] variants, in a
   hypothesis — as a fresh variable [h'] via [sets_eq], together with
   its defining equation.  The [sub_] variants target the inner heap of
   a nested construction. *)

(* Name a [write] in the goal. *)
Ltac name_heap_write h' :=
match goal with |- context [ write ?h ?l ?f ?v ] =>
sets_eq h': (write h l f v) end.
(* Name the inner [write] of a nested [write (write ...)] in the goal. *)
Ltac name_heap_sub_write h' :=
match goal with |- context [ write (write ?h ?l ?f ?v) _ _ _ ] =>
sets_eq h': (write h l f v) end.
(* Name a [write_fields] in the goal. *)
Ltac name_heap_write_fields h' :=
match goal with |- context [ write_fields ?h ?l ?li ] =>
sets_eq h': (write_fields h l li) end.
(* Name a [reserve_local_vars] in the goal. *)
Ltac name_heap_reserve_local_vars h' :=
match goal with |- context [ reserve_local_vars ?h ?l ?li ] =>
sets_eq h': (reserve_local_vars h l li) end.
(* Name an [alloc_obj] in the goal. *)
Ltac name_heap_alloc_obj H h' :=
match goal with |- context [ alloc_obj ?h ?l ?l' ] =>
sets_eq h': (alloc_obj h l l') end.
(* Name a [write] occurring in hypothesis [H]. *)
Ltac name_heap_write_in H h' :=
match goal with H: context [ write ?h ?l ?f ?v ] |- _ =>
sets_eq h': (write h l f v) end.
(* Name the inner [write] of a nested write in hypothesis [H]. *)
Ltac name_heap_sub_write_in H h' :=
match goal with H: context [ write (write ?h ?l ?f ?v) _ _ _ ] |- _ =>
sets_eq h': (write h l f v) end.
(* Name a [write_fields] occurring in hypothesis [H]. *)
Ltac name_heap_write_fields_in H h' :=
match goal with H: context [ write_fields ?h ?l ?li ] |- _ =>
sets_eq h': (write_fields h l li) end.
(* Name the inner [write_fields] of a nested one in hypothesis [H]. *)
Ltac name_heap_sub_write_fields_in H h' :=
match goal with H: context [ write_fields (write_fields ?h ?l ?li) _ _ ] |- _ =>
sets_eq h': (write_fields h l li) end.
(* Name a [reserve_local_vars] occurring in hypothesis [H]. *)
Ltac name_heap_reserve_local_vars_in H h' :=
match goal with H: context [ reserve_local_vars ?h ?l ?li ] |- _ =>
sets_eq h': (reserve_local_vars h l li) end.
(* Name an [alloc_obj] occurring in hypothesis [H]. *)
Ltac name_heap_alloc_obj_in H h' :=
match goal with H: context [ alloc_obj ?h ?l ?l' ] |- _ =>
sets_eq h': (alloc_obj h l l') end.
(**************************************************************)
(** ** Correctness of the implementation of operators *)
(* Correctness of the computable [typeof]: a successful computation is
   reflected in the [typeof_red] judgment.  Only the location case needs
   work: we decide whether [l] carries [field_body] (function) or not
   (plain object), using heap well-formedness for the negative case. *)
Lemma typeof_comp_correct : forall h v str,
typeof_comp h v = Some str ->
ok_heap h ->
typeof_red h v str.
Proof.
introv E OK.
destruct v; try (inverts E; constructor).
simpl in E. case_if; inverts E.
rewrite indom_equiv_binds in i. lets (v&B): i.
apply* typeof_red_function. exists* v.
(* Object case: show [l] has no function body via ok_heap_function. *)
lets OKf: ok_heap_function OK. unfolds in OKf.
apply* typeof_red_object. introv (v&B). false n.
forwards (?&?&?&?&F&?&?): OKf B.
rewrite* indom_equiv_binds.
Qed.
(* Well-founded order justifying termination of [binary_op_comp].  Only
   the [instanceof] operator recurses: it walks the prototype chain of
   its right operand ([l2] steps to its prototype [l4]) while the left
   operand's [field_normal_prototype] ([l3]) has not yet been reached. *)
Inductive proto_closer_for_binary_op_comp : binary (binary_op * heap * value * value) :=
| proto_closer_for_binary_op_comp_instanceof : forall h (l1 l2 l3 l4:loc),
ok_heap h ->
binds h l1 field_normal_prototype (value_loc l3) ->
binds h l2 field_proto (value_loc l4) ->
l3 <> l4 ->
proto_closer_for_binary_op_comp (binary_op_instanceof, h, value_loc l1, value_loc l4) (binary_op_instanceof, h, value_loc l1, value_loc l2).
(* Well-foundedness of [proto_closer_for_binary_op_comp]: same argument
   as [proto_closer_wf] — prototype chains terminate in a well-formed
   heap, so the instanceof walk cannot descend forever. *)
Lemma proto_closer_for_binary_op_comp_wf : wf proto_closer_for_binary_op_comp.
Proof.
intros [[[b h] v1] v2]. constructor.
intros [[[b' h'] v1'] v2'] H. inverts H as O B1 B2 D.
lets~ N: ok_heap_protochain B2. inverts N as B3 P.
false. forwards*: ok_heap_null B2.
forwards*: binds_func_loc B3 B2. subst.
clears O B1 B2 B3 D.
induction P; constructor; intros [[[b'' h''] v1''] v2''] M; inverts M.
false. forwards*: ok_heap_null.
forwards E: binds_func_loc H H9. subst*.
Qed.
(* Unfolding (fixed-point) equation for [binary_op_comp], valid on any
   well-formed heap.  Uses [FixFun4_fix_partial] with the instanceof
   termination order above; the invariant carried across recursive
   calls is heap well-formedness. *)
Lemma binary_op_comp_fix : forall h op v1 v2,
ok_heap h -> binary_op_comp op h v1 v2 = binary_op_comp_body binary_op_comp op h v1 v2.
Proof.
introv O. applys~ (FixFun4_fix_partial proto_closer_for_binary_op_comp (fun _ h _ _ => ok_heap h)).
apply proto_closer_for_binary_op_comp_wf.
(* Contraction: the only recursive call (instanceof) is on a strictly
   smaller quadruple. *)
introv O1 Cont. unfolds. destruct~ x1.
repeat case_if~. destruct~ x3. simpl. destruct~ x4.
case_if~; symmetry; case_if~.
sets_eq v: (read x2 l field_normal_prototype). destruct~ v.
sets_eq v: (read x2 l0 field_proto). destruct~ v.
simpls. repeat case_if~.
rewrite~ Cont.
apply~ proto_closer_for_binary_op_comp_instanceof.
rewrite* binds_equiv_read.
rewrite* binds_equiv_read.
auto*.
Qed.
(* Correctness of the computable binary operators: a successful
   computation is reflected in the [binary_op_red] judgment.  Case
   analysis on the operator; arithmetic and equality cases are direct,
   [instanceof] requires an induction on the protochain of the right
   operand (mirroring the recursion of [binary_op_comp]), and [in]
   reduces to correctness of [proto_comp]. *)
Lemma binary_op_comp_correct : forall b h v1 v2 r,
binary_op_comp b h v1 v2 = Some r ->
ok_heap h -> ok_value h v1 -> ok_value h v2 ->
binary_op_red b h v1 v2 r.
Proof.
introv E OK O1 O2. rewrite~ binary_op_comp_fix in E.
destruct b; simpls.
(* add *)
destruct v1; destruct v2; simpls; tryfalse.
inverts E. constructor*.
inverts E. constructor*.
(* mult *)
destruct v1; destruct v2; simpls; tryfalse.
inverts E. constructor*.
(* div *)
destruct v1; destruct v2; simpls; tryfalse.
inverts E. constructor*.
(* equal *)
case_if in E as B; tryfalse. lets (B1&B2): a. inverts~ E.
rewrite~ value_compare_correct. constructor~.
(* instanceof *)
destruct v1; simpls; tryfalse.
apply* binary_op_red_instanceof.
case_if in E.
inverts E. apply* instanceof_red_value.
inverts* O2.
unfolds in H. rewrite~ indom_equiv_binds in H.
lets (v0 & B): (rm H).
(* Induct on the protochain of the right operand's location. *)
lets~ N: ok_heap_protochain B.
clear n v0 B. induction N.
false. case_if in E.
set_eq v: (read h l field_normal_prototype) in E.
destruct v; tryfalse. simpl in E. case_if in E.
rewrite~ indom_equiv_binds in i0. lets (v & B): i0.
forwards*: ok_heap_null B.
gen E; intro E. case_if in E. (* FIXME: It seems there is a bug in `case_if' that make it ignore what stands after a `in' argument. *)
set_eq v: (read h l field_normal_prototype) in E.
destruct v; simpls; tryfalse. case_if in E.
set_eq v: (read h l0 field_proto) in E.
destruct v; simpls; tryfalse.
asserts: (l2 = l').
applys~ binds_func_loc H.
rewrite* binds_equiv_read.
subst l'. case_if in E.
(* Prototype reached: the instanceof test is true. *)
inverts E. subst. apply* instanceof_red_true.
rewrite* binds_equiv_read.
apply* instanceof_red_trans.
rewrite* binds_equiv_read.
tests: (l2 = loc_null).
clear IHN. rewrite~ binary_op_comp_fix in E.
simpl in E. case_if in E.
inverts E. constructor~.
false n0. constructor.
apply* IHN; clear IHN.
rewrite~ binary_op_comp_fix in E.
simpl in E. case_if in E.
false. inverts~ b.
apply* E.
(* in *)
destruct v1; destruct v2; simpls; tryfalse. inverts E.
inverts O2.
inverts H.
apply* binary_op_red_in.
constructor.
case_if.
rewrite~ proto_comp_fix. unfold proto_comp_body.
case_if. rewrite decide_spec. fold_bool. apply* eqb_eq.
apply* binary_op_red_in.
apply* proto_comp_correct.
apply* indom_bound.
rewrite decide_spec. case_if*.
rewrite* eqb_eq.
rewrite* eqb_neq.
Qed.
(* Correctness of the computable unary operators: a successful
   computation is reflected in [unary_op_red].  Only [not] and [void]
   are computed here; the other operators make the computation fail
   (hence [tryfalse]). *)
Lemma unary_op_comp_correct : forall b h v r,
unary_op_comp b h v = Some r ->
unary_op_red b h v r.
Proof.
introv E.
destruct b; simpls; tryfalse.
(* not *)
destruct v; tryfalse.
inverts~ E. apply* unary_op_red_not.
(* void *)
inverts~ E. apply* unary_op_red_void.
Qed.
(**************************************************************)
(** ** Correctness of the interpreter *)
(* Accumulator-shifting lemma for [run_list_value]: running with the
   accumulator pre-loaded with [vs0] and continuation [k] is the same as
   running without it, provided the continuation [k'] re-prepends
   [rev vs0] itself.  By induction on the fuel [m]. *)
Lemma run_list_value_add_value : forall m s h0 es vs vs0 k k' r,
run_list_value m h0 s (vs ++ vs0) es k = r ->
(forall h vs', k' h vs' = k h (LibList.rev vs0 ++ vs')) ->
run_list_value m h0 s vs es k' = r.
Proof.
induction m.
simpl. intros; subst~.
introv E T. destruct es; simpls.
rewrite <- E. rewrite rev_app. apply* T.
destruct~ run. destruct~ r0. simpls.
destruct~ getvalue_comp. simpls. apply* IHm.
Qed.
Theorem run_correct : forall m h s e h' v,
run m h s e = out_return h' (ret_result v) ->
ok_heap h ->
ok_scope h s ->
red h s e h' v
with run_list_value_correct : forall m h1 s es k h3 v,
run_list_value m h1 s nil es k = out_return h3 (ret_result v) ->
ok_heap h1 ->
ok_scope h1 s ->
exists h2 vs,
k h2 vs = out_return h3 (ret_result v) /\
red_list_value h1 s es h2 vs.
Proof.
intro m. destruct m.
introv R OK OKL; false.
destruct e; introv R OK OKL; simpl in R;
try (inverts* R; fail).
(* this *)
forwards [(?&_) | (l'&eq&B)]: elim_if_binds_field_loc R.
forwards*: ret_not_wrong H.
inverts* eq.
apply* red_this.
apply* scope_comp_correct.
apply* proto_comp_correct.
sets_eq ls: (scope_comp h field_this s).
symmetry in EQls. forwards* Pro: scope_comp_correct EQls.
inverts Pro.
rewrite <- H in B.
rewrite* proto_comp_fix in B.
unfold proto_comp_body in B. case_if~ in B.
forwards*: ok_heap_null B.
inverts keep H; tryfalse.
exists~ field_this.
apply* binds_bound.
asserts (lp&Dlp&Plp): (exists l', l' <> loc_null /\ proto h field_this ls l').
apply* scopes_proto_not_null.
intro_subst.
rewrite* proto_comp_fix in B.
unfold proto_comp_body in B. case_if~ in B.
forwards*: ok_heap_null B.
inverts* Plp.
exists~ field_this.
apply* binds_bound.
(* variable *)
inverts* R.
apply red_variable.
apply* scope_comp_correct.
(* literal *)
inverts* R. constructor*.
(* obj *)
name_heap_alloc_obj_in R h3.
sets_eq sl: (split l). destruct sl as [lx lx0].
asserts OK3: (ok_heap h3).
subst h3. apply* ok_heap_alloc_obj.
apply* ok_heap_protochain_indom. applys* ok_heap_special_obj_proto OK.
right. applys* ok_heap_special_obj_proto OK.
apply fresh_for_spec.
asserts OKL3: (ok_scope h3 s).
applys* ok_scope_extends h.
subst h3. apply* extends_proto_write.
forwards~ (h2&vs&R'&IHR): run_list_value_correct R.
inverts R'.
apply* red_object.
apply* fresh_for_spec.
rewrite <- EQh3. apply IHR.
apply* arguments_comp_correct.
(* functions *)
destruct o.
(* --named *)
inverts R.
apply* red_function_named; apply* fresh_for_spec.
(* --unnamed *)
inverts R.
apply* red_function_unnamed; apply* fresh_for_spec.
(* access *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
forwards [eqr | [(eqr&v'&eqo) | (l&eqo&diffno)]]: elim_if_not_eq R.
rewrite eqr in R. simpl in R. forwards*: wrong_not_ret R.
lets (h'0&eqr'): eqr. forwards*: wrong_not_ret eqr'.
rewrite eqo in R. simpl in R.
case_if* in R; tryfalse.
forwards [(?&?) | (r2&h2&eq2)]: elim_if_success R; tryfalse.
rewrite eq2 in R. simpl in R.
forwards [eqr2 | [(eqr2&v''&eqr2') | (str&eqstr)]]: elim_if_is_string R.
rewrite eqr2 in R. simpl in R. forwards*: wrong_not_ret R.
lets (h'0&eqr2''): eqr2. forwards*: wrong_not_ret eqr2''.
rewrite eqstr in R; simpl in R.
inverts* R.
forwards* R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
forwards* R2: run_correct eq2.
apply* red_access; try apply* getvalue_comp_correct; apply* safety.
(* member *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R; simpl in R.
forwards [((?&?)&(?&?)) | (l&f&eq2)]: elim_if_is_ref R.
rewrite H0 in R. simpl in R. forwards*: wrong_not_ret R.
rewrite eq2 in R. simpl in R.
forwards [? | (f'&eq3)]: elim_if_is_field_normal R.
false* wrong_not_ret.
rewrite eq3 in R. simpl in R.
subst. inverts* R.
forwards~ R1: run_correct eq1.
assert (f' = s0); subst.
inverts R1. inverts H9. inverts* H10.
apply* red_member.
(* new *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
forwards [eqr | [((h2&eqr)&(v'&eqo)) | (l1&eqo&diffno)]]: elim_if_not_eq R.
rewrite eqr in R. simpl in R. forwards*: wrong_not_ret R.
forwards*: wrong_not_ret eqr.
rewrite eqo in R; simpl in R.
case_if* in R.
forwards* [? | (Ib&Is&sc&f&e2&Escope&Ebody&R')]: elim_if_binds_scope_body R.
subst. forwards*: ret_not_wrong H.
clear R. rename R' into R.
forwards [(?&?) | (v'&R'&Bv')]: elim_if_binds_field R.
forwards*: ret_not_wrong H.
clear R. symmetry in R'. rename R' into R.
forwards* R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1&Ext1): safety R1.
forwards~ (h2&vs&R'&IHR): run_list_value_correct R.
clear R. rename R' into R.
forwards [(?&?) | [(?&h0&eq3&eqv1) | (?&h0&v1&eq3&eqv1)]]: elim_if_success_value R; tryfalse.
rewrite eq3 in R. simpls. rewrite eqv1 in R. simpls. forwards*: wrong_not_ret R.
name_heap_reserve_local_vars_in eq3 h7.
name_heap_write_fields_in EQh7 h6.
name_heap_write_in EQh6 h5.
name_heap_sub_write_in EQh5 h4.
name_heap_alloc_obj_in EQh4 h3.
rewrite eq3 in R. simpls. rewrite eqv1 in R. simpls.
inverts R.
forwards* (OK2&OKL2&OKlv2&Ext2): safety_list IHR.
(* What follows is nearly a copy/paste of the corresponding proof in JsSafety. *)
asserts O3: (ok_heap h3). subst h3. apply* ok_heap_alloc_obj.
applys* obj_or_glob_of_value_protochain h1 l1 field_normal_prototype v'.
right. forwards~: ok_heap_special_obj_proto h1. apply* OK1.
forwards [(l3&El3)|?]: (value_loc_or_not v').
subst v'. simpl. forwards OV: ok_heap_ok_value OK1.
unfolds in OV. forwards~: OV Bv'. case_if*.
rewrite~ obj_or_glob_of_value_not_loc.
apply* fresh_for_spec.
asserts S3: (ok_scope h3 s). subst h3. apply* ok_scope_write.
asserts O4: (ok_heap h4). subst h4. apply* ok_heap_alloc_obj.
constructor. apply* fresh_for_spec.
asserts S4: (ok_scope h4 s). subst h4. apply* ok_scope_write.
asserts O5: (ok_heap h5). subst h5. forwards*: ok_heap_write_this h4 (fresh_for h3) (fresh_for h2).
subst h4. apply* binds_write_neq. apply* binds_write_eq.
apply* fresh_for_spec.
applys neq_sym. applys~ fresh_binds_neq h3. apply* fresh_for_spec.
applys~ ok_heap_special_global_this. apply* O3.
subst h4 h3. do 2 apply* indom_write. apply* indom_write_eq.
asserts S5: (ok_scope h5 s). subst h5. apply* ok_scope_write.
asserts O6: (ok_heap h6). subst h6. apply* ok_heap_write_fields_user.
subst h5 h4 h3. apply* indom_write. indom_simpl.
apply* fresh_for_spec.
apply* arguments_ok_value.
apply* arguments_comp_correct.
applys~ Forall_trans value (ok_value h2).
introv Oa. applys~ ok_value_extends h2.
subst h5 h4 h3. repeat apply* extends_proto_write_trans.
asserts S6: (ok_scope h6 s). subst h6. apply* ok_scope_write_fields.
asserts O7: (ok_heap h7). subst h7. apply* ok_heap_write_fields_user_undef.
subst h6 h5 h4. apply* indom_write_fields. apply* indom_write. apply* indom_write_eq.
apply* fresh_for_spec.
asserts S7: (ok_scope h7 s). applys* ok_scope_extends.
subst h7. apply* extends_proto_write_fields_trans.
assert (ok_scope h7 (fresh_for h3 :: sc)).
subst h7. apply* ok_scope_write_fields.
subst h6. apply* ok_scope_write_fields.
subst h5. apply* ok_scope_write.
subst h4. apply* ok_scope_cons.
subst h3. repeat apply* ok_scope_write.
forwards~ Of: ok_heap_function OK1.
unfolds in Of.
rewrite* <- binds_equiv_read in Escope.
forwards: Of l1.
left. apply Escope.
applys* ok_scope_extends h1.
apply* ok_heap_binds_ok_scope.
apply* indom_write_eq.
forwards~ R2: run_correct eq3.
forwards* (O'&S'&OKr2&E'): safety R2.
apply* red_new.
apply* getvalue_comp_correct.
rewrite* binds_equiv_read.
rewrite* binds_equiv_read.
apply* fresh_for_spec.
apply* fresh_for_spec.
apply* arguments_comp_correct.
subst*.
apply* getvalue_comp_correct.
(* call *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
forwards* [eqr | [(v1&eqo&eqr) | [(eqo&eqr) | (l1&eqo¬Eval&eqr)]]]: elim_if_eq R.
rewrite eqr in R. simpl in R. forwards*: wrong_not_ret R.
forwards*: ret_not_wrong eqr.
(* --call to eval *)
unfold make_error in eqr. inverts* eqr.
(* -- call to function *)
clears R. symmetry in eqr. rename eqr into R.
forwards* [? | (Ib&Is&sc&f&e2&Escope&Ebody&R')]: elim_if_binds_scope_body R.
subst. forwards*: ret_not_wrong H.
clear R. rename R' into R.
forwards* R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1&Ext1): safety R1.
forwards~ (h2&vs&R'&IHR): run_list_value_correct R.
clears R. rename R' into R.
forwards [(?&?) | [(r2&h3&eq2&eqv0) | (r2&h3&v2&eq2&eqv0)]]: elim_if_success_value R; tryfalse.
rewrite eq2 in R. simpls. rewrite eqv0 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq2 in R. simpls. rewrite eqv0 in R. simpls.
inverts* R.
forwards* (OK2&OKL2&OKlv2&Ext2): safety_list IHR.
name_heap_write_fields_in eq2 h6.
asserts OK2sc: (ok_scope h2 sc).
lets Of: ok_heap_function OK1. unfolds in Of.
rewrite* <- binds_equiv_read in Escope.
forwards: Of l1.
left. apply Escope.
applys* ok_scope_extends h1.
apply* ok_heap_binds_ok_scope.
name_heap_sub_write_fields_in EQh6 h5.
name_heap_write_in EQh5 h4.
name_heap_alloc_obj_in EQh4 h3.
asserts OK6: (ok_heap h6).
(* This part is very closed to the corresponding one in JsSafety:
maybe it can be factorized?*)
asserts: (has_some_proto h3 (fresh_for h2)). subst h3. indom_simpl.
subst h6. apply* ok_heap_write_fields_user_undef.
subst h5 h4 h3. apply~ indom_write_fields. indom_simpl.
apply* fresh_for_spec.
subst h5. apply* ok_heap_write_fields_user.
subst h4 h3. indom_simpl.
apply* fresh_for_spec.
subst h4. applys ok_heap_write_this h3 (fresh_for h2) (get_this h1 r1) (@eq_refl).
subst h3. apply* ok_heap_alloc_obj. constructor.
apply* fresh_for_spec.
subst h3. apply* binds_write_neq. apply* binds_write_eq.
apply* fresh_for_spec.
applys neq_sym. applys~ fresh_binds_neq h2. apply* fresh_for_spec.
applys~ ok_heap_special_global_this. apply* OK2.
auto.
destruct r1 as [v1|[l0 f0]].
subst h3. do 2 apply* indom_write.
apply* ok_heap_special_global_proto. apply* OK2.
unfold get_this. case_if.
subst h3. do 2 apply* indom_write.
apply* ok_heap_special_global_proto. apply* OK2.
subst h3.
do 2 apply* has_some_proto_write. inverts OKr1 as [N|P].
subst l0. simpl in eqo. case_if in eqo.
forwards~: extends_proto_elim Ext2 P.
apply* arguments_ok_value. apply* arguments_comp_correct.
applys~ Forall_trans value (ok_value h2).
introv Oa. applys~ ok_value_extends h2.
subst h4 h3. repeat apply* extends_proto_write_trans.
asserts OKL6: (ok_scope h6 (fresh_for h2 :: sc)).
subst h6. apply* ok_scope_write_fields.
subst h5. apply* ok_scope_write_fields.
subst h4. apply* ok_scope_write.
subst h3. apply* ok_scope_cons.
apply* ok_scope_write.
apply* indom_write_eq.
forwards* R2: run_correct eq2.
forwards* (_&OKL'&OKr2&Ext6): safety R2.
apply* red_call.
apply* getvalue_comp_correct.
rewrite* binds_equiv_read.
rewrite* binds_equiv_read.
apply* fresh_for_spec.
apply* arguments_comp_correct.
rewrite <- EQh3. rewrite <- EQh4. rewrite <- EQh5.
unfold reserve_local_vars. rewrite <- EQh6.
apply* R2.
apply* getvalue_comp_correct.
apply* safety.
(* unary_op *)
destruct u.
(* not *)
forwards [(?&?) | [(v0&h1&eq1&eqv0) | (v0&h1&b'&eq1&eqv0)]]: elim_if_success_value R; tryfalse.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls.
forwards [? | (v2&eqv2)]: elim_if_defined R.
rewrite H in R; simpl in R; forwards*: wrong_not_ret R.
rewrite eqv2 in R; simpl in R.
destruct b'; tryfalse. inverts eqv2.
inverts~ R.
forwards~ R': run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R'.
apply* red_unary_op.
apply* getvalue_comp_correct.
apply* unary_op_comp_correct.
(* delete *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
case_if in R; inverts R.
apply* red_delete_false.
apply* red_delete_true.
(* typeof *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
forwards [(v1&eqr) | [(l&f&diffno&eqr&eqres) | (f&eqr&eqres)]]: elim_if_is_null_ref R.
rewrite eqr in R. simpl in R.
forwards [? | (v2&eqv2)]: elim_if_defined R.
rewrite H in R. simpl in R. forwards*: wrong_not_ret R.
rewrite eqv2 in R. simpl in R. inverts R.
forwards~ R1: run_correct eq1.
forwards* (OK'&OKL'&OKr1): sub_safety R1.
apply* red_typeof_value.
subst r1. constructor.
apply* typeof_comp_correct.
clear R. symmetry in eqres.
forwards [? | (v2&eqv2)]: elim_if_defined eqres.
rewrite H in eqres. simpl in eqres. forwards*: wrong_not_ret eqres.
rewrite eqv2 in eqres. simpl in eqres.
forwards [? | (v3&eqv3)]: elim_if_defined eqres.
rewrite H in eqres. simpl in eqres. forwards*: wrong_not_ret eqres.
rewrite eqv3 in eqres. simpl in eqres. inverts eqres.
forwards~ R1: run_correct eq1.
forwards* (OK'&OKL'&OKr1): sub_safety R1.
apply* red_typeof_value.
apply* getvalue_comp_correct.
apply* typeof_comp_correct.
clear R. inverts eqres.
forwards~ R1: run_correct eq1. subst r1.
apply* red_typeof_undefined.
(* The four next cases are copy/pasted. *)
(* pre_incr *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
forwards [(eqw&v'&eqv') | (l&f&eq')]: elim_if_is_ref R.
lets (h'0&eqw'): eqw. forwards*: wrong_not_ret eqw'.
rewrite eq' in R. simpl in R.
forwards [? | (v1&eqv1)]: elim_if_defined R.
rewrite H in R. simpl in R. forwards*: wrong_not_ret R.
destruct f; tryfalse.
rewrite eqv1 in R. simpl in R.
forwards [? | (v2&eqv2)]: elim_if_defined R.
rewrite H in R. simpl in R. forwards*: wrong_not_ret R.
rewrite eqv2 in R. simpl in R.
inverts R. substs.
forwards~ R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
asserts OV1: (ok_value h1 v1).
repeat case_if in eqv1; inverts eqv1. constructor*.
lets OKV: ok_heap_ok_value OK1. unfolds in OKV.
apply* OKV. rewrite* binds_equiv_read.
apply* proto_indom. apply* proto_comp_correct.
inverts~ OKr1. inverts* H0.
apply* indom_bound.
auto*.
asserts OKV1': (ok_value h v1).
inverts* OV1. false.
rewrite~ binary_op_comp_fix in eqv2. simpl in eqv2.
false eqv2.
apply* red_pre_incr.
apply* getvalue_comp_correct.
apply* binary_op_comp_correct.
(* post_incr *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
forwards [(eqw&v'&eqv') | (l&f&eq')]: elim_if_is_ref R.
lets (h'0&eqw'): eqw. forwards*: wrong_not_ret eqw'.
rewrite eq' in R. simpl in R.
forwards [? | (v1&eqv1)]: elim_if_defined R.
rewrite H in R. simpl in R. forwards*: wrong_not_ret R.
destruct f; tryfalse.
rewrite eqv1 in R. simpl in R.
forwards [? | (v2&eqv2)]: elim_if_defined R.
rewrite H in R. simpl in R. forwards*: wrong_not_ret R.
rewrite eqv2 in R. simpl in R.
inverts R. substs.
forwards~ R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
asserts OV1: (ok_value h1 v1).
repeat case_if in eqv1; inverts eqv1. constructor*.
lets OKV: ok_heap_ok_value OK1. unfolds in OKV.
apply* OKV. rewrite* binds_equiv_read.
apply* proto_indom. apply* proto_comp_correct.
inverts~ OKr1. inverts* H0.
apply* indom_bound.
auto*.
asserts OKV1': (ok_value h v1).
inverts* OV1. false.
rewrite~ binary_op_comp_fix in eqv2. simpl in eqv2.
false eqv2.
apply* red_post_incr.
apply* getvalue_comp_correct.
apply* binary_op_comp_correct.
(* pre_decr *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
forwards [(eqw&v'&eqv') | (l&f&eq')]: elim_if_is_ref R.
lets (h'0&eqw'): eqw. forwards*: wrong_not_ret eqw'.
rewrite eq' in R. simpl in R.
forwards [? | (v1&eqv1)]: elim_if_defined R.
rewrite H in R. simpl in R. forwards*: wrong_not_ret R.
destruct f; tryfalse.
rewrite eqv1 in R. simpl in R.
forwards [? | (v2&eqv2)]: elim_if_defined R.
rewrite H in R. simpl in R. forwards*: wrong_not_ret R.
rewrite eqv2 in R. simpl in R.
inverts R. substs.
forwards~ R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
asserts OV1: (ok_value h1 v1).
repeat case_if in eqv1; inverts eqv1. constructor*.
lets OKV: ok_heap_ok_value OK1. unfolds in OKV.
apply* OKV. rewrite* binds_equiv_read.
apply* proto_indom. apply* proto_comp_correct.
inverts~ OKr1. inverts* H0.
apply* indom_bound.
auto*.
asserts OKV1': (ok_value h v1).
inverts* OV1. false.
rewrite~ binary_op_comp_fix in eqv2. simpl in eqv2.
false eqv2.
apply* red_pre_decr.
apply* getvalue_comp_correct.
apply* binary_op_comp_correct.
(* post_decr *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
forwards [(eqw&v'&eqv') | (l&f&eq')]: elim_if_is_ref R.
lets (h'0&eqw'): eqw. forwards*: wrong_not_ret eqw'.
rewrite eq' in R. simpl in R.
forwards [? | (v1&eqv1)]: elim_if_defined R.
rewrite H in R. simpl in R. forwards*: wrong_not_ret R.
destruct f; tryfalse.
rewrite eqv1 in R. simpl in R.
forwards [? | (v2&eqv2)]: elim_if_defined R.
rewrite H in R. simpl in R. forwards*: wrong_not_ret R.
rewrite eqv2 in R. simpl in R.
inverts R. substs.
forwards~ R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
asserts OV1: (ok_value h1 v1).
repeat case_if in eqv1; inverts eqv1. constructor*.
lets OKV: ok_heap_ok_value OK1. unfolds in OKV.
apply* OKV. rewrite* binds_equiv_read.
apply* proto_indom. apply* proto_comp_correct.
inverts~ OKr1. inverts* H0.
apply* indom_bound.
auto*.
asserts OKV1': (ok_value h v1).
inverts* OV1. false.
rewrite~ binary_op_comp_fix in eqv2. simpl in eqv2.
false eqv2.
apply* red_post_decr.
apply* getvalue_comp_correct.
apply* binary_op_comp_correct.
(* void *) (* Note: this is more or less a copy/paste of the proof of `not' above. *)
forwards [(?&?) | [(v0&h1&eq1&eqv0) | (v0&h1&b'&eq1&eqv0)]]: elim_if_success_value R; tryfalse.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls.
inverts~ R.
forwards~ R': run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R'.
apply* red_unary_op.
apply* getvalue_comp_correct.
apply* unary_op_comp_correct.
(* binary_op *)
forwards [(?&?) | [(v0&h1&eq1&eqv0) | (v0&h1&b'&eq1&eqv0)]]: elim_if_success_value R; tryfalse.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls.
forwards [(?&?) | [(v1&h2&eq2&eqv1) | (v1&h2&b''&eq2&eqv1)]]: elim_if_success_value R; tryfalse.
rewrite eq2 in R. simpls. rewrite eqv1 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq2 in R. simpls. rewrite eqv1 in R. simpls.
forwards [? | (v3&eqv3)]: elim_if_defined R.
rewrite H in R. simpl in R. forwards*: wrong_not_ret R.
rewrite eqv3 in R. simpl in R.
inverts* R.
forwards* He1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety He1.
forwards* R2: run_correct eq2.
forwards* (OK2&OKL2&OKr2&Ext'): safety R2.
forwards* G0: getvalue_comp_correct eqv0.
forwards* G1: getvalue_comp_correct eqv1.
apply* red_binary_op.
apply* binary_op_comp_correct.
applys* ok_value_extends h1. forwards* O0: ok_result_prove G0.
inverts~ O0.
forwards* O1: ok_result_prove G1. inverts~ O1.
(* and *)
forwards [(?&?) | [(v0&h1&eq1&eqv0) | (v0&h1&b'&eq1&eqv0)]]: elim_if_success_value R; tryfalse.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls.
forwards~ R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
forwards [(?&?) | [(eq2&eqv) | (eq2&eqv)]]: elim_if_boolean R.
forwards*: ret_not_wrong H.
subst b'. simpls.
forwards [(?&?) | [(v1&h2&eq3&eqv1) | (v1&h2&b''&eq3&eqv1)]]: elim_if_success_value R; tryfalse.
rewrite eq3 in R. simpls. rewrite eqv1 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq3 in R; simpls. rewrite eqv1 in R. simpls.
forwards~ R2: run_correct eq3.
forwards* (OK2&OKL2&OKr2): sub_safety R2.
inverts~ R.
apply* red_and_true.
apply* getvalue_comp_correct.
apply* getvalue_comp_correct.
inverts~ eq2.
subst b'. simpls.
apply* red_and_false.
apply* getvalue_comp_correct.
(* or *)
forwards [(?&?) | [(v0&h1&eq1&eqv0) | (v0&h1&b'&eq1&eqv0)]]: elim_if_success_value R; tryfalse.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls.
forwards~ R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
forwards [(?&?) | [(eq2&eqv) | (eq2&eqv)]]: elim_if_boolean R.
forwards*: ret_not_wrong H.
subst b'. simpls.
inverts~ R.
apply* red_or_true.
apply* getvalue_comp_correct.
subst b'. simpls.
forwards [(?&?) | [(v1&h2&eq3&eqv1) | (v1&h2&b''&eq3&eqv1)]]: elim_if_success_value R; tryfalse.
rewrite eq3 in R. simpls. rewrite eqv1 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq3 in R. simpls. rewrite eqv1 in R. simpls.
forwards~ R2: run_correct eq3.
forwards* (OK2&OKL2&OKr2): sub_safety R2.
inverts~ R.
apply* red_or_false.
apply* getvalue_comp_correct.
apply* getvalue_comp_correct.
(* assign *)
destruct o.
(* with an operator *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
forwards [(eqw&v'&eqv') | (l&f&eq')]: elim_if_is_ref R.
lets (h'0&eqw'): eqw. forwards*: wrong_not_ret eqw'.
rewrite eq' in R. simpl in R.
forwards [? | (v1&eqv1)]: elim_if_defined R.
rewrite H in R. simpl in R. forwards*: wrong_not_ret R.
rewrite eqv1 in R. simpl in R.
forwards [(?&?) | [(r2&h2&eq2&eqv2) | (r2&h2&v2&eq2&eqv2)]]: elim_if_success_value R; tryfalse.
rewrite eq2 in R. simpls. rewrite eqv2 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq2 in R. simpls. rewrite eqv2 in R. simpls.
destruct f; tryfalse.
subst r1.
forwards [? | (v3&eqv3)]: elim_if_defined R.
rewrite H in R. simpl in R. forwards*: wrong_not_ret R.
rewrite eqv3 in R. simpl in R.
inverts R.
forwards* R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1&Ext1): safety R1.
forwards* R2: run_correct eq2.
forwards* (OK2&OKL2&OKr2&Ext2): safety R2.
apply* red_assign_op.
apply* getvalue_comp_correct.
apply* getvalue_comp_correct.
apply* binary_op_comp_correct.
applys ok_value_extends Ext2.
case_if in eqv1. case_if in eqv1.
inverts* eqv1.
inverts* eqv1. apply* ok_heap_ok_value.
rewrite* binds_equiv_read.
apply* proto_indom.
apply* proto_comp_correct.
apply* indom_bound.
inverts OKr1.
inverts* H0.
auto*.
inverts* OKr2.
simpls. inverts~ eqv2.
simpls. inverts~ eqv2.
case_if in H1. case_if in H1.
inverts* H1.
inverts* H1. apply* ok_heap_ok_value.
rewrite* binds_equiv_read.
apply* proto_indom.
apply* proto_comp_correct.
apply* indom_bound.
auto*.
(* without *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
forwards [(eqw&v'&eqv') | (l&f&eq')]: elim_if_is_ref R.
lets (h'0&eqw'): eqw. forwards*: wrong_not_ret eqw'.
rewrite eq' in R. simpl in R.
forwards [(?&?) | [(v0&h2&eq2&eqv0) | (v0&h2&b&eq2&eqv0)]]: elim_if_success_value R; tryfalse.
rewrite eq2 in R. simpls. rewrite eqv0 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq2 in R. simpls. rewrite eqv0 in R. simpls.
inverts* R.
forwards* R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
inverts* OKr1; tryfalse.
inverts H0.
forwards* R2: run_correct eq2.
forwards* (OK2&OKL2&OKr2): sub_safety R2.
apply* red_assign.
apply* getvalue_comp_correct.
(* seq *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
forwards [(?&?) | (r2&h2&eq2)]: elim_if_success R; tryfalse.
rewrite eq2 in R. simpl in R.
inverts* R.
forwards~ R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
forwards~ R2: run_correct eq2.
forwards* (OK2&OKL2&OKr2): sub_safety R2.
apply* red_seq.
destruct o. (* NEWSYNTAX -- reorganize the two cases *)
(* var_decl_expr *)
forwards [(?&?) | (v0&h0&eq)]: elim_if_success R; tryfalse.
rewrite eq in R. simpl in R.
forwards* R1: run_correct eq.
inverts R.
apply* red_var_decl_expr.
(* var_decl *)
inverts R.
apply* red_var_decl.
destruct o. (* NEWSYNTAX -- reorganized *)
(* if *)
forwards [(?&?) | [(v0&h1&eq1&eqv0) | (v0&h1&b'&eq1&eqv0)]]: elim_if_success_value R; tryfalse.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls.
forwards~ R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
forwards [(?&?) | [(eq2&eqv) | (eq2&eqv)]]: elim_if_boolean R.
forwards*: ret_not_wrong H.
eapply red_if_true.
apply* run_correct.
subst b'. apply* getvalue_comp_correct.
apply* run_correct.
eapply red_if_false.
apply* run_correct.
subst b'. apply* getvalue_comp_correct.
apply* run_correct.
(* if *)
forwards [(?&?) | [(v0&h1&eq1&eqv0) | (v0&h1&b'&eq1&eqv0)]]: elim_if_success_value R; tryfalse.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls.
forwards~ R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
forwards [(?&?) | [(eq2&eqv) | (eq2&eqv)]]: elim_if_boolean R.
forwards*: ret_not_wrong H.
eapply red_if_true.
apply* run_correct.
subst b'. apply* getvalue_comp_correct.
apply* run_correct.
inverts eq2.
eapply red_if_false_implicit.
apply* run_correct.
subst b'. apply* getvalue_comp_correct.
(* while *)
forwards [(?&?) | [(v0&h1&eq1&eqv0) | (v0&h1&b'&eq1&eqv0)]]: elim_if_success_value R; tryfalse.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq1 in R. simpls. rewrite eqv0 in R. simpls.
forwards~ R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
forwards [(?&?) | [(_&eqv) | (_&eqv)]]: elim_if_boolean R.
forwards*: ret_not_wrong H.
subst b'. simpls.
forwards [(?&?) | (r2&h2&eq2)]: elim_if_success R; tryfalse.
rewrite eq2 in R. simpl in R.
inverts R. apply* red_while_true. apply* getvalue_comp_correct.
subst b'. simpls.
inverts R. apply* red_while_false. apply* getvalue_comp_correct.
(* with *)
forwards [(?&?) | (r1&h1&eq1)]: elim_if_success R; tryfalse.
rewrite eq1 in R. simpl in R.
forwards [eqr | [(eqr&v'&eqo) | (l&eqo&diffno)]]: elim_if_not_eq R.
rewrite eqr in R. simpl in R. forwards*: wrong_not_ret R.
lets (h'0&eqr'): eqr. false (>> wrong_not_ret eqr').
rewrite eqo in R. simpl in R.
case_if in R.
forwards [(?&?) | (r2&h2&eq2)]: elim_if_success R; tryfalse.
rewrite eq2 in R. simpl in R.
inverts* R.
forwards* R1: run_correct eq1.
forwards* (OK1&OKL1&OKr1): sub_safety R1.
forwards* R2: run_correct eq2.
apply* ok_scope_cons.
assert (ok_value h1 l).
inverts OKr1; simpls.
inverts* eqo.
inverts H;
case_if* in eqo; tryfalse.
case_if* in eqo;
tryfalse.
inverts eqo.
lets OKV1: ok_heap_ok_value OK1.
unfold ok_heap_ok_value_def in OKV1.
apply~ OKV1.
rewrite* binds_equiv_read.
apply* proto_indom.
apply* proto_comp_correct.
apply* indom_bound.
split; discriminate.
inverts* H.
apply* red_with.
apply* getvalue_comp_correct.
(* skip *)
inverts R.
apply* red_skip.
(* red_list_value *)
intro m. destruct m.
introv R OK OKL. false.
introv R OK OKL. destruct es; simpl in R.
do 2 eexists. splits*. constructor.
forwards [(?&?) | [(v0&h2&eq2&eqv0) | (v0&h2&b&eq2&eqv0)]]: elim_if_success_value R; tryfalse.
rewrite eq2 in R. simpls. rewrite eqv0 in R. simpls. forwards*: wrong_not_ret R.
rewrite eq2 in R. simpls. rewrite eqv0 in R. simpls.
rewrite <- (app_nil_l (b :: nil)) in R.
forwards R': run_list_value_add_value R.
introv. reflexivity.
forwards~ Rc: run_correct eq2.
forwards~ (O2&S2&Or2&E2): safety Rc.
forwards~ (h4&vs'&E&Rl): run_list_value_correct R'.
do 2 eexists. splits*.
apply* red_list_cons.
apply* getvalue_comp_correct.
Admitted. (* Admitted for the same reasons as the proof in JsSafety:
this proof requires a lot of memory and time to check! *)
(* Requires a deterministic semantics:
Theorem run_complete : forall h h' L e v,
red h L e h' v ->
ok_heap h -> ok_scope h L ->
exists m, run m h L e = out_return h' (ret_result v).
*)
End Correctness.
|
{"author": "jeremyjohnston", "repo": "javascript-vm", "sha": "eb4b20f46d36c8342f0f012cd38500ca6dab3e1a", "save_path": "github-repos/coq/jeremyjohnston-javascript-vm", "path": "github-repos/coq/jeremyjohnston-javascript-vm/javascript-vm-eb4b20f46d36c8342f0f012cd38500ca6dab3e1a/jscert/core_js_src/JsInterpreterProofs.v"}
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
from Hodge import *
if __name__ == "__main__":
    foldername = "VideoMix/NumberedVideos"

    # Load the Mechanical Turk comparison results; presumably columns 0-1 of
    # R are the compared pair and column 2 the recorded preference -- confirm
    # against the Hodge module's expected input format.
    mat = sio.loadmat("TurkResults/Class.mat")
    raw = mat['R']
    Y = raw[:, 2]
    R = raw[:, 0:2]
    W = np.ones(Y.shape)

    # Hodge decomposition: s holds the global ranking scores, I and H the
    # inconsistency components whose weighted norms are reported below.
    (s, I, H) = doHodge(R, W, Y, verbose = True)
    INorm = getWNorm(I, W)
    HNorm = getWNorm(H, W)
    idx = np.argsort(s)
    print(idx)

    # Output results in HTML format in descending order of maximum persistence
    with open("%s/ClassRankings.html" % foldername, "w") as fout:
        fout.write("<html><body>")
        fout.write("<h1>INorm = %g</h1>"%INorm)
        fout.write("<h1>HNorm = %g</h1>"%HNorm)
        fout.write("<table border = '1'>")
        for count, i in enumerate(idx, start=1):
            fout.write("<tr><td><h2>%i</h2>%i.ogg<BR><BR>s = <b>%g</b></td>"%(count, i, s[i]))
            fout.write("<td><video controls><source src=\"%i.ogg\" type=\"video/ogg\">Your browser does not support the video tag.</video>"%i)
            fout.write("</tr>\n")
        fout.write("</table></body></html>")
|
{"hexsha": "7a6c7a74aafd6c98ad8a8e2396c0ecba4a627297", "size": 1089, "ext": "py", "lang": "Python", "max_stars_repo_path": "rankVideosHumanAggregate.py", "max_stars_repo_name": "ctralie/SlidingWindowVideoTDA", "max_stars_repo_head_hexsha": "d707a0c4727e068778d5c805f938556c91d6f1ce", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-05-09T12:21:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-29T10:14:23.000Z", "max_issues_repo_path": "rankVideosHumanAggregate.py", "max_issues_repo_name": "ctralie/SlidingWindowVideoTDA", "max_issues_repo_head_hexsha": "d707a0c4727e068778d5c805f938556c91d6f1ce", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rankVideosHumanAggregate.py", "max_forks_repo_name": "ctralie/SlidingWindowVideoTDA", "max_forks_repo_head_hexsha": "d707a0c4727e068778d5c805f938556c91d6f1ce", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-05-23T07:00:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-11T11:32:36.000Z", "avg_line_length": 33.0, "max_line_length": 138, "alphanum_fraction": 0.5812672176, "include": true, "reason": "import numpy,import scipy", "num_tokens": 352}
|
# #
# Reproduce the figures in
# [V. Duchêne and C. Klein](http://dx.doi.org/10.3934/dcdsb.2021300)
# on the Serre-Green-Naghdi and its fully dispersive counterpart.
# #
export PlotSolitaryWaveWGN1,PlotSolitaryWaveWGN2,PlotSolitaryWaveWGN3,PlotJacobianWGN,IntegrateSolitaryWaveWGN,StabilitySolitaryWaveWGN,IntegrateWGN
using WaterWaves1D,FFTW,Plots,LinearAlgebra,ProgressMeter;
#using JLD #(uncomment if using function save)
#---- Figures 1 and 2
"""
PlotSolitaryWaveWGN1(;kwargs)
First method to compute the WGN solitary wave.
Use GMRES-iterative method for the inversion of the Jacobian.
Arguments are all optional:
- `c` the velocity,
- `N` the number of collocation points,
- `L` the half-length of the mesh,
- `verbose` provides details on residuals if `true` (default is `false`)
- `name`: a string used to save the figure as a name.pdf.
Return `(η,u,v,mesh)`, where `mesh.x` are the collocation points.
"""
function PlotSolitaryWaveWGN1(;c=2,N=2^10,L=10*π,verbose=false,name=nothing)
param = ( μ = 1,
ϵ = 1,
N = N,
L = L,
c = c
)
(η,u,v,mesh) = SolitaryWaveWhithamGreenNaghdi(
param;
method=2, α = 0,
tol = 1e-14, max_iter=10,
ktol =0*1e-11, gtol = 1e-16,
iterative = true, q=1,
verbose = verbose)
(ηGN,uGN) = SolitaryWaveSerreGreenNaghdi(param)
plt = plot(layout=(1,2))
plot!(plt[1,1], mesh.x, [u uGN];
title=string("c=",c),
xlabel = "x",
ylabel = "u",
label=["WGN" "SGN"])
plot!(plt[1,2], fftshift(mesh.k),
[log10.(abs.(fftshift(fft(u)))) log10.(abs.(fftshift(fft(uGN))))];
title="frequency",
label=["WGN" "SGN"])
display(plt)
if name != nothing savefig(string(name,".pdf")); end
return (η,u,v,mesh)
end
#---- Figure 3
"""
PlotSolitaryWaveWGN2(;kwargs)
Second method to compute the WGN solitary wave. Same as `PlotSolitaryWaveWGN1`, but
use non-iterative method for the inversion of the Jacobian.
To be used with higher values of the velocity (default is `c=20`).
"""
function PlotSolitaryWaveWGN2(;c=20,L=10*π,N=2^10,verbose=false,name=nothing)
param = ( μ = 1,
ϵ = 1,
N = N,
L = L,
c = c )
(η,u,v,mesh) = SolitaryWaveWhithamGreenNaghdi(
param;
method=2, α = 1, #ici α = 1 évite des oscillations important si c = 3 ou c = 20
tol = 1e-14, max_iter=15,
iterative = false, q=1,
verbose = verbose)
(ηGN,uGN) = SolitaryWaveSerreGreenNaghdi(param)
plt = plot(layout=(1,2))
plot!(plt[1,1], mesh.x, [u uGN];
title=string("c=",c),
xlabel = "u",
ylabel = "u/c",
label=["WGN" "SGN"])
plot!(plt[1,2], fftshift(mesh.k),
[log10.(abs.(fftshift(fft(u)))) log10.(abs.(fftshift(fft(uGN))))];
title="frequency",
label=["WGN" "SGN"])
display(plt)
if name != nothing savefig(string(name,".pdf")); end
return (η,u,v,mesh)
end
#---- Figure 4
"""
PlotSolitaryWaveWGN3(;kwargs)
Third method to compute the WGN solitary wave. Same as `PlotSolitaryWaveWGN1`, but
- use non-iterative method for the inversion of the Jacobian; and
- use a rescaled equation.
To be used with highest values of the velocity (default is `c=100`).
"""
function PlotSolitaryWaveWGN3(;c=100,L=10*π,N=2^10,verbose=false,name=nothing)
param = ( μ = 1,
ϵ = 1,
N = N,
L = L,
c = c )
(η,u,v,mesh) = SolitaryWaveWhithamGreenNaghdi(
param;
method=3, α = 1,
tol = 1e-10, max_iter=10,
iterative = false, q=1,
verbose = verbose)
(ηGN,uGN) = SolitaryWaveSerreGreenNaghdi(param)
plt = plot(layout=(1,2))
plot!(plt[1,1], mesh.x, [u/c uGN/c];
title=string("c=",c),
xlabel = "x",
ylabel = "u/c",
label=["WGN" "SGN"])
plot!(plt[1,2], fftshift(mesh.k),
[log10.(abs.(fftshift(fft(u/c)))) log10.(abs.(fftshift(fft(uGN/c)))) ];
title="frequency",
label=["WGN" "SGN"])
display(plt)
if name != nothing savefig(string(name,".pdf")); end
return (η,u,v,mesh)
end
#------ Figure 6
"""
PlotJacobianWGN(;kwargs)
Compute the Jacobian matrix to be inverted in SGN or WGN equation.
Arguments are all optional:
- `c` the velocity,
- `N` the number of collocation points,
- `L` the half-length of the mesh,
- `SGN`: use SGN if `true`, and WGN if `false` (default is `false`),
- `name`: a string used to save the figure as a name.pdf.
"""
function PlotJacobianWGN(;c=20,L=10*π,N=2^10,SGN=false,verbose=false,name=nothing)
ϵ,μ,α=1,1,0
(η,u,v,mesh) = SolitaryWaveWhithamGreenNaghdi(
(c=c,L=L,N=N,μ=μ,ϵ=ϵ); SGN = SGN,
method=2, α = 1, #ici α = 1 évite des oscillations important si c = 3 ou c = 20
tol = 1e-12, max_iter=15,
iterative = false, q=1,
verbose = verbose)
k,x=mesh.k,mesh.x
if SGN == true
F₀ = sqrt(μ)*1im * k
else
F₁ = tanh.(sqrt(μ)*abs.(k))./(sqrt(μ)*abs.(k))
F₁[1] = 1
F₀ = 1im * sqrt.(3*(1 ./F₁ .- 1)).*sign.(k)
end
x₀ = mesh.x[1]
FFT = exp.(-1im*k*(x.-x₀)');
IFFT = exp.(1im*k*(x.-x₀)')/length(x);
M₀ = IFFT * Diagonal( F₀ )* FFT
M(v) = Diagonal( v )
dxu = real.(ifft(1im*k.*fft(u)))
dxu ./= norm(dxu,2)
hu = c ./(c .- ϵ*u)
Fu = hu.* real.(ifft(F₀.*fft(u)))
F2u = real.(ifft(F₀.*fft(hu.^2 .* Fu ) ))
Du = c ./hu .+ 2*ϵ/3 * F2u ./ hu .+ ϵ^2/c * hu .* Fu.^2 .- (hu.^2)/c
Jac = (-1/3 *M(1 ./ hu.^2 )* M₀ * M(hu.^3)* (c*M₀ .+ 3*ϵ * M( Fu ))
.+ ϵ * M( hu .* Fu ) * M₀
.+ M( Du ) .+ α*M( hu.^2 )*dxu*dxu' *M(1 ./ hu.^2 ) )
Jacstar = -1/3 *M(1 ./ hu.^2 )* M₀ * M(hu.^3)* c*M₀
plt = plot(layout=(1,2))
surface!(plt[1,1],fftshift(k),fftshift(k)[N:-1:1],
log10.(abs.(FFT*Jac*IFFT)),
title = "Jacobian")
surface!(plt[1,2],fftshift(k),fftshift(k)[N:-1:1],
log10.(abs.(FFT*Jacstar*IFFT)),
title = "non-diagonal part")
display(plt)
if name != nothing savefig(string(name,".pdf")); end
return (Jac,Jacstar,FFT,IFFT)
end
#---- Figures 7 and 8
"""
IntegrateSolitaryWaveWGN(;kwargs)
Integrate in time the SGN or WGN solitary wave.
Arguments are all optional:
- `c` the velocity,
- `N` the number of collocation points,
- `L` the half-length of the mesh,
- `T` the final time of integration,
- `dt` the timestep,
- `SGN`: use SGN if `true`, and WGN if `false` (default is `false`),
- `name`: a string used to save raw data and the figure.
Return `problem` of type `Problem`, containing all the information.
"""
function IntegrateSolitaryWaveWGN(;SGN=false,c=2,N=2^10,L=10*π,T=1,dt=1/2000,name=nothing)
if name != nothing ns=floor(Int,max(1,T/dt/100)) else ns=1 end
param = ( μ = 1, ϵ = 1, c = c,
N = N,
L = L,
T = T,
dt = dt, ns=ns)
if SGN == true
(ηinit,uinit,vinit,mesh) = SolitaryWaveSerreGreenNaghdi(param)
(ηfin,ufin,vfin) = SolitaryWaveSerreGreenNaghdi(param; x₀ = c*T)
else
(ηinit,uinit,vinit,mesh) = SolitaryWaveWhithamGreenNaghdi(
param; method = 3, max_iter=10,α=1,verbose=true, tol=1e-12)
(ηfin,ufin,vfin) = SolitaryWaveWhithamGreenNaghdi(
param; x₀ = c*T, method = 3, max_iter=10,α=1,verbose=true, tol=1e-12)
end
init = Init(mesh,ηinit,vinit)
model = WhithamGreenNaghdi(param;SGN=SGN, ktol=0, iterate=true, precond = true)
problem = Problem(model, init, param)
solve!( problem )
(ηcomp,vcomp,ucomp) = model.mapfrofull(last(problem.data.U))
E(η,u,v) = sum(η.^2 .+ (1 .+ param.ϵ*η).*u.*v)
dE(η1,u1,v1,η2,u2,v2) = sum(η1.^2-η2.^2) + sum((1 .+ param.ϵ*η1).*u1.*v1 - (1 .+ param.ϵ*η2).*u2.*v2)
print(string("absolute energy difference: ",dE(ηinit,uinit,vinit,ηcomp,ucomp,vcomp),"\n"))
print(string("relative energy difference: ",dE(ηinit,uinit,vinit,ηcomp,ucomp,vcomp)/E(ηinit,uinit,vinit),"\n"))
plt = plot(layout=(1,2))
plot!(plt[1,1],fftshift(mesh.k),fftshift(log10.(abs.(fft(uinit))));
title = "frequency",
label = "initial",
xlabel="x",
ylabel="u")
plot!(plt[1,1],fftshift(mesh.k),fftshift(log10.(abs.(fft(ucomp))));
label = "final")
plot!(plt[1,2],mesh.x,ufin-ucomp;
title = string("error at time t=",problem.times.tfin),
label="difference",
xlabel="x",
ylabel="δu")
display(plt)
if name != nothing
savefig(string(name,".pdf"));
create_animation(problem;name=string(name,"-anim.pdf"))
plot_solution(problem)
savefig(string(name,"-final.pdf"));
#save(problem,name);
end
return problem
end
#------ Figures 9 to 15
"""
StabilitySolitaryWaveWGN(;kwargs)
Integrate in time a pertubed SGN or WGN solitary wave.
Arguments are all optional:
- `p` (1,2, or a real) is the type of perturbation:
1. If `p=1`, then `u` is multiplied by `λ = 0.99`
2. If `p=2`, then `u` is multiplied by `λ = 1.01`
3. Otherwise, one adds `p exp(-x^2)` to `u`
- `c` the velocity,
- `N` the number of collocation points,
- `L` the half-length of the mesh,
- `T` the final time of integration,
- `dt` the timestep,
- `SGN`: use SGN if `true`, and WGN if `false` (default is `false`),
- `iterate`: use GMRES if `true`, and LU decomposition otherwise (default is `true`),
- `precond` gives some choice in the preconditioner for GMRES,
- `dealias` use 2/3 dealiasing rule if `1`, (default is `0`, i.e. no dealiasing),
- `name`: a string used to save raw data and the figures.
Return `problem` of type `Problem`, containing all the information.
"""
function StabilitySolitaryWaveWGN(;p=2,c=2,N=2^10,L=10*π,T=10,dt=10/10^4,SGN=false,precond=true,iterate=true,dealias=0,name=nothing)
if name != nothing ns=floor(Int,max(1,T/dt/100)) else ns=1 end
if p == 1
λ = 0.99
elseif p == 2
λ = 1.01
else
λ = p
end
param = ( μ = 1, ϵ = 1, c = c, λ = λ,
N = N,
L = L,
T = T,
dt = dt, ns=ns,
dealias = dealias,
SGN=SGN,
ktol=0*1e-6,
gtol=1e-12,
iterate=iterate)
if SGN == true
(η,u,v,mesh) = SolitaryWaveSerreGreenNaghdi(param)
else
(η,u,v,mesh) = SolitaryWaveWhithamGreenNaghdi(
param; method = 3, tol=1e-14, max_iter=10,α=1,verbose=true)
end
k = mesh.k
if precond > 0
precond = Diagonal( 1 .+ μ/3*(precond^2*k).^2 )
elseif precond < 0
precond = Diagonal( (1 .+ μ/3000*k.^8) )
end
if p == 1 || p == 2
u= λ*u
else
u .+= λ*exp.(-mesh.x.^2)
end
if SGN == true
F₀ = sqrt(param.μ)*1im * k
else
F₁ = tanh.(sqrt(param.μ)*abs.(k))./(sqrt(param.μ)*abs.(k))
F₁[1] = 1
F₀ = 1im * sqrt.(3*(1 ./F₁ .- 1)).*sign.(k)
end
DxF(v) = real.(ifft(F₀ .* fft(v)))
h = 1 .+ param.ϵ*η
v = u - 1/3 ./h .* (DxF(h.^3 .*DxF(u)))
init = Init(mesh,η,v)
model = WhithamGreenNaghdi(param;SGN=param.SGN, dealias = param.dealias, ktol=param.ktol, gtol=param.gtol, iterate=param.iterate, precond = precond)
problem = Problem(model, init, param)
solve!( problem )
#if name != nothing save(problem,name); end
(ηfin,vfin,ufin) = model.mapfrofull(last(problem.data.U))
E(η,u,v) = sum(η.^2 .+ (1 .+ param.ϵ*η).*u.*v)
dE(η1,u1,v1,η2,u2,v2) = sum(η1.^2-η2.^2) + sum((1 .+ param.ϵ*η1).*u1.*v1 - (1 .+ param.ϵ*η2).*u2.*v2)
print(string("normalized error: ",dE(η,u,v,ηfin,ufin,vfin)/E(η,u,v),"\n"))
plt = plot(layout=(1,2))
plot!(plt[1,1],fftshift(mesh.k),fftshift(log10.(abs.(fft(η))));
title = "frequency",
label = "initial")
plot!(plt[1,1],fftshift(mesh.k),fftshift(log10.(abs.(fft(ηfin))));
label = "final")
plot!(plt[1,2],mesh.x,[η ηfin];
title = string("at time t=",problem.times.tfin),
label=["zeta initial" "zeta final"])
plot!(plt[1,2],mesh.x,[u ufin];
label=["u initial" "u final"])
display(plt)
if name != nothing savefig(string(name,"-final.pdf")); end
ts = problem.times.ts
x = mesh.x
k = mesh.k
X=interpolate(mesh,real.(ifft(problem.data.U[1][:,1])))[1].x
zs=zeros(length(X),length(ts));
for i in 1:length(ts)
zs[:,i].=interpolate(mesh,real.(ifft(problem.data.U[i][:,1])))[2]
end
plt = plot()
scatter!(plt,ts,maximum(abs.(zs),dims=1)',
title="maximum of surface deformation",
label="",
xlabel="time t")
display(plt)
if name != nothing
savefig(string(name,"-znorm.pdf"));
us=zeros(length(X),length(ts));
@showprogress 1 "Computing v..." for i in 1:length(ts)
us[:,i].=interpolate(mesh,model.mapfro(problem.data.U[i])[2])[2]
end
plt = plot()
scatter!(plt,ts,maximum(abs.(us),dims=1)',
title="maximum of velocity",
label="",
xlabel="time t")
display(plt)
savefig(string(name,"-vnorm.pdf"));
plt=plot()
my_cg = cgrad([:blue,:green])
#surface!(plt,X,ts,zs',view_angle=(20,30), color = my_cg)
display(plt)
savefig(string(name,"-evol.pdf"));
create_animation(problem;name=string(name,"-anim"))
end
return problem
end
#---- Figures 16 to end
"""
IntegrateWGN(scenario;kwargs)
Integrate in time SGN or WGN with an initial data depending on a given `scenario`
- If `scenario = 1`, produces a dispersive shock wave for a unidirectional wave constructed from the Saint-Venant model.
- If `scenario = 2`, produces a dispersive shock wave for a unidirectional wave constructed from the Camassa-Holm model.
- If `scenario = 3`, the initial data challenges the non-cavitation assumption.
Other arguments are optional:
- `δ` is either
- the square root of the shallowness parameter (the nonlinear parameter `ϵ=1`) if `scenario=1` or `2`;
- the minimal depth if `scenario=3`
- `N` the number of collocation points,
- `L` the half-length of the mesh,
- `T` the final time of integration,
- `dt` the timestep,
- `SGN`: use SGN if `true`, and WGN if `false` (default is `false`),
- `iterate`: use GMRES if `true`, and LU decomposition otherwise (default is `true`),
- `precond` gives some choice in the preconditioner for GMRES,
- `dealias`: dealiasing with Orlicz rule `1-dealias/(dealias+2)` (default is `0`, i.e. no dealiasing);
- `name`: a string used to save raw data and the figures.
Return `(problem,plt)` where `problem` contains all the information and `plt` a plot of the final time solution.
"""
function IntegrateWGN(scenario;δ=0.1,N=2^11,L=3*π,x₀=-3,T= 5,dt = 5/10^4,SGN=false,dealias=0,iterate=true,precond=true,name=nothing)
if name != nothing ns=floor(Int,max(1,T/dt/100)) else ns=1 end
if scenario == 1 || scenario == 2
μ = δ^2
else
μ = 1
end
param = ( μ = μ, ϵ = 1,
N = N, L = L,
T = T, dt = dt,
ns=ns )
mesh=Mesh(param)
k=mesh.k
if precond > 0
precond = Diagonal( 1 .+ μ/3*(precond^2*k).^2 )
elseif precond < 0
precond = Diagonal( (1 .+ μ/3000*k.^8) )
end
if scenario == 1
η= exp.(-(mesh.x .-x₀).^2)
u= 2*sqrt.(1 .+ param.ϵ*η) .-2
elseif scenario == 2
function krasny!(v)
v[abs.(v).<1e-14].=0
return v
end
Dx(v) = real.(ifft( 1im*mesh.k.* krasny!(fft(v))))
Dx2(v) = Dx(Dx(v))
ϵ = param.ϵ;μ=param.μ;
w = - (mesh.x.-x₀).* exp.(-(mesh.x.-x₀).^2)
u = w .+ μ/12 *Dx2(w) .+ μ*ϵ/6* w.*Dx2(w)
η = u .+ ϵ/4*u.^2 .- μ/6* Dx2( u+3*ϵ/4* u.^2) .- μ*ϵ/6 * u .* Dx2(u) .- 5*μ*ϵ/48 * Dx(u).^2
elseif scenario == 3
η = -(1-δ)*exp.(-mesh.x.^2)
u = -mesh.x.*exp.(-mesh.x.^2)
else
error("the first argument must be 1, 2 or 3")
end
if SGN == true
F₀ = sqrt(param.μ)*1im*mesh.k
else
F₁ = tanh.(sqrt(param.μ)*abs.(mesh.k))./(sqrt(param.μ)*abs.(mesh.k))
F₁[1]= 1 # Differentiation
F₀ = 1im * sqrt.(3*(1 ./F₁ .- 1)).*sign.(mesh.k)
end
DxF(v) = real.(ifft(F₀ .* fft(v)))
h=1 .+ param.ϵ*η
v= u - 1/3 ./h .* (DxF(h.^3 .*DxF(u)))
init = Init(mesh,η,v)
model = WhithamGreenNaghdi(param;SGN=SGN, ktol=0, gtol=1e-12, iterate=iterate, precond = precond, dealias = dealias)
problem = Problem(model, init, param)
solve!( problem )
(ηfin,vfin,ufin) = model.mapfrofull(last(problem.data.U))
E(η,u,v) = sum(η.^2 .+ (1 .+ param.ϵ*η).*u.*v)
dE(η1,u1,v1,η2,u2,v2) = sum(η1.^2-η2.^2) + sum((1 .+ param.ϵ*η1).*u1.*v1 - (1 .+ param.ϵ*η2).*u2.*v2)
print(string("normalized error: ",dE(η,u,v,ηfin,ufin,vfin)/E(η,u,v),"\n"))
fftηfin=last(problem.data.U)[:,1]
plt = plot(layout=(1,2))
plot!(plt[1,1],fftshift(mesh.k),fftshift(log10.(abs.(fftηfin)));
title = "frequency",label="")
plot!(plt[1,2],mesh.x,real.(ifft(fftηfin));
title = string("surface deformation at time t=",problem.times.tfin),
label="")
display(plt)
if name != nothing
savefig(string(name,".pdf"));
#save(problem,name);
ts = problem.times.ts
x = mesh.x
k = mesh.k
zs=zeros(param.N,length(ts));
@showprogress 1 for i in 1:length(ts)
zs[:,i].=real.(ifft(problem.data.U[i][:,1]))
end
plt3=plot()
my_cg = cgrad([:blue,:green])
surface!(plt3,x,ts,zs',view_angle=(20,30), color = my_cg)
display(plt3)
savefig(string(name,"-evol.pdf"));
create_animation(problem;ylims=false,name=string(name,"-anim"))
end
display(plt)
return problem,plt
end
nothing
|
{"hexsha": "e0a4d79f636de3afbd3b97a67af76b68f41a527b", "size": 16340, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/StudyWhithamGreenNaghdi.jl", "max_stars_repo_name": "WaterWavesModels/ShallowWaterModels.jl", "max_stars_repo_head_hexsha": "ab745353a2472a76646d976c6fe536f0be664488", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-21T17:07:26.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-25T20:31:31.000Z", "max_issues_repo_path": "examples/StudyWhithamGreenNaghdi.jl", "max_issues_repo_name": "WaterWavesModels/ShallowWaterModels.jl", "max_issues_repo_head_hexsha": "ab745353a2472a76646d976c6fe536f0be664488", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2021-07-17T17:38:52.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-10T07:36:48.000Z", "max_forks_repo_path": "examples/StudyWhithamGreenNaghdi.jl", "max_forks_repo_name": "WaterWavesModels/ShallowWaterModels.jl", "max_forks_repo_head_hexsha": "ab745353a2472a76646d976c6fe536f0be664488", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2592592593, "max_line_length": 149, "alphanum_fraction": 0.6252753978, "num_tokens": 6081}
|
"""
Summary: A collections of functions for visualization.
Description: contains a function that reads data and data types, and many
other functions for visualization
Author: Kunyu He, CAPP'20
Link: https://github.com/KunyuHe/ML-Pipeline-for-Crowdfunding-Project-Outcome-Prediction/blob/master/codes/train.py
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import pydotplus
from matplotlib.font_manager import FontProperties
from sklearn.metrics import roc_curve, auc, precision_recall_curve
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import label_binarize
from sklearn.externals.six import StringIO
from sklearn.tree import export_graphviz
# Font settings shared by all plots (serif family, decreasing sizes).
TITLE = FontProperties(family='serif', size=14, weight="semibold")
AXIS = FontProperties(family='serif', size=12)
TICKS = FontProperties(family='serif', size=10)
# Index of the class treated as positive when drawing the ROC curve.
POSITIVE = 1
# Number of classes for which per-class ROC curves are computed below.
N_CLASSES = 2
#----------------------------------------------------------------------------#
def save_fig(dir_path, extension, title, fig, dpi=300):
    """
    Save the figure to `<dir_path><extension><title>.png` and display it
    briefly (about 3 seconds) on the screen before closing it.

    Inputs:
        - dir_path (str): path of the directory for training visualization
        - extension (str): name of the sub-folder under the directory defined
            by `dir_path`
        - title (string): the name of the model
        - fig: the plot instance
        - dpi (int): resolution of the saved image.
    """
    fig_dir_path = dir_path + extension
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists()/os.makedirs() pair.
    os.makedirs(fig_dir_path, exist_ok=True)
    # Apply the layout BEFORE saving so it affects the file on disk
    # (previously tight_layout ran after savefig and had no effect there).
    fig.tight_layout()
    fig_name = fig_dir_path + "%s.png" % (title)
    fig.savefig(fig_name, dpi=dpi)
    plt.show(block=False)
    plt.pause(3)
    plt.close()
def plot_predicted_scores(cv_scores, dir_path, title=""):
    """
    Draw a 10-bin histogram of the cross-validation predicted scores on
    the training set and save it under the model's visualization folder.

    Inputs:
        - cv_scores (array of floats): cross-validation predicted scores
        - dir_path (str): path of the directory for training visualization
        - title (string): the name of the model
    """
    figure, axes = plt.subplots()
    axes.hist(cv_scores, 10, edgecolor='black')
    axes.set_title('Frequency Distribution of Predicted Scores\n' + title,
                   fontproperties=AXIS)
    axes.set_xlabel('Cross-Validation Predicted Scores', fontproperties=AXIS)
    axes.set_ylabel('Probability density', fontproperties=AXIS)
    save_fig(dir_path, "/predicted_proba/", title, figure)
def plot_auc_roc(clf, X_train, X_test, y_train, y_test, dir_path, title=""):
    """
    Plot the AUC ROC curve of the specific classifier and save it under
    the model's visualization folder.

    Inputs:
        - clf: a classifier
        - X_train (NumPy array): training data feature matrix
        - X_test (NumPy array): test data feature matrix
        - y_train (NumPy array): training data target matrix
        - y_test (NumPy array): test data target matrix
        - dir_path (str): path of the directory for training visualization
        - title (string): the name of the model
    """
    # Binarize the labels so per-class ROC curves can be computed.
    # NOTE(review): three classes are binarized here although N_CLASSES is
    # 2 — confirm whether class 2 ever occurs in the data.
    y_train = label_binarize(y_train, classes=[0, 1, 2])
    y_test = label_binarize(y_test, classes=[0, 1, 2])
    classifier = OneVsRestClassifier(clf)
    # NOTE(review): hasattr is checked on the unfitted wrapper; presumably
    # it reflects whether the base estimator exposes decision_function —
    # verify against the sklearn version in use.
    if hasattr(classifier, "decision_function"):
        y_score = classifier.fit(X_train, y_train).decision_function(X_test)
    else:
        y_score = classifier.fit(X_train, y_train).predict_proba(X_test)
    # Compute ROC curve and ROC area for binary classes
    fpr, tpr, roc_auc = dict(), dict(), dict()
    for i in range(N_CLASSES):
        fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # Compute micro-average ROC curve and ROC area
    fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    fig, _ = plt.subplots()
    # Only the curve of the positive class (POSITIVE) is drawn.
    plt.plot(fpr[POSITIVE], tpr[POSITIVE], color='darkorange', lw=1.5,
             label='ROC curve (area = {:.4f})'.format(roc_auc[POSITIVE]))
    # Diagonal reference line for a random classifier.
    plt.plot([0, 1], [0, 1], color='navy', lw=1.5, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate', fontproperties=AXIS)
    plt.ylabel('True Positive Rate', fontproperties=AXIS)
    plt.title('Receiver Operating Characteristic Curve\n' + title,
              fontproperties=AXIS)
    plt.legend(loc="lower right")
    save_fig(dir_path, "/auc_roc/", title, fig)
def plot_feature_importances(importances, col_names, dir_path, top_n, title=""):
    """
    Draw a horizontal bar chart of the `top_n` largest feature importances,
    labelled with the corresponding feature names. Credit to the
    University of Michigan.

    Inputs:
        - importances (array of floats): feature importances
        - col_names (list of strings): feature names
        - dir_path (str): path of the directory for training visualization
        - top_n (int): number of features with the highest importances to keep
        - title (string): the name of the model
    """
    # Indices of the top_n most important features, most important first.
    indices = np.argsort(importances)[::-1][:top_n]
    # Names in ascending-importance order (barh plots bottom-up).
    labels = col_names[indices][::-1]
    figure, axes = plt.subplots(figsize=[12, 8])
    axes.barh(range(top_n), sorted(importances, reverse=True)[:top_n][::-1],
              color='g', alpha=0.4, edgecolor=['black'] * top_n)
    axes.set_xlabel("Feature Importance", fontproperties=AXIS)
    axes.set_ylabel("Feature Name", fontproperties=AXIS)
    axes.set_yticks(np.arange(top_n))
    axes.set_yticklabels(labels, fontproperties=AXIS)
    save_fig(dir_path, "/feature importance/", title, figure)
def plot_decision_tree(tree, feature_names, target_name):
    """
    Export a fitted decision tree to a pydotplus graph for rendering.

    Inputs:
        - tree: a fitted sklearn decision tree estimator
        - feature_names (list of strings): names used to label split nodes
        - target_name: class names used to label the leaves

    Returns:
        (pydotplus graph) built from the exported Graphviz dot data
    """
    dot_data = StringIO()
    # Write the tree as Graphviz dot text into the in-memory buffer.
    export_graphviz(tree, out_file=dot_data, filled=True, rounded=True,
                    special_characters=True, feature_names=feature_names,
                    class_names=target_name, impurity=False)
    graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
    return graph
|
{"hexsha": "fb4c31f4b053b31d20e928290b258eb6442a0f93", "size": 5993, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/minml/components/visualization/metrics.py", "max_stars_repo_name": "timhannifan/minml", "max_stars_repo_head_hexsha": "21ab6a93e30fc4ef2e67e054cce036cb64b3b684", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/minml/components/visualization/metrics.py", "max_issues_repo_name": "timhannifan/minml", "max_issues_repo_head_hexsha": "21ab6a93e30fc4ef2e67e054cce036cb64b3b684", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/minml/components/visualization/metrics.py", "max_forks_repo_name": "timhannifan/minml", "max_forks_repo_head_hexsha": "21ab6a93e30fc4ef2e67e054cce036cb64b3b684", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2457142857, "max_line_length": 115, "alphanum_fraction": 0.6656098782, "include": true, "reason": "import numpy", "num_tokens": 1474}
|
/**
* \file dcs/testbed/registry.hpp
*
* \brief Global registry class.
*
* \author Marco Guazzone (marco.guazzone@gmail.com)
*
* <hr/>
*
* Copyright 2012 Marco Guazzone (marco.guazzone@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef DCS_TESTBED_REGISTRY_HPP
#define DCS_TESTBED_REGISTRY_HPP
#define DCS_TESTBED_CONFIG_REGISTRY_USE_ABRAHAMS_SINGLETON
//#undef DCS_TESTBED_CONFIG_REGISTRY_USE_ABRAHAMS_SINGLETON
#if defined(DCS_TESTBED_CONFIG_REGISTRY_USE_ABRAHAMS_SINGLETON)
# include <boost/serialization/singleton.hpp>
#else // DCS_TESTBED_CONFIG_REGISTRY_USE_ABRAHAMS_SINGLETON
# include <boost/scoped_ptr.hpp>
# include <boost/thread/once.hpp>
# include <boost/utility.hpp>
#endif // DCS_TESTBED_CONFIG_REGISTRY_USE_ABRAHAMS_SINGLETON
#include <boost/shared_ptr.hpp>
#include <dcs/assert.hpp>
#include <dcs/debug.hpp>
#include <dcs/exception.hpp>
#include <dcs/testbed/base_virtual_machine_manager.hpp>
#include <map>
#include <stdexcept>
namespace dcs { namespace testbed {
#if !defined(DCS_TESTBED_CONFIG_REGISTRY_USE_ABRAHAMS_SINGLETON)
namespace detail { namespace /*<unnamed>*/ {
/**
* \brief Thread safe lazy singleton template class.
*
* This class is a thread-safe lazy singleton template class, which can be used
* during static initialization or anytime after.
*
* Original code found at http://www.boostcookbook.com/Recipe:/1235044
*
* \note
* - If T's constructor throws, instance() will return a null reference.
* - If your singleton class manages resources, you may provide a public
* destructor, and it will be called when the instance of your singleton
* class is out of scoped (see scoped_ptr docs).
* .
*
* \author Port4l, http://www.boostcookbook.com/User:/Port4l
* \author Marco Guazzone (marco.guazzone@gmail.com)
*/
template <typename T>
class singleton: ::boost::noncopyable
{
	/// Returns the lazily-created unique instance.
	/// Thread-safe: initialization runs exactly once via boost::call_once.
	public: static T& instance()
	{
//		::boost::call_once(init, flag_);
		::boost::call_once(flag_, init);
		return *ptr_t_;
	}

	/// Read-only access to the unique instance.
	public: static T const& const_instance()
	{
//		::boost::call_once(init, flag_);
		::boost::call_once(flag_, init);
		return *ptr_t_;
	}

	/// Instances are created only through derived singleton classes.
	protected: singleton() {}

	protected: virtual ~singleton() { }

	// NOTE(review): `new T()` may throw; per the class docs above,
	// instance() would then return a null reference.
	private: static void init() // never throws
	{
		ptr_t_.reset(new T());
	}

	/// Guards one-time initialization of ptr_t_.
	private: static ::boost::once_flag flag_;
	/// Owns the unique instance; freed when the scoped_ptr is destroyed.
	private: static ::boost::scoped_ptr<T> ptr_t_;
};

template <typename T>
::boost::once_flag singleton<T>::flag_ = BOOST_ONCE_INIT;

template <typename T>
::boost::scoped_ptr<T> singleton<T>::ptr_t_(0);
}} // Namespace detail::<unnamed>
#endif // DCS_TESTBED_CONFIG_REGISTRY_USE_ABRAHAMS_SINGLETON
#if defined(DCS_TESTBED_CONFIG_REGISTRY_USE_ABRAHAMS_SINGLETON)

/// Global registry of virtual machine managers, keyed by VMM identifier.
/// The singleton backend is selected at compile time.
template <typename TraitsT>
class registry: public ::boost::serialization::singleton< registry<TraitsT> >
{
	private: typedef ::boost::serialization::singleton< registry<TraitsT> > base_type;
#else // DCS_TESTBED_CONFIG_REGISTRY_USE_ABRAHAMS_SINGLETON

/// Global registry of virtual machine managers, keyed by VMM identifier.
// NOTE(review): in this branch `base_type` is never declared, yet
// instance()/const_instance() below refer to base_type::get_*_instance();
// confirm this configuration still compiles.
template <typename TraitsT>
class registry: public detail::singleton< registry<TraitsT> >
{
	friend class detail::singleton< registry<TraitsT> >;
#endif // DCS_TESTBED_CONFIG_REGISTRY_USE_ABRAHAMS_SINGLETON

	public: typedef TraitsT traits_type;
	public: typedef base_virtual_machine_manager<traits_type> vmm_type;
	public: typedef ::boost::shared_ptr<vmm_type> vmm_pointer;
	public: typedef typename vmm_type::identifier_type vmm_identifier_type;
	private: typedef ::std::map<vmm_identifier_type,vmm_pointer> vmm_container;

	/// Mutable access to the unique registry instance.
	public: static registry& instance()
	{
		return base_type::get_mutable_instance();
	}

	/// Read-only access to the unique registry instance.
	public: static registry const& const_instance()
	{
		return base_type::get_const_instance();
	}

	/// Registers (or replaces) a VMM under its own identifier.
	/// Throws std::invalid_argument (via DCS_ASSERT) if ptr_vmm is null.
	public: void add_vmm(vmm_pointer const& ptr_vmm)
	{
		// pre: ptr_vmm not null
		DCS_ASSERT(ptr_vmm,
				   DCS_EXCEPTION_THROW(::std::invalid_argument,
									   "Invalid pointer to Virtual Machine Manager"));

		vmm_map_[ptr_vmm->id()] = ptr_vmm;
	}

	/// Returns the VMM registered under \a id.
	/// Throws std::invalid_argument (via DCS_ASSERT) if \a id is unknown.
	public: vmm_pointer vmm(vmm_identifier_type id) const
	{
		// pre: id is a valid VMM identifier
		DCS_ASSERT(vmm_map_.count(id) > 0,
				   DCS_EXCEPTION_THROW(::std::invalid_argument,
									   "Invalid Virtual Machine Manager identifier"));

		return vmm_map_.at(id);
	}

	/// Tells whether a VMM is registered under \a id.
	public: bool exists_vmm(vmm_identifier_type id) const
	{
		return vmm_map_.count(id) > 0 ? true : false;
	}

//	/// Default constructor
//	protected: registry()
//	: vmm_map_()
//	{
//	}

	/// Registered VMMs, indexed by their identifier.
	private: vmm_container vmm_map_;
}; // registry
}} // Namespace dcs::testbed
#endif // DCS_TESTBED_REGISTRY_HPP
|
{"hexsha": "42d04f40b40e4e70a7205b08601e2f576269b6c6", "size": 5015, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "inc/dcs/testbed/registry.hpp", "max_stars_repo_name": "sguazt/dcsxx-testbed", "max_stars_repo_head_hexsha": "e7210f0c7f54256d5bf0c90297e0c4f9eaf82da0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inc/dcs/testbed/registry.hpp", "max_issues_repo_name": "sguazt/dcsxx-testbed", "max_issues_repo_head_hexsha": "e7210f0c7f54256d5bf0c90297e0c4f9eaf82da0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inc/dcs/testbed/registry.hpp", "max_forks_repo_name": "sguazt/dcsxx-testbed", "max_forks_repo_head_hexsha": "e7210f0c7f54256d5bf0c90297e0c4f9eaf82da0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8505154639, "max_line_length": 83, "alphanum_fraction": 0.7427716849, "num_tokens": 1268}
|
# Copyright 2019 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numba import jit
import numpy as np
import math
#from numba import gdb
def ndim_list(shape):
    """Build a nested list of the given shape, with every leaf set to None."""
    head, *rest = shape
    if rest:
        return [ndim_list(rest) for _ in range(head)]
    return [None] * head
def _graph_aperture_grid(vis_dataset,gcf_dataset,grid_parms,sel_parms):
    """
    Build a dask graph that convolutionally grids visibilities (or, when
    grid_parms['grid_weights'] is set, the imaging weights) using aperture
    convolution kernels, one delayed task per (time, baseline, chan, pol)
    chunk, then tree-sums the partial grids.

    Returns a two-element list [grid, sum_weight] of dask arrays with the
    gridded axes moved to the last two positions.
    Side effect: sets grid_parms['complex_grid'] = True.
    """
    import dask
    import dask.array as da
    import xarray as xr
    import time
    import itertools
    import matplotlib.pyplot as plt

    # Getting data for gridding
    chan_chunk_size = vis_dataset[sel_parms["imaging_weight"]].chunks[2][0]
    freq_chan = da.from_array(vis_dataset.coords['chan'].values, chunks=(chan_chunk_size))

    # One delayed gridding task is created per chunk of the imaging weights.
    n_chunks_in_each_dim = vis_dataset[sel_parms["imaging_weight"]].data.numblocks
    chunk_indx = []

    iter_chunks_indx = itertools.product(np.arange(n_chunks_in_each_dim[0]), np.arange(n_chunks_in_each_dim[1]),
                                         np.arange(n_chunks_in_each_dim[2]), np.arange(n_chunks_in_each_dim[3]))

    # continuum: all channels collapse into a single image plane;
    # cube: one image plane list per channel chunk.
    if grid_parms['chan_mode'] == 'continuum':
        n_chan_chunks_img = 1
        n_other_chunks = n_chunks_in_each_dim[0]*n_chunks_in_each_dim[1]*n_chunks_in_each_dim[2]*n_chunks_in_each_dim[3]
    elif grid_parms['chan_mode'] == 'cube':
        n_chan_chunks_img = n_chunks_in_each_dim[2]
        n_other_chunks = n_chunks_in_each_dim[0]*n_chunks_in_each_dim[1]*n_chunks_in_each_dim[3]

    #n_delayed = np.prod(n_chunks_in_each_dim)
    chunk_sizes = vis_dataset[sel_parms["imaging_weight"]].chunks

    # Nested lists holding the partial (per-chunk) grids before reduction.
    list_of_grids = ndim_list((n_chan_chunks_img,n_other_chunks))
    list_of_sum_weights = ndim_list((n_chan_chunks_img,n_other_chunks))

    #print(cf_dataset)
    grid_parms['complex_grid'] = True
    # Build graph
    for c_time, c_baseline, c_chan, c_pol in iter_chunks_indx:
        if grid_parms['grid_weights']:
            # Grid the imaging weights themselves with the weight kernels.
            sub_grid_and_sum_weights = dask.delayed(_aperture_weight_grid_numpy_wrap)(
                vis_dataset[sel_parms["uvw"]].data.partitions[c_time, c_baseline, 0],
                vis_dataset[sel_parms["imaging_weight"]].data.partitions[c_time, c_baseline, c_chan, c_pol],
                vis_dataset["field_id"].data.partitions[c_time],
                gcf_dataset["CF_BASELINE_MAP"].data.partitions[c_baseline],
                gcf_dataset["CF_CHAN_MAP"].data.partitions[c_chan],
                gcf_dataset["CF_POL_MAP"].data.partitions[c_pol],
                gcf_dataset["WEIGHT_CONV_KERNEL"].data,
                gcf_dataset["SUPPORT"].data,
                gcf_dataset["PHASE_GRADIENT"].data,
                freq_chan.partitions[c_chan],
                dask.delayed(grid_parms))
            grid_dtype = np.complex128
        else:
            # Grid the (weighted) visibility data with the aperture kernels.
            sub_grid_and_sum_weights = dask.delayed(_aperture_grid_numpy_wrap)(
                vis_dataset[sel_parms["data"]].data.partitions[c_time, c_baseline, c_chan, c_pol],
                vis_dataset[sel_parms["uvw"]].data.partitions[c_time, c_baseline, 0],
                vis_dataset[sel_parms["imaging_weight"]].data.partitions[c_time, c_baseline, c_chan, c_pol],
                vis_dataset["field_id"].data.partitions[c_time],
                gcf_dataset["CF_BASELINE_MAP"].data.partitions[c_baseline],
                gcf_dataset["CF_CHAN_MAP"].data.partitions[c_chan],
                gcf_dataset["CF_POL_MAP"].data.partitions[c_pol],
                gcf_dataset["CONV_KERNEL"].data,
                gcf_dataset["SUPPORT"].data,
                gcf_dataset["PHASE_GRADIENT"].data,
                freq_chan.partitions[c_chan],
                dask.delayed(grid_parms))
            grid_dtype = np.complex128

        # Flatten the multi-dim chunk index into a position within the
        # per-image-plane list of partial grids.
        if grid_parms['chan_mode'] == 'continuum':
            c_time_baseline_chan_pol = c_pol + c_chan*n_chunks_in_each_dim[3] + c_baseline*n_chunks_in_each_dim[3]*n_chunks_in_each_dim[2] + c_time*n_chunks_in_each_dim[3]*n_chunks_in_each_dim[2]*n_chunks_in_each_dim[1]
            list_of_grids[0][c_time_baseline_chan_pol] = da.from_delayed(sub_grid_and_sum_weights[0], (1, chunk_sizes[3][c_pol], grid_parms['image_size_padded'][0], grid_parms['image_size_padded'][1]),dtype=grid_dtype)
            list_of_sum_weights[0][c_time_baseline_chan_pol] = da.from_delayed(sub_grid_and_sum_weights[1],(1, chunk_sizes[3][c_pol]),dtype=np.double)
        elif grid_parms['chan_mode'] == 'cube':
            c_time_baseline_pol = c_pol + c_baseline*n_chunks_in_each_dim[3] + c_time*n_chunks_in_each_dim[1]*n_chunks_in_each_dim[3]
            list_of_grids[c_chan][c_time_baseline_pol] = da.from_delayed(sub_grid_and_sum_weights[0], (chunk_sizes[2][c_chan], chunk_sizes[3][c_pol], grid_parms['image_size_padded'][0], grid_parms['image_size_padded'][1]),dtype=grid_dtype)
            list_of_sum_weights[c_chan][c_time_baseline_pol] = da.from_delayed(sub_grid_and_sum_weights[1],(chunk_sizes[2][c_chan], chunk_sizes[3][c_pol]),dtype=np.double)

    # Sum grids
    # Pairwise (tree) reduction keeps the dask graph shallow.
    for c_chan in range(n_chan_chunks_img):
        list_of_grids[c_chan] = _tree_sum_list(list_of_grids[c_chan])
        list_of_sum_weights[c_chan] = _tree_sum_list(list_of_sum_weights[c_chan])

    # Concatenate Cube
    if grid_parms['chan_mode'] == 'cube':
        list_of_grids_and_sum_weights = [da.concatenate(list_of_grids,axis=1)[0],da.concatenate(list_of_sum_weights,axis=1)[0]]
    else:
        list_of_grids_and_sum_weights = [list_of_grids[0][0],list_of_sum_weights[0][0]]

    # Put axes in image orientation. How much does this add to compute?
    list_of_grids_and_sum_weights[0] = da.moveaxis(list_of_grids_and_sum_weights[0], [0, 1],
                                                   [-2, -1])
    list_of_grids_and_sum_weights[1] = da.moveaxis(list_of_grids_and_sum_weights[1],[0, 1], [-2, -1])

    return list_of_grids_and_sum_weights
def _aperture_weight_grid_numpy_wrap(uvw,imaging_weight,field,cf_baseline_map,cf_chan_map,cf_pol_map,weight_conv_kernel,weight_support,phase_gradient,freq_chan,grid_parms):
    """
    Allocate the output grid and sum-of-weight arrays for one data chunk
    and delegate to the numba kernel `_aperture_weight_grid_jit`, which
    grids the imaging weights with the weight convolution kernels.

    Returns:
        (grid, sum_weight): grid has shape (n_imag_chan, n_imag_pol,
        n_uv[0], n_uv[1]); sum_weight has shape (n_imag_chan, n_imag_pol).
    """
    n_chan = imaging_weight.shape[2]
    # NB: `np.int` was removed in NumPy 1.24; the builtin `int` is the
    # equivalent dtype specifier.
    if grid_parms['chan_mode'] == 'cube':
        # One image channel per data channel.
        n_imag_chan = n_chan
        chan_map = (np.arange(0, n_chan)).astype(int)
    else:  # continuum
        n_imag_chan = 1  # Making only one continuum image.
        chan_map = (np.zeros(n_chan)).astype(int)

    n_imag_pol = imaging_weight.shape[3]
    pol_map = (np.arange(0, n_imag_pol)).astype(int)

    n_uv = grid_parms['image_size_padded']
    delta_lm = grid_parms['cell_size']
    oversampling = grid_parms['oversampling']

    if grid_parms['complex_grid']:
        grid = np.zeros((n_imag_chan, n_imag_pol, n_uv[0], n_uv[1]), dtype=np.complex128)
    else:
        grid = np.zeros((n_imag_chan, n_imag_pol, n_uv[0], n_uv[1]), dtype=np.double)
    sum_weight = np.zeros((n_imag_chan, n_imag_pol), dtype=np.double)

    # The jit kernel fills grid and sum_weight in place.
    _aperture_weight_grid_jit(grid, sum_weight, uvw, freq_chan, chan_map, pol_map, cf_baseline_map, cf_chan_map, cf_pol_map, imaging_weight, weight_conv_kernel, n_uv, delta_lm, weight_support, oversampling, field, phase_gradient)

    return grid, sum_weight
@jit(nopython=True, cache=True, nogil=True)
def _aperture_weight_grid_jit(grid, sum_weight, uvw, freq_chan, chan_map, pol_map, cf_baseline_map, cf_chan_map, cf_pol_map, imaging_weight, weight_conv_kernel, n_uv, delta_lm, weight_support, oversampling, field, phase_gradient):
    """Grid the imaging weights onto `grid` in place using the oversampled
    weight convolution kernels, accumulating per-(chan, pol) sums into
    `sum_weight`. Mutates `grid` and `sum_weight`; returns nothing."""
    c = 299792458.0  # speed of light in m/s
    # Per-channel scale factors converting uvw (in metres) to grid cells.
    uv_scale = np.zeros((2, len(freq_chan)), dtype=np.double)
    uv_scale[0, :] = -(freq_chan * delta_lm[0] * n_uv[0]) / c
    uv_scale[1, :] = -(freq_chan * delta_lm[1] * n_uv[1]) / c

    uv_center = n_uv // 2

    n_time = uvw.shape[0]
    n_baseline = uvw.shape[1]
    n_chan = len(chan_map)
    n_pol = len(pol_map)

    n_u = n_uv[0]
    n_v = n_uv[1]
    u_center = uv_center[0]
    v_center = uv_center[1]

    max_support_center = np.max(weight_support)
    # Centre indices of the (oversampled) weight convolution kernel.
    conv_v_center = weight_conv_kernel.shape[-1]//2
    conv_u_center = weight_conv_kernel.shape[-2]//2

    #print(phase_gradient.shape)
    #print(weight_conv_kernel.shape)

    # The kernel-times-phase-gradient product is cached per field; this
    # assumes rows of the same field are contiguous in time.
    prev_field = -1
    for i_time in range(n_time):
        if prev_field != field[i_time]:
            weight_conv_kernel_phase_gradient = weight_conv_kernel*phase_gradient[field[i_time],:,:]
            prev_field = field[i_time]
        for i_baseline in range(n_baseline):
            cf_baseline = cf_baseline_map[i_baseline]
            for i_chan in range(n_chan):
                cf_chan = cf_chan_map[i_chan]
                a_chan = chan_map[i_chan]
                # uv position of this sample in grid cells.
                u = uvw[i_time, i_baseline, 0] * uv_scale[0, i_chan]
                v = uvw[i_time, i_baseline, 1] * uv_scale[1, i_chan]

                if ~np.isnan(u) and ~np.isnan(v):
                    u_pos = u + uv_center[0]
                    v_pos = v + uv_center[1]

                    #Do not use numpy round
                    u_center_indx = int(u_pos + 0.5)
                    v_center_indx = int(v_pos + 0.5)

                    # Skip samples whose maximal kernel support would fall
                    # off the edge of the grid.
                    if (u_center_indx+max_support_center < n_u) and (v_center_indx+max_support_center < n_v) and (u_center_indx-max_support_center >= 0) and (v_center_indx-max_support_center >= 0):
                        #u_offset = u_center_indx - u_pos
                        #u_center_offset_indx = math.floor(u_offset * oversampling[0] + 0.5)
                        #v_offset = v_center_indx - v_pos
                        #v_center_offset_indx = math.floor(v_offset * oversampling[1] + 0.5)

                        for i_pol in range(n_pol):
                            weighted_data = imaging_weight[i_time, i_baseline, i_chan, i_pol]

                            if ~np.isnan(weighted_data) and (weighted_data != 0.0):
                                cf_pol = cf_pol_map[i_pol]
                                a_pol = pol_map[i_pol]
                                norm = 0.0

                                '''
                                support = weight_support[cf_baseline,cf_chan,cf_pol,:]
                                #support = np.array([13,13])
                                support_center = support // 2
                                start_support = - support_center
                                end_support = support - support_center # end_support is larger by 1 so that python range() gives correct indices
                                '''
                                # Kernel support (in grid cells) for this
                                # (baseline, chan, pol) combination.
                                support_u = weight_support[cf_baseline,cf_chan,cf_pol,0]
                                support_v = weight_support[cf_baseline,cf_chan,cf_pol,1]
                                support_center_u = support_u // 2
                                support_center_v = support_v // 2
                                start_support_u = - support_center_u
                                start_support_v = - support_center_v
                                end_support_u = support_u - support_center_u
                                end_support_v = support_v - support_center_v

                                # NOTE(review): the sibling kernel
                                # _aperture_grid_jit writes around
                                # u_center_indx/v_center_indx, while this one
                                # writes around the fixed grid centre
                                # (u_center/v_center) — confirm this is
                                # intentional for weight gridding.
                                for i_v in range(start_support_v,end_support_v):
                                    v_indx = v_center + i_v
                                    cf_v_indx = oversampling[1]*i_v + conv_v_center

                                    for i_u in range(start_support_u,end_support_u):
                                        u_indx = u_center + i_u
                                        cf_u_indx = oversampling[0]*i_u + conv_u_center
                                        conv = weight_conv_kernel_phase_gradient[cf_baseline,cf_chan,cf_pol,cf_u_indx,cf_v_indx]

                                        grid[a_chan, a_pol, u_indx, v_indx] = grid[a_chan, a_pol, u_indx, v_indx] + conv * weighted_data
                                        norm = norm + conv
                                sum_weight[a_chan, a_pol] = sum_weight[a_chan, a_pol] + weighted_data * np.real(norm)

    return
def _aperture_grid_numpy_wrap(vis_data,uvw,imaging_weight,field,cf_baseline_map,cf_chan_map,cf_pol_map,conv_kernel,weight_support,phase_gradient,freq_chan,grid_parms):
    """
    Allocate the output grid and sum-of-weight arrays for one data chunk
    and delegate to the numba kernel `_aperture_grid_jit`, which grids the
    weighted visibility data (or the weights alone when
    grid_parms['do_psf'] is set) with the aperture convolution kernels.

    Returns:
        (grid, sum_weight): grid has shape (n_imag_chan, n_imag_pol,
        n_uv[0], n_uv[1]); sum_weight has shape (n_imag_chan, n_imag_pol).
    """
    n_chan = imaging_weight.shape[2]
    # NB: `np.int` was removed in NumPy 1.24; the builtin `int` is the
    # equivalent dtype specifier.
    if grid_parms['chan_mode'] == 'cube':
        # One image channel per data channel.
        n_imag_chan = n_chan
        chan_map = (np.arange(0, n_chan)).astype(int)
    else:  # continuum
        n_imag_chan = 1  # Making only one continuum image.
        chan_map = (np.zeros(n_chan)).astype(int)

    n_imag_pol = imaging_weight.shape[3]
    pol_map = (np.arange(0, n_imag_pol)).astype(int)

    n_uv = grid_parms['image_size_padded']
    delta_lm = grid_parms['cell_size']
    oversampling = grid_parms['oversampling']

    if grid_parms['complex_grid']:
        grid = np.zeros((n_imag_chan, n_imag_pol, n_uv[0], n_uv[1]), dtype=np.complex128)
    else:
        grid = np.zeros((n_imag_chan, n_imag_pol, n_uv[0], n_uv[1]), dtype=np.double)
    sum_weight = np.zeros((n_imag_chan, n_imag_pol), dtype=np.double)

    do_psf = grid_parms['do_psf']

    # The jit kernel fills grid and sum_weight in place.
    _aperture_grid_jit(grid, sum_weight, do_psf, vis_data, uvw, freq_chan, chan_map, pol_map, cf_baseline_map, cf_chan_map, cf_pol_map, imaging_weight, conv_kernel, n_uv, delta_lm, weight_support, oversampling, field, phase_gradient)

    return grid, sum_weight
# Important changes to be made https://github.com/numba/numba/issues/4261
# debug=True and gdb()
@jit(nopython=True, cache=True, nogil=True)
def _aperture_grid_jit(grid, sum_weight, do_psf, vis_data, uvw, freq_chan, chan_map, pol_map, cf_baseline_map, cf_chan_map, cf_pol_map, imaging_weight, conv_kernel, n_uv, delta_lm, weight_support, oversampling, field, phase_gradient):
    """Convolutionally grid weighted visibilities (or, when `do_psf`, the
    imaging weights alone) onto `grid` in place, selecting the kernel plane
    from the oversampled aperture kernel by sub-cell offset. Accumulates
    normalisation terms into `sum_weight`. Mutates `grid` and `sum_weight`;
    returns nothing."""
    c = 299792458.0  # speed of light in m/s
    # Per-channel scale factors converting uvw (in metres) to grid cells.
    uv_scale = np.zeros((2, len(freq_chan)), dtype=np.double)
    uv_scale[0, :] = -(freq_chan * delta_lm[0] * n_uv[0]) / c
    uv_scale[1, :] = -(freq_chan * delta_lm[1] * n_uv[1]) / c

    uv_center = n_uv // 2

    n_time = uvw.shape[0]
    n_baseline = uvw.shape[1]
    n_chan = len(chan_map)
    n_pol = len(pol_map)

    n_u = n_uv[0]
    n_v = n_uv[1]
    u_center = uv_center[0]
    v_center = uv_center[1]

    max_support_center = np.max(weight_support)
    # Centre indices of the (oversampled) aperture convolution kernel.
    conv_v_center = conv_kernel.shape[-1]//2
    conv_u_center = conv_kernel.shape[-2]//2
    conv_size = np.array(conv_kernel.shape[-2:])
    #print('conv_size',conv_size)
    #print('sizes ',conv_kernel.shape, conv_u_center, conv_v_center)

    #print(phase_gradient.shape)
    #print(weight_conv_kernel.shape)

    # The kernel-times-phase-gradient product is cached per field; this
    # assumes rows of the same field are contiguous in time.
    prev_field = -1
    for i_time in range(n_time):
        if prev_field != field[i_time]:
            conv_kernel_phase_gradient = conv_kernel*phase_gradient[field[i_time],:,:]
            prev_field = field[i_time]
        #conv_kernel_phase_gradient = conv_kernel
        for i_baseline in range(n_baseline):
            cf_baseline = cf_baseline_map[i_baseline]
            for i_chan in range(n_chan):
                cf_chan = cf_chan_map[i_chan]
                a_chan = chan_map[i_chan]
                # uv position of this sample in grid cells.
                u = uvw[i_time, i_baseline, 0] * uv_scale[0, i_chan]
                v = uvw[i_time, i_baseline, 1] * uv_scale[1, i_chan]

                if ~np.isnan(u) and ~np.isnan(v):
                    u_pos = u + uv_center[0]
                    v_pos = v + uv_center[1]

                    #Do not use numpy round
                    u_center_indx = int(u_pos + 0.5)
                    v_center_indx = int(v_pos + 0.5)

                    # Skip samples whose maximal kernel support would fall
                    # off the edge of the grid.
                    if (u_center_indx+max_support_center < n_u) and (v_center_indx+max_support_center < n_v) and (u_center_indx-max_support_center >= 0) and (v_center_indx-max_support_center >= 0):
                        # Sub-cell offset selects which oversampled kernel
                        # plane is sampled below.
                        u_offset = u_center_indx - u_pos
                        u_center_offset_indx = math.floor(u_offset * oversampling[0] + 0.5) + conv_u_center
                        v_offset = v_center_indx - v_pos
                        v_center_offset_indx = math.floor(v_offset * oversampling[1] + 0.5) + conv_v_center

                        for i_pol in range(n_pol):
                            if do_psf:
                                # PSF: grid the weights only (unit visibilities).
                                weighted_data = imaging_weight[i_time, i_baseline, i_chan, i_pol]
                            else:
                                weighted_data = vis_data[i_time, i_baseline, i_chan, i_pol] * imaging_weight[i_time, i_baseline, i_chan, i_pol]

                            if ~np.isnan(weighted_data) and (weighted_data != 0.0):
                                cf_pol = cf_pol_map[i_pol]
                                a_pol = pol_map[i_pol]
                                norm = 0.0

                                '''
                                support = weight_support[cf_baseline,cf_chan,cf_pol,:]
                                #support = np.array([13,13])
                                support_center = support // 2
                                start_support = - support_center
                                end_support = support - support_center # end_support is larger by 1 so that python range() gives correct indices
                                '''
                                # Kernel support (in grid cells) for this
                                # (baseline, chan, pol) combination.
                                support_u = weight_support[cf_baseline,cf_chan,cf_pol,0]
                                support_v = weight_support[cf_baseline,cf_chan,cf_pol,1]
                                support_center_u = support_u // 2
                                support_center_v = support_v // 2
                                start_support_u = - support_center_u
                                start_support_v = - support_center_v
                                end_support_u = support_u - support_center_u
                                end_support_v = support_v - support_center_v
                                #print(support)

                                ###############
#                                resized_conv_size = (support + 1)*oversampling
#                                start_indx = conv_size//2 - resized_conv_size//2
#                                end_indx = start_indx + resized_conv_size
#                                normalize_factor = np.real(np.sum(conv_kernel[cf_baseline,cf_chan,cf_pol,start_indx[0]:end_indx[0],start_indx[1]:end_indx[1]])/(oversampling[0]*oversampling[1]))
#
#                                conv_kernel_phase_gradient = conv_kernel*phase_gradient[field[i_time],:,:]/normalize_factor
#                                print(normalize_factor)
                                ##############

                                for i_v in range(start_support_v,end_support_v):
                                    v_indx = v_center_indx + i_v
                                    cf_v_indx = oversampling[1]*i_v + v_center_offset_indx

                                    for i_u in range(start_support_u,end_support_u):
                                        u_indx = u_center_indx + i_u
                                        cf_u_indx = oversampling[0]*i_u + u_center_offset_indx
                                        conv = conv_kernel_phase_gradient[cf_baseline,cf_chan,cf_pol,cf_u_indx,cf_v_indx]

                                        grid[a_chan, a_pol, u_indx, v_indx] = grid[a_chan, a_pol, u_indx, v_indx] + conv * weighted_data
                                        norm = norm + conv
                                sum_weight[a_chan, a_pol] = sum_weight[a_chan, a_pol] + imaging_weight[i_time, i_baseline, i_chan, i_pol]*np.real(norm**2)#*np.real(norm**2)#* np.real(norm) #np.abs(norm**2) #**2 term is needed since the pb is in the image twice (one naturally and another from the gcf)

    return
def _tree_sum_list(list_to_sum):
    """Pairwise (tree) reduction: repeatedly sum adjacent pairs with
    dask.array.add until a single element remains, keeping the task graph
    shallow. Returns a one-element list."""
    import dask.array as da
    while len(list_to_sum) > 1:
        paired = []
        # Sum adjacent pairs; an odd trailing element is carried forward.
        for j in range(0, len(list_to_sum) - 1, 2):
            paired.append(da.add(list_to_sum[j], list_to_sum[j + 1]))
        if len(list_to_sum) % 2 == 1:
            paired.append(list_to_sum[-1])
        list_to_sum = paired
    return list_to_sum
|
{"hexsha": "c90a70605c67e2a636062c5aebd88d6171ba5849", "size": 21995, "ext": "py", "lang": "Python", "max_stars_repo_path": "ngcasa/imaging/_imaging_utils/_aperture_grid.py", "max_stars_repo_name": "FedeMPouzols/cngi_prototype", "max_stars_repo_head_hexsha": "421a99c460f4092b79120f5bec122de7ce9b8b96", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ngcasa/imaging/_imaging_utils/_aperture_grid.py", "max_issues_repo_name": "FedeMPouzols/cngi_prototype", "max_issues_repo_head_hexsha": "421a99c460f4092b79120f5bec122de7ce9b8b96", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ngcasa/imaging/_imaging_utils/_aperture_grid.py", "max_forks_repo_name": "FedeMPouzols/cngi_prototype", "max_forks_repo_head_hexsha": "421a99c460f4092b79120f5bec122de7ce9b8b96", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.1025056948, "max_line_length": 303, "alphanum_fraction": 0.576812912, "include": true, "reason": "import numpy,from numba", "num_tokens": 5085}
|
import torch
import math
import numpy as np
from scipy.spatial import Delaunay
from shapely import geometry
from matplotlib import pyplot as plt
def get_circle(batch_size, masks_size, num_points, device, r=10):
    """
    Build a triangulated circular contour centred in a square mask.

    Inputs:
        - batch_size (int): number of copies along the batch dimension
        - masks_size (number): side length of the square mask in pixels
        - num_points (int): number of vertices sampled on the circle
        - device: torch device the tensors are created on
        - r (number): circle radius in pixels (default 10, matching the
          previously hard-coded value)

    Returns:
        - vert (float Tensor, shape (batch_size, 1, num_points, 2)):
          vertex coordinates normalised to [-1, 1], y axis flipped
        - face (int32 Tensor, shape (batch_size, 1, n_tri, 3)):
          Delaunay triangle indices into `vert`
    """
    half_dim = masks_size / 2
    circle = []
    for k in range(0, num_points):
        angle = 2 * math.pi / num_points * k
        # floor keeps the historical pixel-snapped vertex positions.
        circle.append([half_dim + math.floor(math.cos(angle) * r),
                       half_dim + math.floor(math.sin(angle) * r)])
    vert = np.array(circle)
    vert = (vert - half_dim) / half_dim
    tri = Delaunay(vert).simplices.copy()
    vert = torch.Tensor(vert)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1)
    face = torch.Tensor(tri)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1).type(torch.int32)
    # Flip y so coordinates follow image convention.
    vert[:, :, :, 1] = -vert[:, :, :, 1]
    return vert, face
def get_circles(batch_size, masks_size, num_points, device):
    """Build three related meshes: an inner disc, an annulus, and an
    arc-bounded region outside the outer circle.

    Returns ``(vert0, face0, vert1, face1, vert2, face2, cp_index_0,
    cp_index_1)`` where each ``vert*`` is ``(B, 1, N, 2)`` normalised to
    [-1, 1] with y flipped, each ``face*`` is ``(B, 1, T, 3)`` int32
    triangle indices, and ``cp_index_0`` / ``cp_index_1`` are positions in
    ``circle2`` of the two junction points between the arcs.
    """
    # mask = (B, 3, H, W)
    half_dim = masks_size / 2
    half_width = half_dim
    half_height = half_dim
    # r = torch.randint(low=5, high=30, size=[1]).item()
    r = 30
    # circle1: outer circle (radius r); circle0: inner circle (radius r-10),
    # both centred in the mask.
    circle1 = []
    for x in range(0, num_points):
        circle1.append([half_width + (math.cos(2 * math.pi / num_points * x) * r),
                        half_height + (math.sin(2 * math.pi / num_points * x) * r)])
    circle0 = []
    for x in range(0, num_points):
        circle0.append([half_width + (math.cos(2 * math.pi / num_points * x) * (r - 10)),
                        half_height + (math.sin(2 * math.pi / num_points * x) * (r - 10))])
    # p0/p1: two anchor points on the outer circle; c2: centre of a second
    # circle offset from the main centre c0 along +y by r + delta.
    p0 = np.array(circle1[num_points//6])
    p1 = np.array(circle1[num_points//3])
    delta = r/10
    c0 = np.array([half_width, half_height])
    c2 = np.array([c0[0], c0[1] + r + delta])
    # theta: half the angle subtended by p0 and p1 as seen from c2.
    cos_theta = np.dot(p0-c2, p1-c2) / (np.linalg.norm(p0-c2) * np.linalg.norm(p1-c2))
    theta = math.acos(cos_theta) / 2
    circle2 = []
    # NOTE: r is intentionally reused here as the radius of the c2-centred arc.
    r = np.linalg.norm(p1-c2)
    num_points2 = num_points//2
    # First part of circle2: the long arc around c2 between p1 and p0.
    for x in range(0, num_points2 - 1):
        circle2.append([c2[0] + (math.cos((2 * math.pi - 2 * theta) / num_points2 * (x+1) - math.pi / 2 + theta) * r),
                        c2[1] + (math.sin((2 * math.pi - 2 * theta) / num_points2 * (x+1) - math.pi / 2 + theta) * r)])
    cp_index_0 = len(circle2) - 1
    # Close the region with the piece of the outer circle between p1 and p0.
    for x in range(num_points//3, num_points//6, -1):
        circle2.append(circle1[x])
    cp_index_1 = len(circle2) - 1
    # Mesh 0: inner disc, normalised to [-1, 1] and Delaunay-triangulated.
    vert0 = np.array(circle0)
    vert0 = (vert0 - half_dim) / half_dim
    tri0 = Delaunay(vert0).simplices.copy()
    vert0 = torch.Tensor(vert0)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1)
    face0 = torch.Tensor(tri0)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1).type(torch.int32)
    vert0[:, :, :, 1] = -vert0[:, :, :, 1]
    # Mesh 1: annulus between circle1 and circle0.  A temporary centre
    # vertex is appended so the Delaunay triangulation covers the hole;
    # every triangle touching it is then dropped and the vertex removed.
    circle1.extend(circle0)
    circle1.append([half_dim, half_dim])
    vert1 = np.array(circle1)
    vert1 = (vert1 - half_dim) / half_dim
    tri1 = Delaunay(vert1).simplices.copy()
    mask = ~(tri1 == vert1.shape[0] - 1).any(axis=1)
    tri1 = tri1[mask, :]
    vert1 = vert1[:-1, :]
    vert1 = torch.Tensor(vert1)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1)
    face1 = torch.Tensor(tri1)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1).type(torch.int32)
    vert1[:, :, :, 1] = -vert1[:, :, :, 1]
    # Mesh 2: the arc-bounded region; triangles outside the polygon outline
    # are filtered out by triangulate_within (defined later in this file).
    vert2 = np.array(circle2)
    vert2 = (vert2 - half_dim) / half_dim
    tri2 = Delaunay(vert2).simplices.copy()
    tri2 = triangulate_within(vert2, tri2)
    vert2 = torch.Tensor(vert2)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1)
    face2 = torch.Tensor(tri2)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1).type(torch.int32)
    vert2[:, :, :, 1] = -vert2[:, :, :, 1]
    return vert0, face0, vert1, face1, vert2, face2, cp_index_0, cp_index_1
def get_circles_2(batch_size, masks_size, num_points, device):
    """Variant of :func:`get_circles` where mesh 1 is the full outer disc
    (not an annulus): both circles get an explicit centre vertex appended,
    so the triangulation fans out from the centre.

    Returns ``(vert0, face0, vert1, face1, vert2, face2, cp_index_0,
    cp_index_1)`` with the same shapes/semantics as :func:`get_circles`.
    """
    # mask = (B, 3, H, W)
    half_dim = masks_size / 2
    half_width = half_dim
    half_height = half_dim
    # r = torch.randint(low=5, high=30, size=[1]).item()
    r = 30
    # Outer circle (radius r) plus its centre vertex.
    circle1 = []
    for x in range(0, num_points):
        circle1.append([half_width + (math.cos(2 * math.pi / num_points * x) * r),
                        half_height + (math.sin(2 * math.pi / num_points * x) * r)])
    circle1.append([half_width, half_height])
    # Inner circle (radius r-10) plus its centre vertex.
    circle0 = []
    for x in range(0, num_points):
        circle0.append([half_width + (math.cos(2 * math.pi / num_points * x) * (r - 10)),
                        half_height + (math.sin(2 * math.pi / num_points * x) * (r - 10))])
    circle0.append([half_width, half_height])
    # Anchor points on the outer circle and the offset centre c2 of the
    # second arc (see get_circles for the same construction).
    p0 = np.array(circle1[num_points//6])
    p1 = np.array(circle1[num_points//3])
    delta = r/10
    c0 = np.array([half_width, half_height])
    c2 = np.array([c0[0], c0[1] + r + delta])
    cos_theta = np.dot(p0-c2, p1-c2) / (np.linalg.norm(p0-c2) * np.linalg.norm(p1-c2))
    theta = math.acos(cos_theta) / 2
    circle2 = []
    # NOTE: r is reused as the radius of the c2-centred arc from here on.
    r = np.linalg.norm(p1-c2)
    num_points2 = num_points//2
    for x in range(0, num_points2 - 1):
        circle2.append([c2[0] + (math.cos((2 * math.pi - 2 * theta) / num_points2 * (x+1) - math.pi / 2 + theta) * r),
                        c2[1] + (math.sin((2 * math.pi - 2 * theta) / num_points2 * (x+1) - math.pi / 2 + theta) * r)])
    cp_index_0 = len(circle2) - 1
    for x in range(num_points//3, num_points//6, -1):
        circle2.append(circle1[x])
    cp_index_1 = len(circle2) - 1
    # Mesh 0: inner disc (centre vertex included in the triangulation).
    vert0 = np.array(circle0)
    vert0 = (vert0 - half_dim) / half_dim
    tri0 = Delaunay(vert0).simplices.copy()
    vert0 = torch.Tensor(vert0)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1)
    face0 = torch.Tensor(tri0)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1).type(torch.int32)
    vert0[:, :, :, 1] = -vert0[:, :, :, 1]
    # Mesh 1: full outer disc (unlike get_circles, no hole is carved out).
    vert1 = np.array(circle1)
    vert1 = (vert1 - half_dim) / half_dim
    tri1 = Delaunay(vert1).simplices.copy()
    vert1 = torch.Tensor(vert1)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1)
    face1 = torch.Tensor(tri1)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1).type(torch.int32)
    vert1[:, :, :, 1] = -vert1[:, :, :, 1]
    # Mesh 2: arc-bounded region, filtered to triangles inside its outline.
    vert2 = np.array(circle2)
    vert2 = (vert2 - half_dim) / half_dim
    tri2 = Delaunay(vert2).simplices.copy()
    tri2 = triangulate_within(vert2, tri2)
    vert2 = torch.Tensor(vert2)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1)
    face2 = torch.Tensor(tri2)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1).type(torch.int32)
    vert2[:, :, :, 1] = -vert2[:, :, :, 1]
    return vert0, face0, vert1, face1, vert2, face2, cp_index_0, cp_index_1
def get_circles_3(par1, par2, batch_size, masks_size, num_points, device, tri0=None, tri1=None, tri2=None):
    """
    Batched, differentiable variant of get_circles driven by predicted
    parameters.  Circle points are built as complex numbers
    (centre + radius * e^{i*phase}) so the whole batch is vectorised.

    par1 (B, 2): c0(x, y)
    par2 (B, 5): r1, factor 0 (r0/r1), theta2/theta2_max, d_c2_c0, theta_c2
    theta2_max = pi if c2 inside c1, otherwise arcsin(r0/d_c2_c0)

    tri0/tri1/tri2 let the caller cache triangulations across calls; when
    None they are recomputed from the first batch element.

    NOTE(review): tensors are moved with .cuda() in several places, so this
    function requires a CUDA device regardless of the `device` argument —
    confirm before using on CPU.

    Returns (vert0, face0, vert1, face1, vert2, face2, tri0, tri1, tri2).
    """
    # mask = (B, 3, H, W)
    half_dim = masks_size / 2
    # vectorize circle 0 and circle 1: map normalised params back to pixels
    c0x = (par1[:, 0] / 2 + 0.5) * 127
    c0y = (par1[:, 1] / 2 + 0.5) * 127
    r1 = par2[:, 0] * 128
    # c0x = (torch.tensor(0).float().repeat(batch_size).cuda() / 2 + 0.5) * 127
    # c0y = (torch.tensor(0).float().repeat(batch_size).cuda() / 2 + 0.5) * 127
    # r1 = torch.tensor(30/128).float().repeat(batch_size).cuda() * 128
    # r0 is a fraction of r1, broadcast to (B, num_points).
    r0 = torch.mul(par2[:, 1], r1).repeat(num_points, 1).transpose(1, 0)
    c0_phase = torch.arange(num_points).repeat(batch_size, 1).cuda()
    c0_phase = 2 * math.pi * c0_phase / num_points
    # z_c0: batch of circle centres as complex numbers.
    z_c0 = torch.complex(real=c0x, imag=c0y)
    c0_angle = torch.exp(torch.complex(real=torch.tensor(0).float().repeat(batch_size, num_points).cuda(), imag=c0_phase))
    # circle0: inner circle points plus the centre vertex appended last.
    circle0 = z_c0.repeat(num_points, 1).transpose(1, 0) + r0 * c0_angle
    circle0 = torch.cat([circle0, z_c0.unsqueeze(1)], dim=1)
    circle0 = torch.view_as_real(circle0)
    vert0 = (circle0 - half_dim) / half_dim
    if tri0 is None:
        # Triangulate once from the first batch element; topology is shared.
        tri0 = Delaunay(vert0[0].detach().cpu().numpy()).simplices.copy()
    vert0 = vert0.unsqueeze(1)
    vert0[:, :, :, 1] = -vert0[:, :, :, 1]
    face0 = torch.Tensor(tri0)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1).type(torch.int32)
    # circle1: outer circle, same phases, radius r1; shares face0 topology.
    circle1 = z_c0.repeat(num_points, 1).transpose(1, 0) + r1.repeat(num_points, 1).transpose(1, 0) * c0_angle
    circle1 = torch.cat([circle1, z_c0.unsqueeze(1)], dim=1)
    circle1 = torch.view_as_real(circle1)
    vert1 = (circle1 - half_dim) / half_dim
    vert1 = vert1.unsqueeze(1)
    vert1[:, :, :, 1] = -vert1[:, :, :, 1]
    face1 = face0
    # to compute circle 2: c2 sits at distance d_c2_c0 from c0 (between
    # r1/2 and 3*r1/2) at angle theta_c2.
    dmin = r1 * 1/2
    dmax = r1 * 3/2
    d_c2_c0 = par2[:, 3] * (dmax - dmin) + dmin
    theta_c2 = par2[:, 4] * math.pi * 2
    # theta_c2 = torch.tensor(math.pi / 2).float().repeat(batch_size).cuda()
    z_c2 = z_c0 + d_c2_c0 * torch.exp(torch.complex(real=torch.tensor(0).float().cuda().repeat(batch_size), imag=theta_c2.float()))
    # theta2 (half-opening angle towards p0/p1) is clamped to [pi/6, 3pi/4].
    theta2_max = torch.tensor(math.pi * 3 / 4).float().repeat(batch_size).cuda()
    theta2_min = torch.tensor(math.pi * 1 / 6).float().repeat(batch_size).cuda()
    theta2 = theta2_min + par2[:, 2] * (theta2_max - theta2_min)
    theta_p0 = theta_c2 - theta2  # theta_p0 = (-pi, 2pi)
    theta_p1 = theta_c2 + theta2  # theta_p1 = (0, 3pi)
    # Wrap both angles into (0, 2pi) and keep theta_p1 >= theta_p0.
    theta_p0 = torch.where(
        theta_p0 < 0,
        theta_p0 + math.pi * 2,
        theta_p0,
    )
    theta_p1 = torch.where(
        theta_p1 > math.pi * 2,
        theta_p1 - math.pi * 2,
        theta_p1,
    )
    theta_p1 = torch.where(
        theta_p1 < theta_p0,
        theta_p1 + math.pi * 2,
        theta_p1
    )
    n_arc_points = num_points // 2
    theta_p0 = theta_p0.repeat(n_arc_points, 1).transpose(1, 0)
    theta_p1 = theta_p1.repeat(n_arc_points, 1).transpose(1, 0)
    arc_count = torch.arange(n_arc_points).repeat(batch_size, 1).cuda()
    # arc: points on the outer circle between theta_p0 and theta_p1.
    arc_phase = theta_p0 + torch.mul(theta_p1 - theta_p0, arc_count) / n_arc_points
    arc_angle = torch.exp(torch.complex(real=torch.tensor(0).float().repeat(batch_size, n_arc_points).cuda(), imag=arc_phase))
    arc = z_c0.repeat(n_arc_points, 1).transpose(1, 0) + r1.repeat(n_arc_points, 1).transpose(1, 0) * arc_angle
    arc_1 = torch.flip(arc, dims=[1])  # p1 to p0 arc
    # r2: radius of the c2-centred arc, from c2 to the last arc_1 point.
    r2 = (torch.view_as_real(z_c2) - torch.view_as_real(arc_1[:, -1])).norm(dim=1)
    # torch.log(z).imag gives the argument (angle) of a complex tensor.
    theta_c2_p0 = torch.log(arc_1[:, -1] - z_c2).imag  # theta_c2_p0 = (-pi, pi)
    theta_c2_p1 = torch.log(arc_1[:, 0] - z_c2).imag  # theta_c2_p1 = (-pi, pi)
    # Wrap into (0, 2pi) and keep theta_c2_p1 >= theta_c2_p0.
    theta_c2_p0 = torch.where(
        theta_c2_p0 < 0,
        theta_c2_p0 + math.pi * 2,
        theta_c2_p0,
    )
    theta_c2_p1 = torch.where(
        theta_c2_p1 < 0,
        theta_c2_p1 + math.pi * 2,
        theta_c2_p1,
    )
    theta_c2_p1 = torch.where(
        theta_c2_p0 > theta_c2_p1,
        theta_c2_p1 + math.pi * 2,
        theta_c2_p1,
    )
    theta_c2_p0 = theta_c2_p0.repeat(n_arc_points, 1).transpose(1, 0)
    theta_c2_p1 = theta_c2_p1.repeat(n_arc_points, 1).transpose(1, 0)
    # arc_2: points on the c2-centred arc between the two junctions.
    arc_phase = theta_c2_p0 + torch.mul(theta_c2_p1 - theta_c2_p0, arc_count) / n_arc_points
    arc_angle = torch.exp(torch.complex(real=torch.tensor(0).float().repeat(batch_size, n_arc_points).cuda(), imag=arc_phase))
    arc_2 = z_c2.repeat(n_arc_points, 1).transpose(1, 0) + r2.repeat(n_arc_points, 1).transpose(1, 0) * arc_angle
    # circle2: closed outline = c2-arc followed by the reversed outer arc.
    circle2 = torch.cat([torch.view_as_real(arc_2), torch.view_as_real(arc_1)], dim=1)
    vert2 = (circle2 - half_dim) / half_dim
    if tri2 is None:
        tri2 = Delaunay(vert2[0].detach().cpu().numpy()).simplices.copy()
        tri2 = triangulate_within(vert2[0].detach().cpu().numpy(), tri2)
    vert2 = vert2.unsqueeze(1)
    vert2[:, :, :, 1] = -vert2[:, :, :, 1]
    face2 = torch.Tensor(tri2)[None, None, ...].to(device).repeat(batch_size, 1, 1, 1).type(torch.int32)
    return vert0, face0, vert1, face1, vert2, face2, tri0, tri1, tri2
def triangulate_circle_2(n_c2, n_tot):
    """Build a fixed band triangulation pairing the first ``n_c2 + 1``
    vertices with the last ones, walking inwards from both ends.

    For each ``i`` in ``[0, n_c2)`` four triangles are emitted connecting
    vertices ``i``/``i+1`` with ``n_tot-i-1``/``n_tot-i-2``.

    :param n_c2: number of steps to triangulate (must leave the index
        pairs valid, i.e. roughly ``n_c2 <= n_tot / 2``).
    :param n_tot: total number of vertices in the outline.
    :return: (4 * n_c2, 3) int array of triangle index triples
        (shape (0,) when ``n_c2`` is 0).
    """
    # NOTE(review): the original version printed its arguments (debug
    # leftover) and had a dead `if n_c2 > n_tot / 2: pass` branch; both
    # removed. The index arithmetic is unchanged.
    tris = []
    for i in range(n_c2):
        tris.extend([
            [i, i+1, n_tot - i - 1],
            [i, i+1, n_tot - i - 2],
            [n_tot - i-1, n_tot - i - 2, i],
            [n_tot - i-1, n_tot - i - 2, i + 1],
        ])
    return np.array(tris)
def pad_tri2(tri2, n):
    """Pad a (T, 3) triangle-index array up to ``n`` rows.

    Filler rows are consecutive index triples ``[i, i+1, i+2]``.

    :param tri2: (T, 3) integer array of triangle indices.
    :param n: desired total number of rows.
    :return: (max(T, n), 3) array; ``tri2`` itself when no padding is
        needed.
    """
    extra = n - tri2.shape[0]
    if extra <= 0:
        # Already at (or beyond) the requested size.  The original code
        # crashed here: np.array([]) has shape (0,), so np.concatenate
        # raised a dimension-mismatch error.
        return tri2
    padding = np.array([[i, i + 1, i + 2] for i in range(extra)])
    return np.concatenate([tri2, padding], axis=0)
def triangulate_within(vert, faces):
    """Keep only the triangles that lie inside the polygon traced by ``vert``.

    :param vert: (N, 2) array of vertices outlining a polygon.
    :param faces: (F, 3) array of candidate triangle index triples.
    :return: array of the surviving faces, stacked with ``np.stack``.

    If no triangle survives, the outline and all candidate triangles are
    drawn with matplotlib for debugging, after which ``np.stack`` raises
    on the empty list (matching the original behaviour).
    """
    polygon = geometry.Polygon(vert)
    output = [face for face in faces
              if geometry.Polygon(vert[face, :]).within(polygon)]
    if not output:
        # Debug visualisation: scale back to a 128x128 canvas and plot
        # the outline plus every candidate triangle's edges.
        pts = vert * 64 + 64
        plt.imshow(np.zeros((128, 128)))
        plt.plot(pts[:, 0], pts[:, 1], 'bx-')
        for f in range(faces.shape[0]):
            a = pts[faces[f, 0], :]
            b = pts[faces[f, 1], :]
            c = pts[faces[f, 2], :]
            plt.plot([a[0], b[0]], [a[1], b[1]], 'k-')
            plt.plot([a[0], c[0]], [a[1], c[1]], 'k-')
            plt.plot([c[0], b[0]], [c[1], b[1]], 'k-')
        plt.show()
    return np.stack(output)
|
{"hexsha": "c80b1738980303ce0df0aa84bff6f962a89ad17c", "size": 13335, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/topology.py", "max_stars_repo_name": "lisurui6/acdrnet", "max_stars_repo_head_hexsha": "0f6fcdf1b441801b073c93aa07a2f537ce1cc565", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/topology.py", "max_issues_repo_name": "lisurui6/acdrnet", "max_issues_repo_head_hexsha": "0f6fcdf1b441801b073c93aa07a2f537ce1cc565", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/topology.py", "max_forks_repo_name": "lisurui6/acdrnet", "max_forks_repo_head_hexsha": "0f6fcdf1b441801b073c93aa07a2f537ce1cc565", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9912280702, "max_line_length": 131, "alphanum_fraction": 0.5908511436, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4604}
|
"""
Utility functions
"""
import logging
import configparser
import numpy as np
from itertools import chain
def load_glove(file, dim):
    """Read a GloVe text file into a word -> vector mapping.

    Args:
        file (str): path to the GloVe vectors file (one word plus its
            space-separated float components per line).
        dim (int): expected embedding dimensionality.

    Returns:
        dict: mapping of word to numpy array of length ``dim``.

    Raises:
        ValueError: if any line's vector length differs from ``dim``.
    """
    model = {}
    with open(file) as f:
        for line in f:
            word, *values = line.split(' ')
            vector = np.array([float(val) for val in values])
            if vector.shape[0] != dim:
                raise ValueError('glove embedding dimension: {} unmatch with params: {}'.format(len(vector), dim))
            model[word] = vector
    return model
def generate_tags(tagging_format, ner_tags):
    """
    Expand entity-level labels into word-level NER tags,
    e.g. (B-PPL, I-PPL, L-PPL, O, ...).

    :param tagging_format: bio, bioes, or biolu
    :param ner_tags: entity level ner tags E.G.(PPL, COM, GOV, GPE, ACA)
    :return: list of word-level tags, ending with 'O'
    """
    signs_by_format = {
        'bio': ['B', 'I'],
        'bioes': ['B', 'I', 'E', 'S'],
        'biolu': ['B', 'I', 'L', 'U'],
    }
    if tagging_format not in signs_by_format:
        raise ValueError('tagging_format must be either bio, bioes, or biolu')
    # One prefixed tag per (entity, sign) pair, entities in outer order.
    tags = [sign + '-' + ner_tag
            for ner_tag in ner_tags
            for sign in signs_by_format[tagging_format]]
    tags.append('O')
    return tags
def flatten(list_of_list):
    """Concatenate an iterable of iterables into a single flat list."""
    return [item for sublist in list_of_list for item in sublist]
def get_lengths(y_true_onehot_seq):
    """Return the unpadded length of each one-hot encoded label sequence.

    A label index of 0 marks padding: each sequence's length is the
    position of its first 0 label, or the full sequence length when no
    0 occurs.
    """
    label_seqs = np.argmax(y_true_onehot_seq, -1)
    lengths = []
    for labels in label_seqs:
        labels = list(labels)
        if 0 in labels:
            lengths.append(labels.index(0))
        else:
            lengths.append(len(labels))
    return lengths
def set_logger(log_path):
    """Sets the root logger to log info in terminal and file `log_path`.

    In general, it is useful to have a logger so that every output to the
    terminal is saved in a permanent file. Here we save it to
    `model_dir/train.log`.

    Handlers are only installed if the root logger has none yet, so calling
    this more than once (e.g. across multiple training runs in one process)
    no longer stacks duplicate handlers and duplicates every log line.

    Example:
    ```
    logging.info("Starting training...")
    ```

    Args:
        log_path: (string) where to log
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if not logger.handlers:
        # Logging to a file
        file_handler = logging.FileHandler(log_path)
        file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logger.addHandler(file_handler)
        # Logging to console
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(logging.Formatter('%(message)s'))
        logger.addHandler(stream_handler)
|
{"hexsha": "d67df80be4e3143e1ed8ed440709edea3bceebc3", "size": 2854, "ext": "py", "lang": "Python", "max_stars_repo_path": "deepsequence/utils.py", "max_stars_repo_name": "yangdc1992/deepsequence", "max_stars_repo_head_hexsha": "577c2140271d2d725806c3e1a14fbd5324d20bca", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deepsequence/utils.py", "max_issues_repo_name": "yangdc1992/deepsequence", "max_issues_repo_head_hexsha": "577c2140271d2d725806c3e1a14fbd5324d20bca", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deepsequence/utils.py", "max_forks_repo_name": "yangdc1992/deepsequence", "max_forks_repo_head_hexsha": "577c2140271d2d725806c3e1a14fbd5324d20bca", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.54, "max_line_length": 115, "alphanum_fraction": 0.5925017519, "include": true, "reason": "import numpy", "num_tokens": 681}
|
r"""
Computation of Frobenius matrix on Monsky-Washnitzer cohomology
The most interesting functions to be exported here are
:func:`matrix_of_frobenius` and :func:`adjusted_prec`.
Currently this code is limited to the case `p \geq 5` (no
`GF(p^n)` for `n > 1`), and only handles the
elliptic curve case (not more general hyperelliptic curves).
REFERENCES:
- [Ked2001]_
- [Edix]_
AUTHORS:
- David Harvey and Robert Bradshaw: initial code developed at the 2006
MSRI graduate workshop, working with Jennifer Balakrishnan and Liang
Xiao
- David Harvey (2006-08): cleaned up, rewrote some chunks, lots more
documentation, added Newton iteration method, added more complete
'trace trick', integrated better into Sage.
- David Harvey (2007-02): added algorithm with sqrt(p) complexity
(removed in May 2007 due to better C++ implementation)
- Robert Bradshaw (2007-03): keep track of exact form in reduction
algorithms
- Robert Bradshaw (2007-04): generalization to hyperelliptic curves
- Julian Rueth (2014-05-09): improved caching
"""
# ****************************************************************************
# Copyright (C) 2006 William Stein <wstein@gmail.com>
# 2006 Robert Bradshaw <robertwb@math.washington.edu>
# 2006 David Harvey <dmharvey@math.harvard.edu>
# 2014 Julian Rueth <julian.rueth@fsfe.org>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function
from sage.arith.all import binomial, integer_ceil as ceil
from sage.functions.log import log
from sage.functions.other import floor
from sage.matrix.all import matrix
from sage.misc.cachefunc import cached_method
from sage.misc.misc import newton_method_sizes, repr_lincomb
from sage.misc.profiler import Profiler
from sage.modules.all import vector
from sage.modules.free_module import FreeModule
from sage.modules.free_module_element import is_FreeModuleElement
from sage.modules.module import Module
from sage.rings.all import (Integers, Integer, PolynomialRing, PowerSeriesRing,
Rationals, Rational, LaurentSeriesRing, QQ, IntegralDomain)
from sage.rings.infinity import Infinity
from sage.rings.laurent_series_ring import is_LaurentSeriesRing
from sage.rings.padics.all import pAdicField
from sage.rings.polynomial.polynomial_element import is_Polynomial
from sage.rings.ring import CommutativeAlgebra
from sage.schemes.elliptic_curves.constructor import EllipticCurve
from sage.schemes.elliptic_curves.ell_generic import is_EllipticCurve
from sage.schemes.hyperelliptic_curves.constructor import HyperellipticCurve
from sage.schemes.hyperelliptic_curves.hyperelliptic_generic import is_HyperellipticCurve
from sage.structure.element import CommutativeAlgebraElement, ModuleElement
from sage.structure.richcmp import richcmp
from sage.structure.unique_representation import UniqueRepresentation
class SpecialCubicQuotientRing(CommutativeAlgebra):
    r"""
    Specialised class for representing the quotient ring
    `R[x,T]/(T - x^3 - ax - b)`, where `R` is an
    arbitrary commutative base ring (in which 2 and 3 are invertible),
    `a` and `b` are elements of that ring.
    Polynomials are represented internally in the form
    `p_0 + p_1 x + p_2 x^2` where the `p_i` are
    polynomials in `T`. Multiplication of polynomials always
    reduces high powers of `x` (i.e. beyond `x^2`) to
    powers of `T`.
    Hopefully this ring is faster than a general quotient ring because
    it uses the special structure of this ring to speed multiplication
    (which is the dominant operation in the frobenius matrix
    calculation). I haven't actually tested this theory though...
    .. TODO::
        Eventually we will want to run this in characteristic 3, so we
        need to: (a) Allow `Q(x)` to contain an `x^2` term, and (b) Remove
        the requirement that 3 be invertible. Currently this is used in
        the Toom-Cook algorithm to speed multiplication.
    EXAMPLES::
        sage: B.<t> = PolynomialRing(Integers(125))
        sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
        sage: R
        SpecialCubicQuotientRing over Ring of integers modulo 125 with polynomial T = x^3 + 124*x + 94
    Get generators::
        sage: x, T = R.gens()
        sage: x
        (0) + (1)*x + (0)*x^2
        sage: T
        (T) + (0)*x + (0)*x^2
    Coercions::
        sage: R(7)
        (7) + (0)*x + (0)*x^2
    Create elements directly from polynomials::
        sage: A = R.poly_ring()
        sage: A
        Univariate Polynomial Ring in T over Ring of integers modulo 125
        sage: z = A.gen()
        sage: R.create_element(z^2, z+1, 3)
        (T^2) + (T + 1)*x + (3)*x^2
    Some arithmetic::
        sage: x^3
        (T + 31) + (1)*x + (0)*x^2
        sage: 3 * x**15 * T**2 + x - T
        (3*T^7 + 90*T^6 + 110*T^5 + 20*T^4 + 58*T^3 + 26*T^2 + 124*T) + (15*T^6 + 110*T^5 + 35*T^4 + 63*T^2 + 1)*x + (30*T^5 + 40*T^4 + 8*T^3 + 38*T^2)*x^2
    Retrieve coefficients (output is zero-padded)::
        sage: x^10
        (3*T^2 + 61*T + 8) + (T^3 + 93*T^2 + 12*T + 40)*x + (3*T^2 + 61*T + 9)*x^2
        sage: (x^10).coeffs()
        [[8, 61, 3, 0], [40, 12, 93, 1], [9, 61, 3, 0]]
    .. TODO::
        write an example checking multiplication of these polynomials
        against Sage's ordinary quotient ring arithmetic. I can't seem
        to get the quotient ring stuff happening right now...
    """
    def __init__(self, Q, laurent_series=False):
        """
        Constructor.
        INPUT:
        - ``Q`` -- a polynomial of the form
          `Q(x) = x^3 + ax + b`, where `a`, `b` belong to a ring in which
          2, 3 are invertible.
        - ``laurent_series`` -- whether or not to allow
          negative powers of `T` (default=False)
        EXAMPLES::
            sage: B.<t> = PolynomialRing(Integers(125))
            sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
            sage: R
            SpecialCubicQuotientRing over Ring of integers modulo 125 with polynomial T = x^3 + 124*x + 94
        ::
            sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 + 2*t^2 - t + B(1/4))
            Traceback (most recent call last):
            ...
            ValueError: Q (=t^3 + 2*t^2 + 124*t + 94) must be of the form x^3 + ax + b
        ::
            sage: B.<t> = PolynomialRing(Integers(10))
            sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + 1)
            Traceback (most recent call last):
            ...
            ArithmeticError: 2 and 3 must be invertible in the coefficient ring (=Ring of integers modulo 10) of Q
        """
        # Validate that Q is a cubic with no x^2 term: Q = x^3 + ax + b.
        if not is_Polynomial(Q):
            raise TypeError("Q (=%s) must be a polynomial" % Q)
        if Q.degree() != 3 or not Q[2].is_zero():
            raise ValueError("Q (=%s) must be of the form x^3 + ax + b" % Q)
        base_ring = Q.parent().base_ring()
        # 6 is a unit iff both 2 and 3 are units; both are needed by the
        # Toom-Cook interpolation matrix below.
        if not base_ring(6).is_unit():
            raise ArithmeticError("2 and 3 must be invertible in the "
                                  "coefficient ring (=%s) of Q" % base_ring)
        # CommutativeAlgebra.__init__ tries to establish a coercion
        # from the base ring, by trac ticket #9138. The corresponding
        # hom set is cached. In order to use self as cache key, its
        # string representation is used. In order to get the string
        # representation, we need to know the attributes _a and
        # _b. Hence, in #9138, we have to move CommutativeAlgebra.__init__
        # further down:
        self._a = Q[1]
        self._b = Q[0]
        if laurent_series:
            self._poly_ring = LaurentSeriesRing(base_ring, 'T')  # R[T]
        else:
            self._poly_ring = PolynomialRing(base_ring, 'T')  # R[T]
        self._poly_generator = self._poly_ring.gen(0)  # the generator T
        CommutativeAlgebra.__init__(self, base_ring)
        # Precompute a matrix that is used in the Toom-Cook multiplication.
        # This is where we need 2 and 3 invertible.
        # (a good description of Toom-Cook is online at:
        # http://www.gnu.org/software/gmp/manual/html_node/Toom-Cook-3-Way-Multiplication.html)
        self._speedup_matrix = (matrix(Integers(), 3, 3, [2, 4, 8,
                                                          1, 1, 1,
                                                          8, 4, 2])**(-1)).change_ring(base_ring).list()
    def __repr__(self):
        """
        String representation
        EXAMPLES::
            sage: B.<t> = PolynomialRing(Integers(125))
            sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
            sage: print(R)
            SpecialCubicQuotientRing over Ring of integers modulo 125 with polynomial T = x^3 + 124*x + 94
        """
        return "SpecialCubicQuotientRing over %s with polynomial T = %s" % \
            (self.base_ring(), PolynomialRing(self.base_ring(), 'x')(
                [self._b, self._a, 0, 1]))
    def poly_ring(self):
        """
        Return the underlying polynomial ring in `T`.
        EXAMPLES::
            sage: B.<t> = PolynomialRing(Integers(125))
            sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
            sage: R.poly_ring()
            Univariate Polynomial Ring in T over Ring of integers modulo 125
        """
        return self._poly_ring
    def gens(self):
        """
        Return a list [x, T] where x and T are the generators of the ring
        (as element *of this ring*).
        .. note::
            I have no idea if this is compatible with the usual Sage
            'gens' interface.
        EXAMPLES::
            sage: B.<t> = PolynomialRing(Integers(125))
            sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
            sage: x, T = R.gens()
            sage: x
            (0) + (1)*x + (0)*x^2
            sage: T
            (T) + (0)*x + (0)*x^2
        """
        # x is the triple (0, 1, 0); T is the triple (T, 0, 0).
        return [SpecialCubicQuotientRingElement(self, self._poly_ring(0),
                                                self._poly_ring(1),
                                                self._poly_ring(0),
                                                check=False),
                SpecialCubicQuotientRingElement(self, self._poly_generator,
                                                self._poly_ring(0),
                                                self._poly_ring(0),
                                                check=False)]
    def create_element(self, p0, p1, p2, check=True):
        """
        Creates the element `p_0 + p_1*x + p_2*x^2`, where the `p_i`
        are polynomials in `T`.
        INPUT:
        - ``p0, p1, p2`` -- coefficients; must be coercible
          into poly_ring()
        - ``check`` -- bool (default True): whether to carry
          out coercion
        EXAMPLES::
            sage: B.<t> = PolynomialRing(Integers(125))
            sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
            sage: A, z = R.poly_ring().objgen()
            sage: R.create_element(z^2, z+1, 3)
            (T^2) + (T + 1)*x + (3)*x^2
        """
        return SpecialCubicQuotientRingElement(self, p0, p1, p2, check)
    def __call__(self, value):
        """
        EXAMPLES::
            sage: B.<t> = PolynomialRing(Integers(125))
            sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
            sage: R(3)
            (3) + (0)*x + (0)*x^2
        """
        return self._coerce_(value)
    def _coerce_impl(self, value):
        """
        EXAMPLES::
            sage: B.<t> = PolynomialRing(Integers(125))
            sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
            sage: R._coerce_impl(3)
            (3) + (0)*x + (0)*x^2
        """
        # coerce to underlying polynomial ring (possibly via base ring);
        # the result becomes the x^0 component of the new element.
        value = self._poly_ring._coerce_(value)
        return SpecialCubicQuotientRingElement(self, value, self._poly_ring(0),
                                               self._poly_ring(0), check=False)
class SpecialCubicQuotientRingElement(CommutativeAlgebraElement):
"""
An element of a SpecialCubicQuotientRing.
"""
def __init__(self, parent, p0, p1, p2, check=True):
"""
Constructs the element `p_0 + p_1*x + p_2*x^2`, where
the `p_i` are polynomials in `T`.
INPUT:
- ``parent`` -- a SpecialCubicQuotientRing
- ``p0, p1, p2`` -- coefficients; must be coercible
into parent.poly_ring()
- ``check`` -- bool (default True): whether to carry
out coercion
EXAMPLES::
sage: B.<t> = PolynomialRing(Integers(125))
sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
sage: from sage.schemes.hyperelliptic_curves.monsky_washnitzer import SpecialCubicQuotientRingElement
sage: SpecialCubicQuotientRingElement(R, 2, 3, 4)
(2) + (3)*x + (4)*x^2
"""
if not isinstance(parent, SpecialCubicQuotientRing):
raise TypeError("parent (=%s) must be a SpecialCubicQuotientRing" % parent)
CommutativeAlgebraElement.__init__(self, parent)
if check:
poly_ring = parent.poly_ring()
p0 = poly_ring(p0)
p1 = poly_ring(p1)
p2 = poly_ring(p2)
self._triple = (p0, p1, p2)
def coeffs(self):
"""
Returns list of three lists of coefficients, corresponding to the
`x^0`, `x^1`, `x^2` coefficients. The lists
are zero padded to the same length. The list entries belong to the
base ring.
EXAMPLES::
sage: B.<t> = PolynomialRing(Integers(125))
sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
sage: p = R.create_element(t, t^2 - 2, 3)
sage: p.coeffs()
[[0, 1, 0], [123, 0, 1], [3, 0, 0]]
"""
coeffs = [column.coefficients(sparse=False) for column in self._triple]
degree = max([len(x) for x in coeffs])
base_ring = self.parent().base_ring()
for column in coeffs:
column.extend([base_ring(0)] * (degree - len(column)))
return coeffs
def __bool__(self):
"""
EXAMPLES::
sage: B.<t> = PolynomialRing(Integers(125))
sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
sage: x, T = R.gens()
sage: not x
False
sage: not T
False
sage: not R.create_element(0, 0, 0)
True
"""
return not not self._triple[0] or not not self._triple[1] or not not self._triple[2]
__nonzero__ = __bool__
def _richcmp_(self, other, op):
"""
EXAMPLES::
sage: B.<t> = PolynomialRing(Integers(125))
sage: x, t = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4)).gens()
sage: x == t
False
sage: x == x
True
sage: x == x + x - x
True
"""
return richcmp(self._triple, other._triple, op)
def _repr_(self):
"""
EXAMPLES::
sage: B.<t> = PolynomialRing(Integers(125))
sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
sage: x, T = R.gens()
sage: x + T*x - 2*T^2
(123*T^2) + (T + 1)*x + (0)*x^2
"""
return "(%s) + (%s)*x + (%s)*x^2" % self._triple
def _latex_(self):
"""
EXAMPLES::
sage: B.<t> = PolynomialRing(Integers(125))
sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
sage: x, T = R.gens()
sage: f = x + T*x - 2*T^2
sage: latex(f)
(123 T^{2}) + (T + 1)x + (0)x^2
"""
return "(%s) + (%s)x + (%s)x^2" % \
tuple([column._latex_() for column in self._triple])
def _add_(self, other):
"""
EXAMPLES::
sage: B.<t> = PolynomialRing(Integers(125))
sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
sage: f = R.create_element(2, t, t^2 - 3)
sage: g = R.create_element(3 + t, -t, t)
sage: f + g
(T + 5) + (0)*x + (T^2 + T + 122)*x^2
"""
return SpecialCubicQuotientRingElement(self.parent(),
self._triple[0] + other._triple[0],
self._triple[1] + other._triple[1],
self._triple[2] + other._triple[2],
check=False)
def _sub_(self, other):
"""
EXAMPLES::
sage: B.<t> = PolynomialRing(Integers(125))
sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
sage: f = R.create_element(2, t, t^2 - 3)
sage: g = R.create_element(3 + t, -t, t)
sage: f - g
(124*T + 124) + (2*T)*x + (T^2 + 124*T + 122)*x^2
"""
return SpecialCubicQuotientRingElement(self.parent(),
self._triple[0] - other._triple[0],
self._triple[1] - other._triple[1],
self._triple[2] - other._triple[2],
check=False)
def shift(self, n):
"""
Returns this element multiplied by `T^n`.
EXAMPLES::
sage: B.<t> = PolynomialRing(Integers(125))
sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
sage: f = R.create_element(2, t, t^2 - 3)
sage: f
(2) + (T)*x + (T^2 + 122)*x^2
sage: f.shift(1)
(2*T) + (T^2)*x + (T^3 + 122*T)*x^2
sage: f.shift(2)
(2*T^2) + (T^3)*x + (T^4 + 122*T^2)*x^2
"""
return SpecialCubicQuotientRingElement(self.parent(),
self._triple[0].shift(n),
self._triple[1].shift(n),
self._triple[2].shift(n),
check=False)
def scalar_multiply(self, scalar):
"""
Multiplies this element by a scalar, i.e. just multiply each
coefficient of `x^j` by the scalar.
INPUT:
- ``scalar`` -- either an element of base_ring, or an
element of poly_ring.
EXAMPLES::
sage: B.<t> = PolynomialRing(Integers(125))
sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
sage: x, T = R.gens()
sage: f = R.create_element(2, t, t^2 - 3)
sage: f
(2) + (T)*x + (T^2 + 122)*x^2
sage: f.scalar_multiply(2)
(4) + (2*T)*x + (2*T^2 + 119)*x^2
sage: f.scalar_multiply(t)
(2*T) + (T^2)*x + (T^3 + 122*T)*x^2
"""
scalar = self.parent()._poly_ring(scalar)
return SpecialCubicQuotientRingElement(self.parent(),
scalar * self._triple[0],
scalar * self._triple[1],
scalar * self._triple[2],
check=False)
def square(self):
"""
EXAMPLES::
sage: B.<t> = PolynomialRing(Integers(125))
sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
sage: x, T = R.gens()
::
sage: f = R.create_element(1 + 2*t + 3*t^2, 4 + 7*t + 9*t^2, 3 + 5*t + 11*t^2)
sage: f.square()
(73*T^5 + 16*T^4 + 38*T^3 + 39*T^2 + 70*T + 120) + (121*T^5 + 113*T^4 + 73*T^3 + 8*T^2 + 51*T + 61)*x + (18*T^4 + 60*T^3 + 22*T^2 + 108*T + 31)*x^2
"""
return self * self
def _mul_(self, other):
    """
    Multiply this element by ``other`` (or by a scalar, if ``other`` is
    not a ring element of the same kind).

    EXAMPLES::

        sage: B.<t> = PolynomialRing(Integers(125))
        sage: R = monsky_washnitzer.SpecialCubicQuotientRing(t^3 - t + B(1/4))
        sage: x, T = R.gens()

    ::

        sage: f = R.create_element(1 + 2*t + 3*t^2, 4 + 7*t + 9*t^2, 3 + 5*t + 11*t^2)
        sage: g = R.create_element(4 + 3*t + 7*t^2, 2 + 3*t + t^2, 8 + 4*t + 6*t^2)
        sage: f * g
        (65*T^5 + 27*T^4 + 33*T^3 + 75*T^2 + 120*T + 57) + (66*T^5 + T^4 + 123*T^3 + 95*T^2 + 24*T + 50)*x + (45*T^4 + 75*T^3 + 37*T^2 + 2*T + 52)*x^2
    """
    if not isinstance(other, SpecialCubicQuotientRingElement):
        return self.scalar_multiply(other)
    # Here we do Toom-Cook three-way multiplication, which reduces the
    # naive 9 polynomial multiplications to only 5 polynomial multiplications.
    a0, a1, a2 = self._triple
    b0, b1, b2 = other._triple
    # Precomputed interpolation matrix (see SpecialCubicQuotientRing)
    # used below to recover c1, c2, c3 from the five point products.
    M = self.parent()._speedup_matrix
    if self is other:
        # faster method if we're squaring: only 5 squarings,
        # no general products are needed.
        p0 = a0 * a0
        temp = a0 + 2*a1 + 4*a2
        p1 = temp * temp
        temp = a0 + a1 + a2
        p2 = temp * temp
        temp = 4*a0 + 2*a1 + a2
        p3 = temp * temp
        p4 = a2 * a2
    else:
        # The five evaluation products of the Toom-Cook scheme.
        p0 = a0 * b0
        p1 = (a0 + 2*a1 + 4*a2) * (b0 + 2*b1 + 4*b2)
        p2 = (a0 + a1 + a2) * (b0 + b1 + b2)
        p3 = (4*a0 + 2*a1 + a2) * (4*b0 + 2*b1 + b2)
        p4 = a2 * b2
    # Subtract the known outer coefficients (c0 = p0, c4 = p4) before
    # interpolating for the middle coefficients.
    q1 = p1 - p0 - 16*p4
    q2 = p2 - p0 - p4
    q3 = p3 - 16*p0 - p4
    c0 = p0
    c1 = M[0]*q1 + M[1]*q2 + M[2]*q3
    c2 = M[3]*q1 + M[4]*q2 + M[5]*q3
    c3 = M[6]*q1 + M[7]*q2 + M[8]*q3
    c4 = p4
    # Now the product is c0 + c1 x + c2 x^2 + c3 x^3 + c4 x^4.
    # We need to reduce mod y = x^3 + ax + b and return result.
    parent = self.parent()
    T = parent._poly_generator
    b = parent._b
    a = parent._a
    # todo: These lines are necessary to get binop stuff working
    # for certain base rings, e.g. when we compute b*c3 in the
    # final line. They shouldn't be necessary. Need to fix this
    # somewhere else in Sage.
    a = parent._poly_ring(a)
    b = parent._poly_ring(b)
    # Substitute x^3 = T - a x - b (and x^4 = T x - a x^2 - b x).
    return SpecialCubicQuotientRingElement(parent,
                                           -b*c3 + c0 + c3*T,
                                           -b*c4 - a*c3 + c1 + c4*T,
                                           -a*c4 + c2,
                                           check=False)
def transpose_list(input):
    """
    Transpose a rectangular list of lists.

    INPUT:

    - ``input`` -- a list of lists, each list of the same
      length

    OUTPUT:

    - ``output`` -- a list of lists such that output[i][j]
      = input[j][i]

    EXAMPLES::

        sage: from sage.schemes.hyperelliptic_curves.monsky_washnitzer import transpose_list
        sage: L = [[1, 2], [3, 4], [5, 6]]
        sage: transpose_list(L)
        [[1, 3, 5], [2, 4, 6]]
    """
    # zip(*input) yields the columns of the input as tuples; convert each
    # back to a list. Unlike indexing input[0] directly, this also handles
    # an empty input gracefully (returning []).
    return [list(column) for column in zip(*input)]
def helper_matrix(Q):
    """
    Computes the (constant) matrix used to calculate the linear
    combinations of the `d(x^i y^j)` needed to eliminate the
    negative powers of `y` in the cohomology (i.e. in
    reduce_negative()).

    INPUT:

    - ``Q`` -- cubic polynomial

    EXAMPLES::

        sage: t = polygen(QQ,'t')
        sage: from sage.schemes.hyperelliptic_curves.monsky_washnitzer import helper_matrix
        sage: helper_matrix(t**3-4*t-691)
        [ 64/12891731 -16584/12891731 4297329/12891731]
        [ 6219/12891731 -32/12891731 8292/12891731]
        [ -24/12891731 6219/12891731 -32/12891731]
    """
    # Q is assumed of the form x^3 + a*x + b, so a and b are the
    # degree-1 and degree-0 coefficients respectively.
    a = Q[1]
    b = Q[0]
    # Discriminant (should be invertible for a curve of good reduction)
    D = 4*a**3 + 27*b**2
    Dinv = D**(-1)  # NB do not use 1/D
    # This is the inverse of the matrix
    # [ a, -3b, 0 ]
    # [ 0, -2a, -3b ]
    # [ 3, 0, -2a ]
    # (the adjugate divided by the determinant D).
    return Dinv * matrix([[4*a**2, -6*b*a, 9*b**2],
                          [-9*b, -2*a**2, 3*b*a],
                          [6*a, -9*b, -2*a**2]])
def lift(x):
    r"""
    Tries to call x.lift(), presumably from the `p`-adics to ZZ.

    If this fails, it assumes the input is a power series, and tries to
    lift it to a power series over QQ.

    This function is just a very kludgy solution to the problem of
    trying to make the reduction code (below) work over both Zp and
    Zp[[t]].

    EXAMPLES::

        sage: from sage.schemes.hyperelliptic_curves.monsky_washnitzer import lift
        sage: l = lift(Qp(13)(131)); l
        131
        sage: l.parent()
        Integer Ring
        sage: x=PowerSeriesRing(Qp(17),'x').gen()
        sage: l = lift(4+5*x+17*x**6); l
        4 + 5*t + 17*t^6
        sage: l.parent()
        Power Series Ring in t over Rational Field
    """
    try:
        return x.lift()
    except AttributeError:
        pass
    # ``x`` has no lift() method: assume it is a power series and lift it
    # coefficientwise to a power series over QQ, keeping the precision.
    return PowerSeriesRing(Rationals(), "t")(x.list(), x.prec())
def reduce_negative(Q, p, coeffs, offset, exact_form=None):
    """
    Applies cohomology relations to incorporate negative powers of
    `y` into the `y^0` term.

    INPUT:

    - ``p`` -- prime

    - ``Q`` -- cubic polynomial

    - ``coeffs`` -- list of length 3 lists. The
      `i^{th}` list [a, b, c] represents
      `y^{2(i - offset)} (a + bx + cx^2) dx/y`.

    - ``offset`` -- nonnegative integer

    OUTPUT: The reduction is performed in-place. The output is placed
    in coeffs[offset]. Note that coeffs[i] will be meaningless for
    i < offset after this function is finished.

    EXAMPLES::

        sage: R.<x> = Integers(5^3)['x']
        sage: Q = x^3 - x + R(1/4)
        sage: coeffs = [[10, 15, 20], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
        sage: coeffs = [[R.base_ring()(a) for a in row] for row in coeffs]
        sage: monsky_washnitzer.reduce_negative(Q, 5, coeffs, 3)
        sage: coeffs[3]
        [28, 52, 9]

    ::

        sage: R.<x> = Integers(7^3)['x']
        sage: Q = x^3 - x + R(1/4)
        sage: coeffs = [[7, 14, 21], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
        sage: coeffs = [[R.base_ring()(a) for a in row] for row in coeffs]
        sage: monsky_washnitzer.reduce_negative(Q, 7, coeffs, 3)
        sage: coeffs[3]
        [245, 332, 9]
    """
    # Constant interpolation matrix (flattened) used to compute the
    # exact-form coefficients; see helper_matrix().
    m = helper_matrix(Q).list()
    base_ring = Q.base_ring()
    next_a = coeffs[0]
    if exact_form is not None:
        x = exact_form.parent().gen(0)
        y = exact_form.parent()(exact_form.parent().base_ring().gen(0))
    try:
        # Maintain 3j+5 and 3j+7 incrementally (in the base ring) instead
        # of re-coercing them on every loop iteration.
        three_j_plus_5 = 5 - base_ring(6*offset)
        three_j_plus_7 = 7 - base_ring(6*offset)
        six = base_ring(6)
        # Walk from the most negative power of y up towards y^0,
        # folding each row into the next one.
        for i in range(offset):
            j = 2*(i-offset)  # the (negative) exponent of y for this row
            a = next_a
            next_a = coeffs[i+1]
            # todo: the following divisions will sometimes involve
            # a division by (a power of) p. In all cases, we know (from
            # Kedlaya's estimates) that the answer should be p-integral.
            # However, since we're working over $Z/p^k Z$, we're not allowed
            # to "divide by p". So currently we lift to Q, divide, and coerce
            # back. Eventually, when pAdicInteger is implemented, and plays
            # nicely with pAdicField, we should reimplement this stuff
            # using pAdicInteger.
            if (p.divides(j+1)):
                # need to lift here to perform the division
                a[0] = base_ring(lift(a[0]) / (j+1))
                a[1] = base_ring(lift(a[1]) / (j+1))
                a[2] = base_ring(lift(a[2]) / (j+1))
            else:
                # (j+1) is a unit: invert once, multiply three times.
                j_plus_1_inv = ~base_ring(j+1)
                a[0] = a[0] * j_plus_1_inv
                a[1] = a[1] * j_plus_1_inv
                a[2] = a[2] * j_plus_1_inv
            # Coefficients of the exact form being subtracted off.
            c1 = m[3]*a[0] + m[4]*a[1] + m[5]*a[2]
            c2 = m[6]*a[0] + m[7]*a[1] + m[8]*a[2]
            next_a[0] = next_a[0] - three_j_plus_5 * c1
            next_a[1] = next_a[1] - three_j_plus_7 * c2
            three_j_plus_7 = three_j_plus_7 + six
            three_j_plus_5 = three_j_plus_5 + six
            if exact_form is not None:
                c0 = m[0]*a[0] + m[1]*a[1] + m[2]*a[2]
                exact_form += (c0 + c1*x + c2 * x**2) * y**(j+1)
    except NotImplementedError:
        raise NotImplementedError("It looks like you've found a "
                                  "non-integral matrix of Frobenius! "
                                  "(Q=%s, p=%s)\nTime to write a paper." % (Q, p))
    coeffs[int(offset)] = next_a
    return exact_form
def reduce_positive(Q, p, coeffs, offset, exact_form=None):
    """
    Applies cohomology relations to incorporate positive powers of
    `y` into the `y^0` term.

    INPUT:

    - ``Q`` -- cubic polynomial

    - ``coeffs`` -- list of length 3 lists. The
      `i^{th}` list [a, b, c] represents
      `y^{2(i - offset)} (a + bx + cx^2) dx/y`.

    - ``offset`` -- nonnegative integer

    OUTPUT: The reduction is performed in-place. The output is placed
    in coeffs[offset]. Note that coeffs[i] will be meaningless for
    i > offset after this function is finished.

    EXAMPLES::

        sage: R.<x> = Integers(5^3)['x']
        sage: Q = x^3 - x + R(1/4)

    ::

        sage: coeffs = [[1, 2, 3], [10, 15, 20]]
        sage: coeffs = [[R.base_ring()(a) for a in row] for row in coeffs]
        sage: monsky_washnitzer.reduce_positive(Q, 5, coeffs, 0)
        sage: coeffs[0]
        [16, 102, 88]

    ::

        sage: coeffs = [[9, 8, 7], [10, 15, 20]]
        sage: coeffs = [[R.base_ring()(a) for a in row] for row in coeffs]
        sage: monsky_washnitzer.reduce_positive(Q, 5, coeffs, 0)
        sage: coeffs[0]
        [24, 108, 92]
    """
    base_ring = Q.base_ring()
    next_a = coeffs[len(coeffs) - 1]
    # Q is assumed of the form x^3 + Qa*x + Qb.
    Qa = Q[1]
    Qb = Q[0]
    A = 2*Qa
    B = 3*Qb
    offset = Integer(offset)
    if exact_form is not None:
        x = exact_form.parent().gen(0)
        y = exact_form.parent().base_ring().gen(0)
        # y = exact_form.parent()(exact_form.parent().base_ring().gen(0))
    # Walk from the highest power of y down towards y^0, folding each
    # row into the previous one.
    for i in range(len(coeffs)-1, offset, -1):
        j = 2*(i-offset) - 2  # the exponent of y for this row
        a = next_a
        next_a = coeffs[i-1]
        a[0] = a[0] - Qa*a[2]/3   # subtract d(y^{j+3})
        if exact_form is not None:
            exact_form += Q.base_ring()(a[2].lift() / (3*j+9)) * y**(j+3)
        # todo: see comments about pAdicInteger in reduceNegative()
        # subtract off c1 of d(x y^{j+1}), and
        if p.divides(3*j + 5):
            # lift to Q to perform the division by a multiple of p
            c1 = base_ring(lift(a[0]) / (3*j + 5))
        else:
            c1 = a[0] / (3*j + 5)
        # subtract off c2 of d(x^2 y^{j+1})
        if p.divides(3*j + 7):
            c2 = base_ring(lift(a[1]) / (3*j + 7))
        else:
            c2 = a[1] / (3*j + 7)
        next_a[0] = next_a[0] + B*c1*(j+1)
        next_a[1] = next_a[1] + A*c1*(j+1) + B*c2*(j+1)
        next_a[2] = next_a[2] + A*c2*(j+1)
        if exact_form is not None:
            exact_form += (c1*x + c2 * x**2) * y**(j+1)
    coeffs[int(offset)] = next_a
    return exact_form
def reduce_zero(Q, coeffs, offset, exact_form=None):
    """
    Applies cohomology relation to incorporate `x^2 y^0` term
    into `x^0 y^0` and `x^1 y^0` terms.

    INPUT:

    - ``Q`` -- cubic polynomial

    - ``coeffs`` -- list of length 3 lists. The
      `i^{th}` list [a, b, c] represents
      `y^{2(i - offset)} (a + bx + cx^2) dx/y`.

    - ``offset`` -- nonnegative integer

    OUTPUT: The reduction is performed in-place. The output is placed
    in coeffs[offset]. This method completely ignores coeffs[i] for i
    != offset.

    EXAMPLES::

        sage: R.<x> = Integers(5^3)['x']
        sage: Q = x^3 - x + R(1/4)
        sage: coeffs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        sage: coeffs = [[R.base_ring()(a) for a in row] for row in coeffs]
        sage: monsky_washnitzer.reduce_zero(Q, coeffs, 1)
        sage: coeffs[1]
        [6, 5, 0]
    """
    a = coeffs[int(offset)]
    if a[2] == 0:
        # nothing to reduce
        return exact_form
    Qa = Q[1]
    a[0] = a[0] - a[2]*Qa/3  # $3x^2 dx/y = -a dx/y$
    coeffs[int(offset)] = a
    if exact_form is not None:
        y = exact_form.parent()(exact_form.parent().base_ring().gen(0))
        # note: must be recorded before a[2] is zeroed out below
        exact_form += Q.base_ring()(a[2] / 3) * y
    a[2] = 0
    coeffs[int(offset)] = a
    return exact_form
def reduce_all(Q, p, coeffs, offset, compute_exact_form=False):
    """
    Applies cohomology relations to reduce all terms to a linear
    combination of `dx/y` and `x dx/y`.

    INPUT:

    - ``Q`` -- cubic polynomial

    - ``coeffs`` -- list of length 3 lists. The
      `i^{th}` list [a, b, c] represents
      `y^{2(i - offset)} (a + bx + cx^2) dx/y`.

    - ``offset`` -- nonnegative integer

    OUTPUT:

    - ``A, B`` - pair such that the input differential is
      cohomologous to (A + Bx) dx/y.

    .. note::

       The algorithm operates in-place, so the data in coeffs is
       destroyed.

    EXAMPLES::

        sage: R.<x> = Integers(5^3)['x']
        sage: Q = x^3 - x + R(1/4)
        sage: coeffs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        sage: coeffs = [[R.base_ring()(a) for a in row] for row in coeffs]
        sage: monsky_washnitzer.reduce_all(Q, 5, coeffs, 1)
        (21, 106)
    """
    R = Q.base_ring()
    if compute_exact_form:
        # exact_form = SpecialCubicQuotientRing(Q, laurent_series=True)(0)
        exact_form = PolynomialRing(LaurentSeriesRing(Q.base_ring(), 'y'), 'x').zero()
        # t = (Q.base_ring().order().factor())[0]
        # from sage.rings.padics.qp import pAdicField
        # exact_form = PolynomialRing(LaurentSeriesRing(pAdicField(p, t[1]), 'y'), 'x')(0)
    else:
        exact_form = None
    # Pad with zero rows so coeffs[offset] exists even if the input had
    # no nonnegative powers of y.
    while len(coeffs) <= offset:
        coeffs.append([R(0), R(0), R(0)])
    # Fold negative powers of y up, positive powers down, then kill the
    # remaining x^2 term; the result accumulates in coeffs[offset].
    exact_form = reduce_negative(Q, p, coeffs, offset, exact_form)
    exact_form = reduce_positive(Q, p, coeffs, offset, exact_form)
    exact_form = reduce_zero(Q, coeffs, offset, exact_form)
    if exact_form is None:
        return coeffs[int(offset)][0], coeffs[int(offset)][1]
    else:
        return (coeffs[int(offset)][0], coeffs[int(offset)][1]), exact_form
def frobenius_expansion_by_newton(Q, p, M):
    r"""
    Computes the action of Frobenius on `dx/y` and on
    `x dx/y`, using Newton's method (as suggested in Kedlaya's
    paper [Ked2001]_).

    (This function does *not* yet use the cohomology relations - that
    happens afterwards in the "reduction" step.)

    More specifically, it finds `F_0` and `F_1` in
    the quotient ring `R[x, T]/(T - Q(x))`, such that

    .. MATH::

       F( dx/y) = T^{-r} F0 dx/y, \text{\ and\ } F(x dx/y) = T^{-r} F1 dx/y

    where

    .. MATH::

       r = ( (2M-3)p - 1 )/2.

    (Here `T` is `y^2 = z^{-2}`, and `R` is the
    coefficient ring of `Q`.)

    `F_0` and `F_1` are computed in the
    SpecialCubicQuotientRing associated to `Q`, so all powers
    of `x^j` for `j \geq 3` are reduced to powers of
    `T`.

    INPUT:

    - ``Q`` -- cubic polynomial of the form
      `Q(x) = x^3 + ax + b`, whose coefficient ring is a
      `Z/(p^M)Z`-algebra

    - ``p`` -- residue characteristic of the p-adic field

    - ``M`` -- p-adic precision of the coefficient ring
      (this will be used to determine the number of Newton iterations)

    OUTPUT:

    - ``F0, F1`` - elements of
      SpecialCubicQuotientRing(Q), as described above

    - ``r`` - non-negative integer, as described above

    EXAMPLES::

        sage: from sage.schemes.hyperelliptic_curves.monsky_washnitzer import frobenius_expansion_by_newton
        sage: R.<x> = Integers(5^3)['x']
        sage: Q = x^3 - x + R(1/4)
        sage: frobenius_expansion_by_newton(Q,5,3)
        ((25*T^5 + 75*T^3 + 100*T^2 + 100*T + 100) + (5*T^6 + 80*T^5 + 100*T^3
        + 25*T + 50)*x + (55*T^5 + 50*T^4 + 75*T^3 + 25*T^2 + 25*T + 25)*x^2,
        (5*T^8 + 15*T^7 + 95*T^6 + 10*T^5 + 25*T^4 + 25*T^3 + 100*T^2 + 50)
        + (65*T^7 + 55*T^6 + 70*T^5 + 100*T^4 + 25*T^2 + 100*T)*x
        + (15*T^6 + 115*T^5 + 75*T^4 + 100*T^3 + 50*T^2 + 75*T + 75)*x^2, 7)
    """
    S = SpecialCubicQuotientRing(Q)
    x, _ = S.gens()  # T = y^2
    base_ring = S.base_ring()
    # When we compute Frob(1/y) we actually only need precision M-1, since
    # we're going to multiply by p at the end anyway.
    M = float(M - 1)
    # Kedlaya sets s = Q(x^p)/T^p = 1 + p T^{-p} E, where
    # E = (Q(x^p) - Q(x)^p) / p (has integral coefficients).
    # Then he computes s^{-1/2} in S, using Newton's method to find
    # successive approximations. We follow this plan, but we normalise our
    # approximations so that we only ever need positive powers of T.
    # Start by setting r = Q(x^p)/2 = 1/2 T^p s.
    # (The 1/2 is for convenience later on.)
    x_to_p_less_one = x**(p-1)
    x_to_p = x_to_p_less_one * x
    x_to_p_cubed = x_to_p.square() * x_to_p
    r = (base_ring(1) / base_ring(2)) * (x_to_p_cubed + Q[1]*x_to_p + S(Q[0]))
    # todo: this next loop would be clearer if it used the newton_method_sizes()
    # function
    # We will start with a hard-coded initial approximation, which we provide
    # up to precision 3. First work out what precision is best to start with.
    if M <= 3:
        initial_precision = M
    elif ceil(log(M/2, 2)) == ceil(log(M/3, 2)):
        # In this case there is no advantage to starting with precision three,
        # because we'll overshoot at the end. E.g. suppose the final precision
        # is 8. If we start with precision 2, we need two iterations to get us
        # to 8. If we start at precision 3, we will still need two iterations,
        # but we do more work along the way. So may as well start with only 2.
        initial_precision = 2
    else:
        initial_precision = 3
    # Now compute the first approximation. In the main loop below, X is the
    # normalised approximation, and k is the precision. More specifically,
    # X = T^{p(k-1)} x_i, where x_i is an approximation to s^{-1/2}, and the
    # approximation is correct mod p^k.
    if initial_precision == 1:
        k = 1
        X = S(1)
    elif initial_precision == 2:
        # approximation is 3/2 - 1/2 s
        k = 2
        X = S(base_ring(3) / base_ring(2)).shift(p) - r
    elif initial_precision == 3:
        # approximation is (15 - 10 s + 3 s^2) / 8
        k = 3
        X = (base_ring(1) / base_ring(8)) * (S(15).shift(2*p)
             - (base_ring(20) * r).shift(p) +
             (base_ring(12) * r.square()))
    # The key to the following calculation is that the T^{-m} coefficient
    # of every x_i is divisible by p^(ceil(m/p)) (for m >= 0). Therefore if
    # we are only expecting an answer correct mod p^k, we can truncate
    # beyond the T^{-(k-1)p} term without any problems.
    # todo: what would be really nice is to be able to work in a lower
    # precision *coefficient ring* when we start the iteration, and move up to
    # higher precision rings as the iteration proceeds. This would be feasible
    # over Integers(p**n), but quite complicated (maybe impossible) over a more
    # general base ring. This might give a decent constant factor speedup;
    # or it might not, depending on how much the last iteration dominates the
    # whole runtime. My guess is that it isn't worth the effort.
    three_halves = base_ring(3) / base_ring(2)
    # Newton iteration loop
    while k < M:
        # target_k = k' = precision we want our answer to be after this iteration
        target_k = 2*k
        # This prevents us overshooting. For example if the current precision
        # is 3 and we want to get to 10, we're better off going up to 5
        # instead of 6, because it is less work to get from 5 to 10 than it
        # is to get from 6 to 10.
        if ceil(log(M/target_k, 2)) == ceil(log(M/(target_k-1), 2)):
            target_k -= 1
        # temp = T^{p(3k-2)} 1/2 s x_i^3
        temp = X.square() * (X * r)
        # We know that the final result is only going to be correct mod
        # p^(target_k), so we might as well truncate the extraneous terms now.
        # temp = T^{p(k'-1)} 1/2 s x_i^3
        temp = temp.shift(-p*(3*k - target_k - 1))
        # X = T^{p(k'-1)} (3/2 x_i - 1/2 s x_i^3)
        #   = T^{p(k'-1)} x_{i+1}
        X = (three_halves * X).shift(p*(target_k - k)) - temp
        k = target_k
    # Now k should equal M, since we're up to the correct precision
    assert k == M, "Oops, something went wrong in the iteration"
    # We should have s^{-1/2} correct to precision M.
    # The following line can be uncommented to verify this.
    # (It is a slow verification though, can double the whole computation time.)
    # assert (p * X.square() * r * base_ring(2)).coeffs() == \
    #        R(p).shift(p*(2*M - 1)).coeffs()
    # Finally incorporate frobenius of dx and x dx, and choose offset that
    # compensates for our normalisations by powers of T.
    F0 = base_ring(p) * x_to_p_less_one * X
    F1 = F0 * x_to_p
    offset = ((2*k-1)*p - 1)/2
    return F0, F1, offset
def frobenius_expansion_by_series(Q, p, M):
    r"""
    Computes the action of Frobenius on `dx/y` and on `x dx/y`, using a
    series expansion.

    (This function computes the same thing as
    frobenius_expansion_by_newton(), using a different method.
    Theoretically the Newton method should be asymptotically faster,
    when the precision gets large. However, in practice, this functions
    seems to be marginally faster for moderate precision, so I'm
    keeping it here until I figure out exactly why it is faster.)

    (This function does *not* yet use the cohomology relations - that
    happens afterwards in the "reduction" step.)

    More specifically, it finds F0 and F1 in the quotient ring
    `R[x, T]/(T - Q(x))`, such that
    `F( dx/y) = T^{-r} F0 dx/y`, and
    `F(x dx/y) = T^{-r} F1 dx/y` where
    `r = ( (2M-3)p - 1 )/2`. (Here `T` is `y^2 = z^{-2}`,
    and `R` is the coefficient ring of `Q`.)

    `F_0` and `F_1` are computed in the
    SpecialCubicQuotientRing associated to `Q`, so all powers
    of `x^j` for `j \geq 3` are reduced to powers of
    `T`.

    It uses the sum

    .. MATH::

         F0 = \sum_{k=0}^{M-2} \binom{-1/2}{k} p x^{p-1} E^k T^{(M-2-k)p}

    and

    .. MATH::

         F1 = x^p F0,

    where `E = Q(x^p) - Q(x)^p`.

    INPUT:

    - ``Q`` -- cubic polynomial of the form
      `Q(x) = x^3 + ax + b`, whose coefficient ring is a
      `\ZZ/(p^M)\ZZ` -algebra

    - ``p`` -- residue characteristic of the `p`-adic field

    - ``M`` -- `p`-adic precision of the coefficient ring
      (this will be used to determine the number of terms in the
      series)

    OUTPUT:

    - ``F0, F1`` - elements of
      SpecialCubicQuotientRing(Q), as described above

    - ``r`` - non-negative integer, as described above

    EXAMPLES::

        sage: from sage.schemes.hyperelliptic_curves.monsky_washnitzer import frobenius_expansion_by_series
        sage: R.<x> = Integers(5^3)['x']
        sage: Q = x^3 - x + R(1/4)
        sage: frobenius_expansion_by_series(Q,5,3)
        ((25*T^5 + 75*T^3 + 100*T^2 + 100*T + 100) + (5*T^6 + 80*T^5 + 100*T^3
        + 25*T + 50)*x + (55*T^5 + 50*T^4 + 75*T^3 + 25*T^2 + 25*T + 25)*x^2,
        (5*T^8 + 15*T^7 + 95*T^6 + 10*T^5 + 25*T^4 + 25*T^3 + 100*T^2 + 50)
        + (65*T^7 + 55*T^6 + 70*T^5 + 100*T^4 + 25*T^2 + 100*T)*x
        + (15*T^6 + 115*T^5 + 75*T^4 + 100*T^3 + 50*T^2 + 75*T + 75)*x^2, 7)
    """
    S = SpecialCubicQuotientRing(Q)
    x, _ = S.gens()
    base_ring = S.base_ring()
    x_to_p_less_1 = x**(p-1)
    x_to_p = x_to_p_less_1 * x
    # compute frobQ = Q(x^p)
    x_to_p_squared = x_to_p * x_to_p
    x_to_p_cubed = x_to_p_squared * x_to_p
    frobQ = x_to_p_cubed + Q[1]*x_to_p + Q[0]*S(1)
    # anticipating the day when p = 3 is supported:
    # frobQ = x_to_p_cubed + Q[2]*x_to_p_squared + Q[1]*x_to_p + Q[0]*S(1)
    E = frobQ - S(1).shift(p)  # E = Q(x^p) - Q(x)^p
    offset = int(((2*M-3)*p-1)/2)
    # k = 0 term of the series (the binomial coefficient is 1).
    term = p * x_to_p_less_1
    F0 = term.shift((M-2)*p)
    # todo: Possible speedup idea, perhaps by a factor of 2, but
    # it requires a lot of work:
    # Note that p divides E, so p^k divides E^k. So when we are
    # working with high powers of E, we're doing a lot more work
    # in the multiplications than we need to. To take advantage of
    # this we would need some protocol for "lowering the precision"
    # of a SpecialCubicQuotientRing. This would be quite messy to
    # do properly over an arbitrary base ring. Perhaps it is
    # feasible to do for the most common case (i.e. Z/p^nZ).
    # (but it probably won't save much time unless p^n is very
    # large, because the machine word size is probably pretty
    # big anyway.)
    for k in range(int(1), int(M-1)):
        # term accumulates p x^{p-1} E^k; add it in with the
        # appropriate binomial coefficient and power of T.
        term = term * E
        c = base_ring(binomial(-Integer(1)/2, k))
        F0 += (term * c).shift((M-k-2)*p)
    return F0, F0 * x_to_p, offset
def adjusted_prec(p, prec):
    r"""
    Compute how much precision is required in ``matrix_of_frobenius`` to
    get an answer correct to ``prec`` `p`-adic digits.

    The issue is that the algorithm used in
    :func:`matrix_of_frobenius` sometimes performs divisions by `p`,
    so precision is lost during the algorithm.

    The estimate returned by this function is based on Kedlaya's result
    (Lemmas 2 and 3 of [Ked2001]_),
    which implies that if we start with `M` `p`-adic
    digits, the total precision loss is at most
    `1 + \lfloor \log_p(2M-3) \rfloor` `p`-adic
    digits. (This estimate is somewhat less than the amount you would
    expect by naively counting the number of divisions by
    `p`.)

    INPUT:

    - ``p`` -- a prime ``p >= 5``

    - ``prec`` -- integer, desired output precision, ``prec >= 1``

    OUTPUT: adjusted precision (usually slightly more than ``prec``)

    EXAMPLES::

        sage: from sage.schemes.hyperelliptic_curves.monsky_washnitzer import adjusted_prec
        sage: adjusted_prec(5,2)
        3
    """
    # initial estimate of the precision loss:
    defect = floor(Integer(2 * prec - 3).log(p))
    adjusted = 2 if prec <= 2 else prec + defect - 1
    # bump the estimate until Kedlaya's bound guarantees ``prec`` digits
    while adjusted - defect - 1 < prec:
        adjusted += 1
    return adjusted
def matrix_of_frobenius(Q, p, M, trace=None, compute_exact_forms=False):
    r"""
    Compute the matrix of Frobenius on Monsky-Washnitzer cohomology,
    with respect to the basis `(dx/y, x dx/y)`.

    INPUT:

    - ``Q`` -- cubic polynomial `Q(x) = x^3 + ax + b`
      defining an elliptic curve `E` by
      `y^2 = Q(x)`. The coefficient ring of `Q` should be a
      `\ZZ/(p^M)\ZZ`-algebra in which the matrix of
      frobenius will be constructed.

    - ``p`` -- prime >= 5 for which E has good reduction

    - ``M`` -- integer >= 2; `p` -adic precision of the coefficient ring

    - ``trace`` -- (optional) the trace of the matrix, if
      known in advance. This is easy to compute because it is just the
      `a_p` of the curve. If the trace is supplied,
      matrix_of_frobenius will use it to speed the computation (i.e. we
      know the determinant is `p`, so we have two conditions, so
      really only column of the matrix needs to be computed. it is
      actually a little more complicated than that, but that's the basic
      idea.) If trace=None, then both columns will be computed
      independently, and you can get a strong indication of correctness
      by verifying the trace afterwards.

    .. warning::

       THE RESULT WILL NOT NECESSARILY BE CORRECT TO M p-ADIC
       DIGITS. If you want prec digits of precision, you need to use
       the function adjusted_prec(), and then you need to reduce the
       answer mod `p^{\mathrm{prec}}` at the end.

    OUTPUT:

    2x2 matrix of Frobenius acting on Monsky-Washnitzer cohomology,
    with entries in the coefficient ring of ``Q``.

    EXAMPLES:

    A simple example::

        sage: p = 5
        sage: prec = 3
        sage: M = monsky_washnitzer.adjusted_prec(p, prec); M
        4
        sage: R.<x> = PolynomialRing(Integers(p**M))
        sage: A = monsky_washnitzer.matrix_of_frobenius(x^3 - x + R(1/4), p, M)
        sage: A
        [340  62]
        [ 70 533]

    But the result is only accurate to ``prec`` digits::

        sage: B = A.change_ring(Integers(p**prec))
        sage: B
        [90 62]
        [70 33]

    Check trace (123 = -2 mod 125) and determinant::

        sage: B.det()
        5
        sage: B.trace()
        123
        sage: EllipticCurve([-1, 1/4]).ap(5)
        -2

    Try using the trace to speed up the calculation::

        sage: A = monsky_washnitzer.matrix_of_frobenius(x^3 - x + R(1/4),
        ....:                                           p, M, -2)
        sage: A
        [ 90  62]
        [320 533]

    Hmmm... it looks different, but that's because the trace of our
    first answer was only -2 modulo `5^3`, not -2 modulo
    `5^5`. So the right answer is::

        sage: A.change_ring(Integers(p**prec))
        [90 62]
        [70 33]

    Check it works with only one digit of precision::

        sage: p = 5
        sage: prec = 1
        sage: M = monsky_washnitzer.adjusted_prec(p, prec)
        sage: R.<x> = PolynomialRing(Integers(p**M))
        sage: A = monsky_washnitzer.matrix_of_frobenius(x^3 - x + R(1/4), p, M)
        sage: A.change_ring(Integers(p))
        [0 2]
        [0 3]

    Here is an example that is particularly badly conditioned for
    using the trace trick::

        sage: p = 11
        sage: prec = 3
        sage: M = monsky_washnitzer.adjusted_prec(p, prec)
        sage: R.<x> = PolynomialRing(Integers(p**M))
        sage: A = monsky_washnitzer.matrix_of_frobenius(x^3 + 7*x + 8, p, M)
        sage: A.change_ring(Integers(p**prec))
        [1144  176]
        [ 847  185]

    The problem here is that the top-right entry is divisible by 11,
    and the bottom-left entry is divisible by `11^2`. So when
    you apply the trace trick, neither `F(dx/y)` nor
    `F(x dx/y)` is enough to compute the whole matrix to the
    desired precision, even if you try increasing the target precision
    by one. Nevertheless, ``matrix_of_frobenius`` knows
    how to get the right answer by evaluating `F((x+1) dx/y)`
    instead::

        sage: A = monsky_washnitzer.matrix_of_frobenius(x^3 + 7*x + 8, p, M, -2)
        sage: A.change_ring(Integers(p**prec))
        [1144  176]
        [ 847  185]

    The running time is about ``O(p*prec**2)`` (times some logarithmic
    factors), so it is feasible to run on fairly large primes, or
    precision (or both?!?!)::

        sage: p = 10007
        sage: prec = 2
        sage: M = monsky_washnitzer.adjusted_prec(p, prec)
        sage: R.<x> = PolynomialRing(Integers(p**M))
        sage: A = monsky_washnitzer.matrix_of_frobenius(   # long time
        ....:             x^3 - x + R(1/4), p, M)          # long time
        sage: B = A.change_ring(Integers(p**prec)); B      # long time
        [74311982 57996908]
        [95877067 25828133]
        sage: B.det()                                      # long time
        10007
        sage: B.trace()                                    # long time
        66
        sage: EllipticCurve([-1, 1/4]).ap(10007)           # long time
        66

    ::

        sage: p = 5
        sage: prec = 300
        sage: M = monsky_washnitzer.adjusted_prec(p, prec)
        sage: R.<x> = PolynomialRing(Integers(p**M))
        sage: A = monsky_washnitzer.matrix_of_frobenius(   # long time
        ....:             x^3 - x + R(1/4), p, M)          # long time
        sage: B = A.change_ring(Integers(p**prec))         # long time
        sage: B.det()                                      # long time
        5
        sage: -B.trace()                                   # long time
        2
        sage: EllipticCurve([-1, 1/4]).ap(5)               # long time
        -2

    Let us check consistency of the results for a range of precisions::

        sage: p = 5
        sage: max_prec = 60
        sage: M = monsky_washnitzer.adjusted_prec(p, max_prec)
        sage: R.<x> = PolynomialRing(Integers(p**M))
        sage: A = monsky_washnitzer.matrix_of_frobenius(x^3 - x + R(1/4), p, M)  # long time
        sage: A = A.change_ring(Integers(p**max_prec))                           # long time
        sage: result = []                                                        # long time
        sage: for prec in range(1, max_prec):                                    # long time
        ....:     M = monsky_washnitzer.adjusted_prec(p, prec)                   # long time
        ....:     R.<x> = PolynomialRing(Integers(p^M),'x')                      # long time
        ....:     B = monsky_washnitzer.matrix_of_frobenius(                     # long time
        ....:               x^3 - x + R(1/4), p, M)                              # long time
        ....:     B = B.change_ring(Integers(p**prec))                           # long time
        ....:     result.append(B == A.change_ring(                              # long time
        ....:                Integers(p**prec)))                                 # long time
        sage: result == [True] * (max_prec - 1)                                  # long time
        True

    The remaining examples discuss what happens when you take the
    coefficient ring to be a power series ring; i.e. in effect you're
    looking at a family of curves.

    The code does in fact work...

    ::

        sage: p = 11
        sage: prec = 3
        sage: M = monsky_washnitzer.adjusted_prec(p, prec)
        sage: S.<t> = PowerSeriesRing(Integers(p**M), default_prec=4)
        sage: a = 7 + t + 3*t^2
        sage: b = 8 - 6*t + 17*t^2
        sage: R.<x> = PolynomialRing(S)
        sage: Q = x**3 + a*x + b
        sage: A = monsky_washnitzer.matrix_of_frobenius(Q, p, M)            # long time
        sage: B = A.change_ring(PowerSeriesRing(Integers(p**prec), 't', default_prec=4))  # long time
        sage: B  # long time
        [1144 + 264*t + 841*t^2 + 1025*t^3 + O(t^4)  176 + 1052*t + 216*t^2 + 523*t^3 + O(t^4)]
        [ 847 + 668*t + 81*t^2 + 424*t^3 + O(t^4)  185 + 341*t + 171*t^2 + 642*t^3 + O(t^4)]

    The trace trick should work for power series rings too, even in the
    badly-conditioned case. Unfortunately I do not know how to compute
    the trace in advance, so I am not sure exactly how this would help.
    Also, I suspect the running time will be dominated by the
    expansion, so the trace trick will not really speed things up anyway.
    Another problem is that the determinant is not always p::

        sage: B.det()  # long time
        11 + 484*t^2 + 451*t^3 + O(t^4)

    However, it appears that the determinant always has the property
    that if you substitute t - 11t, you do get the constant series p
    (mod p\*\*prec). Similarly for the trace. And since the parameter
    only really makes sense when it is divisible by p anyway, perhaps
    this is not a problem after all.
    """
    M = int(M)
    if M < 2:
        raise ValueError("M (=%s) must be at least 2" % M)
    base_ring = Q.base_ring()
    # Expand out frobenius of dx/y and x dx/y.
    # (You can substitute frobenius_expansion_by_series here, that will work
    # as well. See its docstring for some performance notes.)
    F0, F1, offset = frobenius_expansion_by_newton(Q, p, M)
    # F0, F1, offset = frobenius_expansion_by_series(Q, p, M)
    if compute_exact_forms:
        # we need to do all the work to get the exact expressions f such that F(x^i dx/y) = df + \sum a_i x^i dx/y
        F0_coeffs = transpose_list(F0.coeffs())
        F0_reduced, f_0 = reduce_all(Q, p, F0_coeffs, offset, True)
        F1_coeffs = transpose_list(F1.coeffs())
        F1_reduced, f_1 = reduce_all(Q, p, F1_coeffs, offset, True)
    elif M == 2:
        # This implies that only one digit of precision is valid, so we only need
        # to reduce the second column. Also, the trace doesn't help at all.
        F0_reduced = [base_ring(0), base_ring(0)]
        F1_coeffs = transpose_list(F1.coeffs())
        F1_reduced = reduce_all(Q, p, F1_coeffs, offset)
    elif trace is None:
        # No trace provided, just reduce F(dx/y) and F(x dx/y) separately.
        F0_coeffs = transpose_list(F0.coeffs())
        F0_reduced = reduce_all(Q, p, F0_coeffs, offset)
        F1_coeffs = transpose_list(F1.coeffs())
        F1_reduced = reduce_all(Q, p, F1_coeffs, offset)
    else:
        # Trace has been provided.
        # In most cases this can be used to quickly compute F(dx/y) from
        # F(x dx/y). However, if we're unlucky, the (dx/y)-component of
        # F(x dx/y) (i.e. the top-right corner of the matrix) may be divisible
        # by p, in which case there isn't enough information to get the
        # (x dx/y)-component of F(dx/y) to the desired precision. When this
        # happens, it turns out that F((x+1) dx/y) always *does* give enough
        # information (together with the trace) to get both columns to the
        # desired precision.
        # First however we need a quick way of telling whether the top-right
        # corner is divisible by p, i.e. we want to compute the second column
        # of the matrix mod p. We could do this by just running the entire
        # algorithm with M = 2 (which assures precision 1). Luckily, we've
        # already done most of the work by computing F1 to high precision; so
        # all we need to do is extract the coefficients that would correspond
        # to the first term of the series, and run the reduction on them.
        # todo: actually we only need to do this reduction step mod p^2, not
        # mod p^M, which is what the code currently does. If the base ring
        # is Integers(p^M), then it is easy. Otherwise it is tricky to construct
        # the right ring, I don't know how to do it.
        F1_coeffs = transpose_list(F1.coeffs())
        F1_modp_coeffs = F1_coeffs[int((M-2)*p):]
        # make a copy, because reduce_all will destroy the coefficients:
        F1_modp_coeffs = [[cell for cell in row] for row in F1_modp_coeffs]
        F1_modp_offset = offset - (M-2)*p
        F1_modp_reduced = reduce_all(Q, p, F1_modp_coeffs, F1_modp_offset)
        if F1_modp_reduced[0].is_unit():
            # If the first entry is invertible mod p, then F(x dx/y) is sufficient
            # to get the whole matrix.
            F1_reduced = reduce_all(Q, p, F1_coeffs, offset)
            F0_reduced = [base_ring(trace) - F1_reduced[1], None]
            # using that the determinant is p:
            F0_reduced[1] = (F0_reduced[0] * F1_reduced[1] - base_ring(p)) \
                / F1_reduced[0]
        else:
            # If the first entry is zero mod p, then F((x+1) dx/y) will be sufficient
            # to get the whole matrix. (Here we are using the fact that the second
            # entry *cannot* be zero mod p. This is guaranteed by some results in
            # section 3.2 of ``Computation of p-adic Heights and Log Convergence''
            # by Mazur, Stein, Tate. But let's quickly check it anyway :-))
            msg = "The second entry in the second column "
            msg += "should be invertible mod p!"
            assert F1_modp_reduced[1].is_unit(), msg
            G0_coeffs = transpose_list((F0 + F1).coeffs())
            G0_reduced = reduce_all(Q, p, G0_coeffs, offset)
            # Now G0_reduced expresses F((x+1) dx/y) in terms of dx/y and x dx/y.
            # Re-express this in terms of (x+1) dx/y and x dx/y.
            H0_reduced = [G0_reduced[0], G0_reduced[1] - G0_reduced[0]]
            # The thing we're about to divide by better be a unit.
            msg = "The second entry in this column "
            msg += "should be invertible mod p!"
            assert H0_reduced[1].is_unit(), msg
            # Figure out the second column using the trace...
            H1_reduced = [None, base_ring(trace) - H0_reduced[0]]
            # ... and using that the determinant is p:
            H1_reduced[0] = (H0_reduced[0] * H1_reduced[1] - base_ring(p)) \
                / H0_reduced[1]
            # Finally, change back to the usual basis (dx/y, x dx/y)
            F1_reduced = [H1_reduced[0],
                          H1_reduced[0] + H1_reduced[1]]
            F0_reduced = [H0_reduced[0] - F1_reduced[0],
                          H0_reduced[0] + H0_reduced[1] - F1_reduced[1]]
        # One more sanity check: our final result should be congruent mod p
        # to the approximation we used earlier.
        msg = "The output matrix is not congruent mod p "
        msg += "to the approximation found earlier!"
        assert not (
            (F1_reduced[0] - F1_modp_reduced[0]).is_unit() or
            (F1_reduced[1] - F1_modp_reduced[1]).is_unit() or
            F0_reduced[0].is_unit() or F0_reduced[1].is_unit()), msg
    if compute_exact_forms:
        return matrix(base_ring, 2, 2, [F0_reduced[0], F1_reduced[0],
                                        F0_reduced[1], F1_reduced[1]]), f_0, f_1
    else:
        return matrix(base_ring, 2, 2, [F0_reduced[0], F1_reduced[0],
                                        F0_reduced[1], F1_reduced[1]])
# ****************************************************************************
# This is a generalization of the above functionality for hyperelliptic curves.
#
# THIS IS A WORK IN PROGRESS.
#
# I tried to embed most of the functionality into the rings themselves rather than
# just extract and manipulate lists of coefficients. Hence the implementations
# below are much less optimized, so are much slower, but should hopefully be
# easier to follow. (E.g. one can print/make sense of intermediate results.)
#
# AUTHOR:
# -- Robert Bradshaw (2007-04)
#
# ****************************************************************************
def matrix_of_frobenius_hyperelliptic(Q, p=None, prec=None, M=None):
    r"""
    Computes the matrix of Frobenius on Monsky-Washnitzer cohomology,
    with respect to the basis `(dx/2y, x dx/2y, ...x^{d-2} dx/2y)`, where
    `d` is the degree of `Q`.

    INPUT:

    - ``Q`` -- monic polynomial `Q(x)`
    - ``p`` -- prime `\geq 5` for which `E` has good reduction
    - ``prec`` -- (optional) `p`-adic precision of the coefficient ring
    - ``M`` -- (optional) adjusted `p`-adic precision of the coefficient ring

    OUTPUT:

    `(d-1)` x `(d-1)` matrix `M` of Frobenius on Monsky-Washnitzer cohomology,
    and list of differentials \{f_i \} such that

    .. MATH::

        \phi^* (x^i dx/2y) = df_i + M[i]*vec(dx/2y, ..., x^{d-2} dx/2y)

    EXAMPLES::

        sage: p = 5
        sage: prec = 3
        sage: R.<x> = QQ['x']
        sage: A,f = monsky_washnitzer.matrix_of_frobenius_hyperelliptic(x^5 - 2*x + 3, p, prec)
        sage: A
        [            4*5 + O(5^3)       5 + 2*5^2 + O(5^3) 2 + 3*5 + 2*5^2 + O(5^3)     2 + 5 + 5^2 + O(5^3)]
        [      3*5 + 5^2 + O(5^3)             3*5 + O(5^3)             4*5 + O(5^3)         2 + 5^2 + O(5^3)]
        [    4*5 + 4*5^2 + O(5^3)     3*5 + 2*5^2 + O(5^3)       5 + 3*5^2 + O(5^3)     2*5 + 2*5^2 + O(5^3)]
        [            5^2 + O(5^3)       5 + 4*5^2 + O(5^3)     4*5 + 3*5^2 + O(5^3)             2*5 + O(5^3)]
    """
    prof = Profiler()
    prof("setup")
    if p is None:
        # Try to read p and prec off a p-adic coefficient ring of Q.
        try:
            K = Q.base_ring()
            p = K.prime()
            prec = K.precision_cap()
        except AttributeError:
            raise ValueError("p and prec must be specified if Q is not "
                             "defined over a p-adic ring")
    if M is None:
        M = adjusted_prec(p, prec)
    # Work with extra (adjusted) precision mod p^M, then truncate to prec.
    extra_prec_ring = Integers(p**M)
    # extra_prec_ring = pAdicField(p, M) # SLOW!
    real_prec_ring = pAdicField(p, prec)  # To capped absolute?
    S = SpecialHyperellipticQuotientRing(Q, extra_prec_ring, True)
    MW = S.monsky_washnitzer()
    prof("frob basis elements")
    F = MW.frob_basis_elements(M, p)
    prof("rationalize")
    # do reduction over Q in case we have non-integral entries
    # (and it is so much faster than padics)
    rational_S = S.change_ring(QQ)
    # this is a hack until pAdics are fast
    # (it will periodically cast into this ring to reduce coefficient size)
    rational_S._prec_cap = p**M
    rational_S._p = p
    F = [rational_S(F_i) for F_i in F]
    prof("reduce")
    reduced = [F_i.reduce_fast(True) for F_i in F]
    # but the coeffs are WAY more precision than they need to be
    prof("make matrix")
    # Use a fresh name for the result so the precision parameter M is not
    # clobbered (the original rebound M here).
    mat = matrix(real_prec_ring, [a for f, a in reduced])
    # Cap the precision of every entry.  Iterate rows with nrows() and
    # columns with ncols(); the original loop had the two swapped, which
    # only worked because the matrix happens to be square.
    for i in range(mat.nrows()):
        for j in range(mat.ncols()):
            mat[i, j] = mat[i, j].add_bigoh(prec)
    return mat.transpose(), [f for f, a in reduced]
class SpecialHyperellipticQuotientRing(UniqueRepresentation, CommutativeAlgebra):
    # The quotient ring K[x, y, y^-1] / (y^2 - Q(x)) for the hyperelliptic
    # curve y^2 = Q(x).  Elements are stored as polynomials in x (degree
    # < deg Q) whose coefficients are (Laurent) series in y.

    # Residue characteristic; left as None here and assigned externally
    # (see prime() and matrix_of_frobenius_hyperelliptic).
    _p = None

    def __init__(self, Q, R=None, invert_y=True):
        r"""
        Initialization.

        TESTS:

        Check that caching works::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: from sage.schemes.hyperelliptic_curves.monsky_washnitzer import SpecialHyperellipticQuotientRing
            sage: SpecialHyperellipticQuotientRing(E) is SpecialHyperellipticQuotientRing(E)
            True
        """
        if R is None:
            R = Q.base_ring()
        # Trac ticket #9138: CommutativeAlgebra.__init__ must not be
        # done so early. It tries to register a coercion, but that
        # requires the hash being available. But the hash, in its
        # default implementation, relies on the string representation,
        # which is not available at this point.
        # CommutativeAlgebra.__init__(self, R) # moved to below.
        x = PolynomialRing(R, 'xx').gen(0)
        if is_EllipticCurve(Q):
            E = Q
            if E.a1() != 0 or E.a2() != 0:
                raise NotImplementedError("Curve must be in Weierstrass "
                                          "normal form.")
            # Recover Q(x) from the curve's defining polynomial at y = 0.
            Q = -E.change_ring(R).defining_polynomial()(x, 0, 1)
            self._curve = E
        elif is_HyperellipticCurve(Q):
            C = Q
            if C.hyperelliptic_polynomials()[1] != 0:
                raise NotImplementedError("Curve must be of form y^2 = Q(x).")
            Q = C.hyperelliptic_polynomials()[0].change_ring(R)
            self._curve = C
        if is_Polynomial(Q):
            self._Q = Q.change_ring(R)
            self._coeffs = self._Q.coefficients(sparse=False)
            # pop() removes the leading coefficient, which must be 1.
            if self._coeffs.pop() != 1:
                raise NotImplementedError("Polynomial must be monic.")
            if not hasattr(self, '_curve'):
                # No curve object was supplied; build one from Q.
                if self._Q.degree() == 3:
                    ainvs = [0, self._Q[2], 0, self._Q[1], self._Q[0]]
                    self._curve = EllipticCurve(ainvs)
                else:
                    self._curve = HyperellipticCurve(self._Q)
        else:
            raise NotImplementedError("Must be an elliptic curve or polynomial "
                                      "Q for y^2 = Q(x)\n(Got element of %s)" % Q.parent())
        self._n = int(Q.degree())
        # Laurent series allow y^-1; plain polynomials otherwise.
        self._series_ring = (LaurentSeriesRing if invert_y else PolynomialRing)(R, 'y')
        self._series_ring_y = self._series_ring.gen(0)
        self._series_ring_0 = self._series_ring(0)
        # Trac ticket #9138: Initialise the commutative algebra here!
        # Below, we do self(self._poly_ring.gen(0)), which requires
        # the initialisation being finished.
        CommutativeAlgebra.__init__(self, R)
        self._poly_ring = PolynomialRing(self._series_ring, 'x')
        self._x = self(self._poly_ring.gen(0))
        self._y = self(self._series_ring.gen(0))
        self._Q_coeffs = Q.change_ring(self._series_ring).list()
        # dQ/dx evaluated in this ring; used by diff().
        self._dQ = Q.derivative().change_ring(self)(self._x)
        self._monsky_washnitzer = MonskyWashnitzerDifferentialRing(self)
        # Caches used by monomial_diff_coeffs.
        self._monomial_diffs = {}
        self._monomial_diff_coeffs = {}

    def _repr_(self):
        """
        String representation

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x.parent() # indirect doctest
            SpecialHyperellipticQuotientRing K[x,y,y^-1] / (y^2 = x^5 - 3*x + 1) over Rational Field
        """
        y_inverse = ",y^-1" if is_LaurentSeriesRing(self._series_ring) else ""
        return "SpecialHyperellipticQuotientRing K[x,y%s] / (y^2 = %s) over %s" % (y_inverse, self._Q, self.base_ring())

    def base_extend(self, R):
        """
        Return the base extension of ``self`` to the ring ``R`` if possible.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x.parent().base_extend(UniversalCyclotomicField())
            SpecialHyperellipticQuotientRing K[x,y,y^-1] / (y^2 = x^5 - 3*x + 1) over Universal Cyclotomic Field
            sage: x.parent().base_extend(ZZ)
            Traceback (most recent call last):
            ...
            TypeError: no such base extension
        """
        if R.has_coerce_map_from(self.base_ring()):
            return self.change_ring(R)
        else:
            raise TypeError("no such base extension")

    def change_ring(self, R):
        """
        Return the analog of ``self`` over the ring ``R``

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x.parent().change_ring(ZZ)
            SpecialHyperellipticQuotientRing K[x,y,y^-1] / (y^2 = x^5 - 3*x + 1) over Integer Ring
        """
        return SpecialHyperellipticQuotientRing(self._Q, R, is_LaurentSeriesRing(self._series_ring))

    def __call__(self, val, offset=0, check=True):
        """
        Coerce ``val`` into this ring.

        ``val`` may be an element of this ring (returned as-is, or shifted
        by ``y^offset``), a differential (converted via the associated
        Monsky-Washnitzer ring), or anything the element constructor
        accepts (list of coefficients, polynomial, scalar, ...).
        """
        if isinstance(val, SpecialHyperellipticQuotientElement) and val.parent() is self:
            if offset == 0:
                return val
            else:
                return val << offset
        elif isinstance(val, MonskyWashnitzerDifferential):
            return self._monsky_washnitzer(val)
        return SpecialHyperellipticQuotientElement(self, val, offset, check)

    def gens(self):
        """
        Return the generators of ``self``

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x.parent().gens()
            (x, y*1)
        """
        return self._x, self._y

    def x(self):
        r"""
        Return the generator `x` of ``self``

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x.parent().x()
            x
        """
        return self._x

    def y(self):
        r"""
        Return the generator `y` of ``self``

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x.parent().y()
            y*1
        """
        return self._y

    def monomial(self, i, j, b=None):
        """
        Returns `b y^j x^i`, computed quickly.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x.parent().monomial(4,5)
            y^5*x^4
        """
        i = int(i)
        j = int(j)
        if 0 < i and i < self._n:
            # x^i is already reduced; build the coefficient vector directly.
            if b is None:
                # y << (j-1) is y^j.
                by_to_j = self._series_ring_y << (j-1)
            else:
                by_to_j = self._series_ring(b) << j
            v = [self._series_ring_0] * self._n
            v[i] = by_to_j
            return self(v)
        else:
            # i outside (0, n): fall back to generic arithmetic (x**i reduces).
            return (self._x ** i) << j if b is None else self.base_ring()(b) * (self._x ** i) << j

    def monomial_diff_coeffs(self, i, j):
        r"""
        The key here is that the formula for `d(x^iy^j)` is messy
        in terms of `i`, but varies nicely with `j`.

        .. MATH::

             d(x^iy^j) = y^{j-1} (2ix^{i-1}y^2 + j (A_i(x) + B_i(x)y^2)) \frac{dx}{2y}

        Where `A,B` have degree at most `n-1` for each
        `i`. Pre-compute `A_i, B_i` for each `i`
        the "hard" way, and the rest are easy.
        """
        try:
            return self._monomial_diff_coeffs[i, j]
        except KeyError:
            pass
        if i < self._n:
            # Fast path: scale the precomputed A_i, B_i by j.
            # NOTE(review): this branch does not store its result in
            # self._monomial_diff_coeffs, so the cache above only ever hits
            # for i >= self._n -- presumably intentional since this path is
            # cheap; confirm.
            try:
                A, B, two_i_x_to_i = self._precomputed_diff_coeffs[i]
            except AttributeError:
                self._precomputed_diff_coeffs = self._precompute_monomial_diffs()
                A, B, two_i_x_to_i = self._precomputed_diff_coeffs[i]
            if i == 0:
                return j*A, j*B
            else:
                return j*A, j*B + two_i_x_to_i
        else:
            # Slow path: differentiate the monomial and extract coefficients.
            dg = self.monomial(i, j).diff()
            coeffs = [dg.extract_pow_y(j-1), dg.extract_pow_y(j+1)]
            self._monomial_diff_coeffs[i, j] = coeffs
            return coeffs

    def monomial_diff_coeffs_matrices(self):
        """
        Return (as a pair of transposed matrices) the precomputed
        differential coefficient vectors of the monomials `x^i y`.
        """
        self.monomial_diff_coeffs(0, 0)  # precompute stuff
        R = self.base_ring()
        mat_1 = matrix(R, self._n, self._n)
        mat_2 = matrix(R, self._n, self._n)
        for i in range(self._n):
            # Entries [1] and [2] of each precomputed triple (A, B, 2i x^i):
            # rows are B_i and the 2i*x^i correction term.
            mat_1[i] = self._precomputed_diff_coeffs[i][1]
            mat_2[i] = self._precomputed_diff_coeffs[i][2]
        return mat_1.transpose(), mat_2.transpose()

    def _precompute_monomial_diffs(self):
        """
        Compute, for each `0 <= i < n`, the triple of coefficient vectors
        `(A_i, B_i, 2i x^{i-1} y^2)` appearing in the formula for
        `d(x^i y^j)` used by :meth:`monomial_diff_coeffs`.
        """
        x, y = self.gens()
        R = self.base_ring()
        V = FreeModule(R, self.degree())
        As = []
        for i in range(self.degree()):
            dg = self.monomial(i, 1).diff()
            # For i = 0 the x^(i-1) term is absent.
            two_i_x_to_i = R(2*i) * x**(i-1) * y*y if i > 0 else self(0)
            A = dg - self._monsky_washnitzer(two_i_x_to_i)
            As.append((V(A.extract_pow_y(0)), V(A.extract_pow_y(2)), V(two_i_x_to_i.extract_pow_y(2))))
        return As

    def Q(self):
        """
        Return the defining polynomial of the underlying hyperelliptic curve.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-2*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x.parent().Q()
            x^5 - 2*x + 1
        """
        return self._Q

    def curve(self):
        """
        Return the underlying hyperelliptic curve.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x.parent().curve()
            Hyperelliptic Curve over Rational Field defined by y^2 = x^5 - 3*x + 1
        """
        return self._curve

    def degree(self):
        """
        Return the degree of the underlying hyperelliptic curve.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x.parent().degree()
            5
        """
        return self._n

    def prime(self):
        """
        Return the stored residue characteristic `p` (``None`` unless it
        has been assigned externally).
        """
        return self._p

    def monsky_washnitzer(self):
        """
        Return the ring of Monsky-Washnitzer differentials over ``self``.
        """
        return self._monsky_washnitzer

    def is_field(self, proof=True):
        """
        Return False as ``self`` is not a field.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x.parent().is_field()
            False
        """
        return False
# Backwards-compatible alias for the old class name.
SpecialHyperellipticQuotientRing_class = SpecialHyperellipticQuotientRing
class SpecialHyperellipticQuotientElement(CommutativeAlgebraElement):
    # An element of SpecialHyperellipticQuotientRing, stored internally as
    # self._f: a polynomial in x of degree < n whose coefficients are
    # (Laurent) series in y.

    def __init__(self, parent, val=0, offset=0, check=True):
        """
        Elements in the Hyperelliptic quotient ring

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-36*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: MW = x.parent()
            sage: MW(x+x**2+y-77) # indirect doctest
            -(77-y)*1 + x + x^2
        """
        CommutativeAlgebraElement.__init__(self, parent)
        if not check:
            # Trusted path: val is already a valid coefficient list.
            self._f = parent._poly_ring(val, check=False)
            return
        if isinstance(val, SpecialHyperellipticQuotientElement):
            # Copy from an element of a (possibly different) quotient ring,
            # coercing each y-series coefficient into our base ring.
            R = parent.base_ring()
            self._f = parent._poly_ring([a.change_ring(R) for a in val._f])
            return
        if isinstance(val, tuple):
            val, offset = val
        if isinstance(val, list) and len(val) > 0 and is_FreeModuleElement(val[0]):
            # List of vectors (one per power of y) -> list of coefficient
            # lists (one per power of x).
            val = transpose_list(val)
        self._f = parent._poly_ring(val)
        if offset != 0:
            # Shift every coefficient by y^offset.
            self._f = self._f.parent()([a << offset for a in self._f], check=False)

    def _richcmp_(self, other, op):
        """
        Compare the elements.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-36*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x == x
            True
            sage: x > y
            True
        """
        return richcmp(self._f, other._f, op)

    def change_ring(self, R):
        """
        Return the same element after changing the base ring to R.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-36*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: MW = x.parent()
            sage: z = MW(x+x**2+y-77)
            sage: z.change_ring(AA).parent()
            SpecialHyperellipticQuotientRing K[x,y,y^-1] / (y^2 = x^5 - 36*x + 1) over Algebraic Real Field
        """
        return self.parent().change_ring(R)(self)

    def __call__(self, *x):
        """
        Evaluate ``self`` at given arguments

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-36*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: MW = x.parent()
            sage: z = MW(x+x**2+y-77); z
            -(77-y)*1 + x + x^2
            sage: z(66)
            4345 + y
            sage: z(5,4)
            -43
        """
        return self._f(*x)

    def __invert__(self):
        """
        Return the inverse of the element

        The general element in our ring is not invertible, but `y` may
        be. We do not want to pass to the fraction field.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-36*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: MW = x.parent()
            sage: z = y**(-1) # indirect doctest
            sage: z.parent()
            SpecialHyperellipticQuotientRing K[x,y,y^-1] / (y^2 = x^5 - 36*x + 1) over Rational Field
            sage: z = (x+y)**(-1) # indirect doctest
            Traceback (most recent call last):
            ...
            ZeroDivisionError: Element not invertible
        """
        # Only invert elements that are a single invertible y-series
        # (no x dependence).
        if self._f.degree() == 0 and self._f[0].is_unit():
            return SpecialHyperellipticQuotientElement(self.parent(), ~self._f[0])
        else:
            raise ZeroDivisionError("Element not invertible")

    def __bool__(self):
        """
        Return True iff ``self`` is not zero.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: bool(x)
            True
        """
        return not not self._f
    __nonzero__ = __bool__

    def __eq__(self, other):
        """
        Return True iff ``self`` is equal to ``other``

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x == y # indirect doctest
            False
        """
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable under plain Python 3 semantics; presumably the Sage
        # element machinery supplies __hash__ -- confirm.
        if not isinstance(other, SpecialHyperellipticQuotientElement):
            other = self.parent()(other)
        return self._f == other._f

    def _add_(self, other):
        """
        Return the sum of two elements

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-36*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x+y
            y*1 + x
        """
        return SpecialHyperellipticQuotientElement(self.parent(), self._f + other._f)

    def _sub_(self, other):
        """
        Return the difference of two elements

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-36*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: y-x
            y*1 - x
        """
        return SpecialHyperellipticQuotientElement(self.parent(), self._f - other._f)

    def _mul_(self, other):
        """
        Return the product of two elements

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-36*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: y*x
            y*x
        """
        # over Laurent series, addition and subtraction can be
        # expensive, and the degree of this poly is small enough that
        # Karatsuba actually hurts significantly in some cases
        if self._f[0].valuation() + other._f[0].valuation() > -200:
            prod = self._f._mul_generic(other._f)
        else:
            prod = self._f * other._f
        v = prod.list()
        parent = self.parent()
        Q_coeffs = parent._Q_coeffs
        n = len(Q_coeffs) - 1
        y2 = self.parent()._series_ring_y << 1
        # Reduce modulo the relation x^n = y^2 - (Q(x) - x^n): replace
        # each x^i term (i >= n) working from the top degree down.
        for i in range(len(v)-1, n-1, -1):
            for j in range(n):
                v[i-n+j] -= Q_coeffs[j] * v[i]
            v[i-n] += y2 * v[i]
        return SpecialHyperellipticQuotientElement(parent, v[0:n])

    def _rmul_(self, c):
        """
        Return the product ``c * self`` for a base-ring scalar ``c``.
        """
        coeffs = self._f.list(copy=False)
        return self.parent()([c*a for a in coeffs], check=False)

    def _lmul_(self, c):
        """
        Return the product ``self * c`` for a base-ring scalar ``c``.
        """
        coeffs = self._f.list(copy=False)
        return self.parent()([a*c for a in coeffs], check=False)

    def __lshift__(self, k):
        """
        Return ``self`` multiplied by `y^k` (shift every coefficient
        series up by `k`).
        """
        coeffs = self._f.list(copy=False)
        return self.parent()([a << k for a in coeffs], check=False)

    def __rshift__(self, k):
        """
        Return ``self`` multiplied by `y^{-k}` (shift every coefficient
        series down by `k`).
        """
        coeffs = self._f.list(copy=False)
        return self.parent()([a >> k for a in coeffs], check=False)

    def truncate_neg(self, n):
        """
        Return ``self`` minus its terms of degree less than `n` wrt `y`.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: (x+3*y+7*x*2*y**4).truncate_neg(1)
            3*y*1 + 14*y^4*x
        """
        coeffs = self._f.list(copy=False)
        return self.parent()([a.truncate_neg(n) for a in coeffs], check=False)

    def _repr_(self):
        """
        Return a string representation of ``self``.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: (x+3*y)._repr_()
            '3*y*1 + x'
        """
        x = PolynomialRing(QQ, 'x').gen(0)
        coeffs = self._f.list()
        return repr_lincomb([(x**i, coeffs[i]) for i in range(len(coeffs))])

    def _latex_(self):
        """
        Return a LateX string for ``self``.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: (x+3*y)._latex_()
            '3y1 + x'
        """
        x = PolynomialRing(QQ, 'x').gen(0)
        coeffs = self._f.list()
        return repr_lincomb([(x**i, coeffs[i]) for i in range(len(coeffs))], is_latex=True)

    def diff(self):
        """
        Return the differential of ``self``

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: (x+3*y).diff()
            (-(9-2*y)*1 + 15*x^4) dx/2y
        """
        # try:
        #     return self._diff_x
        # except AttributeError:
        #     pass
        # d(self) = A dx + B dy
        #         = (2y A + BQ') dx/2y
        parent = self.parent()
        R = parent.base_ring()
        x, y = parent.gens()
        v = self._f.list()
        n = len(v)
        # A = d/dx part: differentiate the x-polynomial.
        A = parent([R(i) * v[i] for i in range(1, n)])
        # B = d/dy part: differentiate each y-series coefficient.
        B = parent([a.derivative() for a in v])
        dQ = parent._dQ
        return parent._monsky_washnitzer((R(2) * A << 1) + dQ * B)
        # self._diff = self.parent()._monsky_washnitzer(two_y * A + dQ * B)
        # return self._diff

    def extract_pow_y(self, k):
        r"""
        Return the coefficients of `y^k` in ``self`` as a list

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: (x+3*y+9*x*y).extract_pow_y(1)
            [3, 9, 0, 0, 0]
        """
        v = [a[k] for a in self._f.list()]
        # Pad with zeros up to the full length n.
        while len(v) < self.parent()._n:
            v.append(0)
        return v

    def min_pow_y(self):
        """
        Return the minimal degree of ``self`` w.r.t. y

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: (x+3*y).min_pow_y()
            0
        """
        # degree() == -1 means the zero polynomial.
        if self._f.degree() == -1:
            return 0
        return min([a.valuation() for a in self._f.list()])

    def max_pow_y(self):
        """
        Return the maximal degree of ``self`` w.r.t. y

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: (x+3*y).max_pow_y()
            1
        """
        if self._f.degree() == -1:
            return 0
        return max([a.degree() for a in self._f.list()])

    def coeffs(self, R=None):
        """
        Returns the raw coefficients of this element.

        INPUT:

        - ``R`` -- an (optional) base-ring in which to cast the coefficients

        OUTPUT:

        - ``coeffs`` -- a list of coefficients of powers of `x` for each power
          of `y`

        - ``n`` -- an offset indicating the power of `y` of the first list
          element

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = E.monsky_washnitzer_gens()
            sage: x.coeffs()
            ([(0, 1, 0, 0, 0)], 0)
            sage: y.coeffs()
            ([(0, 0, 0, 0, 0), (1, 0, 0, 0, 0)], 0)
            sage: a = sum(n*x^n for n in range(5)); a
            x + 2*x^2 + 3*x^3 + 4*x^4
            sage: a.coeffs()
            ([(0, 1, 2, 3, 4)], 0)
            sage: a.coeffs(Qp(7))
            ([(0, 1 + O(7^20), 2 + O(7^20), 3 + O(7^20), 4 + O(7^20))], 0)
            sage: (a*y).coeffs()
            ([(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)], 0)
            sage: (a*y^-2).coeffs()
            ([(0, 1, 2, 3, 4), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0)], -2)

        Note that the coefficient list is transposed compared to how they
        are stored and printed::

            sage: a*y^-2
            (y^-2)*x + (2*y^-2)*x^2 + (3*y^-2)*x^3 + (4*y^-2)*x^4

        A more complicated example::

            sage: a = x^20*y^-3 - x^11*y^2; a
            (y^-3-4*y^-1+6*y-4*y^3+y^5)*1 - (12*y^-3-36*y^-1+36*y+y^2-12*y^3-2*y^4+y^6)*x + (54*y^-3-108*y^-1+54*y+6*y^2-6*y^4)*x^2 - (108*y^-3-108*y^-1+9*y^2)*x^3 + (81*y^-3)*x^4
            sage: raw, offset = a.coeffs()
            sage: a.min_pow_y()
            -3
            sage: offset
            -3
            sage: raw
            [(1, -12, 54, -108, 81),
             (0, 0, 0, 0, 0),
             (-4, 36, -108, 108, 0),
             (0, 0, 0, 0, 0),
             (6, -36, 54, 0, 0),
             (0, -1, 6, -9, 0),
             (-4, 12, 0, 0, 0),
             (0, 2, -6, 0, 0),
             (1, 0, 0, 0, 0),
             (0, -1, 0, 0, 0)]
            sage: sum(c * x^i * y^(j+offset) for j, L in enumerate(raw) for i, c in enumerate(L)) == a
            True

        Can also be used to construct elements::

            sage: a.parent()(raw, offset) == a
            True
        """
        zero = self.base_ring()(0) if R is None else R(0)
        y_offset = min(self.min_pow_y(), 0)
        y_degree = max(self.max_pow_y(), 0)
        coeffs = []
        n = y_degree - y_offset + 1
        for a in self._f.list():
            k = a.valuation()
            # Zero series report infinite valuation; treat as 0.
            if k is Infinity:
                k = 0
            k -= y_offset
            z = a.list()
            # Pad each row so all rows cover the same y-range.
            coeffs.append([zero] * k + z + [zero]*(n - len(z) - k))
        while len(coeffs) < self.parent().degree():
            coeffs.append([zero] * n)
        V = FreeModule(self.base_ring() if R is None else R, self.parent().degree())
        # Transpose: stored per power of x, returned per power of y.
        coeffs = transpose_list(coeffs)
        return [V(a) for a in coeffs], y_offset
class MonskyWashnitzerDifferentialRing(UniqueRepresentation, Module):
    r"""
    A ring of Monsky--Washnitzer differentials over ``base_ring``.
    """
    def __init__(self, base_ring):
        r"""
        Initialization.

        TESTS:

        Check that caching works::

            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: from sage.schemes.hyperelliptic_curves.monsky_washnitzer import SpecialHyperellipticQuotientRing, MonskyWashnitzerDifferentialRing
            sage: S = SpecialHyperellipticQuotientRing(E)
            sage: MonskyWashnitzerDifferentialRing(S) is MonskyWashnitzerDifferentialRing(S)
            True
        """
        Module.__init__(self, base_ring)

    def invariant_differential(self):
        """
        Returns `dx/2y` as an element of self.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: MW = C.invariant_differential().parent()
            sage: MW.invariant_differential()
            1 dx/2y
        """
        return self(1)

    def __call__(self, val, offset=0):
        """
        Construct the differential `val \\cdot y^{offset} dx/2y`.
        """
        return MonskyWashnitzerDifferential(self, val, offset)

    def base_extend(self, R):
        """
        Return a new differential ring which is self base-extended to `R`

        INPUT:

        - ``R`` -- ring

        OUTPUT:

        Self, base-extended to `R`.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: MW = C.invariant_differential().parent()
            sage: MW.base_ring()
            SpecialHyperellipticQuotientRing K[x,y,y^-1] / (y^2 = x^5 - 4*x + 4) over Rational Field
            sage: MW.base_extend(Qp(5,5)).base_ring()
            SpecialHyperellipticQuotientRing K[x,y,y^-1] / (y^2 = (1 + O(5^5))*x^5 + (1 + 4*5 + 4*5^2 + 4*5^3 + 4*5^4 + O(5^5))*x + 4 + O(5^5)) over 5-adic Field with capped relative precision 5
        """
        return MonskyWashnitzerDifferentialRing(self.base_ring().base_extend(R))

    def change_ring(self, R):
        """
        Returns a new differential ring which is self with the coefficient
        ring changed to `R`.

        INPUT:

        - ``R`` -- ring of coefficients

        OUTPUT:

        Self, with the coefficient ring changed to `R`.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: MW = C.invariant_differential().parent()
            sage: MW.base_ring()
            SpecialHyperellipticQuotientRing K[x,y,y^-1] / (y^2 = x^5 - 4*x + 4) over Rational Field
            sage: MW.change_ring(Qp(5,5)).base_ring()
            SpecialHyperellipticQuotientRing K[x,y,y^-1] / (y^2 = (1 + O(5^5))*x^5 + (1 + 4*5 + 4*5^2 + 4*5^3 + 4*5^4 + O(5^5))*x + 4 + O(5^5)) over 5-adic Field with capped relative precision 5
        """
        return MonskyWashnitzerDifferentialRing(self.base_ring().change_ring(R))

    def degree(self):
        """
        Returns the degree of `Q(x)`, where the model of the underlying
        hyperelliptic curve of self is given by `y^2 = Q(x)`.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: MW = C.invariant_differential().parent()
            sage: MW.Q()
            x^5 - 4*x + 4
            sage: MW.degree()
            5
        """
        return self.base_ring().degree()

    def dimension(self):
        """
        Returns the dimension of self.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: K = Qp(7,5)
            sage: CK = C.change_ring(K)
            sage: MW = CK.invariant_differential().parent()
            sage: MW.dimension()
            4
        """
        return self.base_ring().degree()-1

    def Q(self):
        """
        Returns `Q(x)` where the model of the underlying hyperelliptic curve
        of self is given by `y^2 = Q(x)`.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: MW = C.invariant_differential().parent()
            sage: MW.Q()
            x^5 - 4*x + 4
        """
        return self.base_ring().Q()

    @cached_method
    def x_to_p(self, p):
        """
        Returns and caches `x^p`, reduced via the relations coming from the
        defining polynomial of the hyperelliptic curve.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: MW = C.invariant_differential().parent()
            sage: MW.x_to_p(3)
            x^3
            sage: MW.x_to_p(5)
            -(4-y^2)*1 + 4*x
            sage: MW.x_to_p(101) is MW.x_to_p(101)
            True
        """
        return self.base_ring().x() ** p

    @cached_method
    def frob_Q(self, p):
        r"""
        Returns and caches `Q(x^p)`, which is used in computing the image of
        `y` under a `p`-power lift of Frobenius to `A^{\dagger}`.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: MW = C.invariant_differential().parent()
            sage: MW.frob_Q(3)
            -(60-48*y^2+12*y^4-y^6)*1 + (192-96*y^2+12*y^4)*x - (192-48*y^2)*x^2 + 60*x^3
            sage: MW.Q()(MW.x_to_p(3))
            -(60-48*y^2+12*y^4-y^6)*1 + (192-96*y^2+12*y^4)*x - (192-48*y^2)*x^2 + 60*x^3
            sage: MW.frob_Q(11) is MW.frob_Q(11)
            True
        """
        return self.base_ring()._Q.change_ring(self.base_ring())(self.x_to_p(p))

    def frob_invariant_differential(self, prec, p):
        r"""
        Kedlaya's algorithm allows us to calculate the action of Frobenius on
        the Monsky-Washnitzer cohomology. First we lift `\phi` to `A^{\dagger}`
        by setting

        .. MATH::

            \phi(x) = x^p

            \phi(y) = y^p \sqrt{1 + \frac{Q(x^p) - Q(x)^p}{Q(x)^p}}.

        Pulling back the differential `dx/2y`, we get

        .. MATH::

            \phi^*(dx/2y) = px^{p-1} y(\phi(y))^{-1} dx/2y
            = px^{p-1} y^{1-p} \sqrt{1+ \frac{Q(x^p) - Q(x)^p}{Q(x)^p}} dx/2y

        Use Newton's method to calculate the square root.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: prec = 2
            sage: p = 7
            sage: MW = C.invariant_differential().parent()
            sage: MW.frob_invariant_differential(prec,p)
            ((67894400*y^-20-81198880*y^-18+40140800*y^-16-10035200*y^-14+1254400*y^-12-62720*y^-10)*1 - (119503944*y^-20-116064242*y^-18+43753472*y^-16-7426048*y^-14+514304*y^-12-12544*y^-10+1568*y^-8-70*y^-6-7*y^-4)*x + (78905288*y^-20-61014016*y^-18+16859136*y^-16-2207744*y^-14+250880*y^-12-37632*y^-10+3136*y^-8-70*y^-6)*x^2 - (39452448*y^-20-26148752*y^-18+8085490*y^-16-2007040*y^-14+376320*y^-12-37632*y^-10+1568*y^-8)*x^3 + (21102144*y^-20-18120592*y^-18+8028160*y^-16-2007040*y^-14+250880*y^-12-12544*y^-10)*x^4) dx/2y
        """
        prof = Profiler()
        prof("setup")
        # TODO, would it be useful to be able to take Frobenius of any element? Less efficient?
        x, y = self.base_ring().gens()
        prof("x_to_p")
        x_to_p_less_1 = x**(p-1)
        x_to_p = x*x_to_p_less_1
        # cache for future use
        # NOTE(review): CachedMethodCaller.set_cache takes (value, *args);
        # this call passes (p, x_to_p), i.e. value first would be x_to_p --
        # confirm the argument order against the cached_method API.
        self.x_to_p.set_cache(p, x_to_p)
        prof("frob_Q")
        a = self.frob_Q(p) >> 2*p  # frobQ * y^{-2p}
        prof("sqrt")
        # Q = self.base_ring()._Q
        # three_halves = Q.parent().base_ring()(Rational((3,2)))
        # one_half = Q.parent().base_ring()(Rational((1,2)))
        three_halves = self.base_ring()._series_ring.base_ring()(Rational((3, 2)))
        one_half = self.base_ring()._series_ring.base_ring()(Rational((1, 2)))
        half_a = a._rmul_(one_half)
        # We are solving for t = a^{-1/2} = (F_pQ y^{-p})^{-1/2}
        # Newton's method converges because we know the root is in the same residue class as 1.
        # t = self.base_ring()(1)
        t = self.base_ring()(three_halves) - half_a
        # first iteration trivial, start with prec 2
        for cur_prec in newton_method_sizes(prec)[2:]:
            # newton_method_sizes = [1, 2, ...]
            y_prec = -(2*cur_prec-1)*p+1
            # binomial expansion is $\sum p^{k+1} y^{-(2k+1)p+1} f(x)$
            # so if we are only correct mod p^prec,
            # can ignore y powers less than y_prec
            t_cube = (t*t*t).truncate_neg(y_prec)
            t = t._rmul_(three_halves) - (half_a * t_cube).truncate_neg(y_prec)
            # t = (3/2) t - (1/2) a t^3
        prof("compose")
        F_dx_y = (p * x_to_p_less_1 * t) >> (p-1)  # px^{p-1} sqrt(a) * y^{-p+1}
        prof("done")
        return MonskyWashnitzerDifferential(self, F_dx_y)

    def frob_basis_elements(self, prec, p):
        r"""
        Return the action of a `p`-power lift of Frobenius on the basis

        .. MATH::

            \{ dx/2y, x dx/2y, ..., x^{d-2} dx/2y \}

        where `d` is the degree of the underlying hyperelliptic curve.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: prec = 1
            sage: p = 5
            sage: MW = C.invariant_differential().parent()
            sage: MW.frob_basis_elements(prec,p)
            [((92000*y^-14-74200*y^-12+32000*y^-10-8000*y^-8+1000*y^-6-50*y^-4)*1 - (194400*y^-14-153600*y^-12+57600*y^-10-9600*y^-8+600*y^-6)*x + (204800*y^-14-153600*y^-12+38400*y^-10-3200*y^-8)*x^2 - (153600*y^-14-76800*y^-12+9600*y^-10)*x^3 + (63950*y^-14-18550*y^-12+1600*y^-10-400*y^-8+50*y^-6+5*y^-4)*x^4) dx/2y, (-(1391200*y^-14-941400*y^-12+302000*y^-10-76800*y^-8+14400*y^-6-1320*y^-4+30*y^-2)*1 + (2168800*y^-14-1402400*y^-12+537600*y^-10-134400*y^-8+16800*y^-6-720*y^-4)*x - (1596800*y^-14-1433600*y^-12+537600*y^-10-89600*y^-8+5600*y^-6)*x^2 + (1433600*y^-14-1075200*y^-12+268800*y^-10-22400*y^-8)*x^3 - (870200*y^-14-445350*y^-12+63350*y^-10-3200*y^-8+600*y^-6-30*y^-4-5*y^-2)*x^4) dx/2y, ((19488000*y^-14-15763200*y^-12+4944400*y^-10-913800*y^-8+156800*y^-6-22560*y^-4+1480*y^-2-10)*1 - (28163200*y^-14-18669600*y^-12+5774400*y^-10-1433600*y^-8+268800*y^-6-25440*y^-4+760*y^-2)*x + (15062400*y^-14-12940800*y^-12+5734400*y^-10-1433600*y^-8+179200*y^-6-8480*y^-4)*x^2 - (12121600*y^-14-11468800*y^-12+4300800*y^-10-716800*y^-8+44800*y^-6)*x^3 + (9215200*y^-14-6952400*y^-12+1773950*y^-10-165750*y^-8+5600*y^-6-720*y^-4+10*y^-2+5)*x^4) dx/2y, (-(225395200*y^-14-230640000*y^-12+91733600*y^-10-18347400*y^-8+2293600*y^-6-280960*y^-4+31520*y^-2-1480-10*y^2)*1 + (338048000*y^-14-277132800*y^-12+89928000*y^-10-17816000*y^-8+3225600*y^-6-472320*y^-4+34560*y^-2-720)*x - (172902400*y^-14-141504000*y^-12+58976000*y^-10-17203200*y^-8+3225600*y^-6-314880*y^-4+11520*y^-2)*x^2 + (108736000*y^-14-109760000*y^-12+51609600*y^-10-12902400*y^-8+1612800*y^-6-78720*y^-4)*x^3 - (85347200*y^-14-82900000*y^-12+31251400*y^-10-5304150*y^-8+367350*y^-6-8480*y^-4+760*y^-2+10-5*y^2)*x^4) dx/2y]
        """
        F_i = self.frob_invariant_differential(prec, p)
        x_to_p = self.x_to_p(p)
        F = [F_i]
        # Successive basis images differ by multiplication by x^p.
        for i in range(1, self.degree()-1):
            F_i *= x_to_p
            F.append(F_i)
        return F

    def helper_matrix(self):
        """
        We use this to solve for the linear combination of
        `x^i y^j` needed to clear all terms with
        `y^{j-1}`.

        EXAMPLES::

            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: MW = C.invariant_differential().parent()
            sage: MW.helper_matrix()
            [ 256/2101  320/2101  400/2101  500/2101  625/2101]
            [-625/8404  -64/2101  -80/2101 -100/2101 -125/2101]
            [-125/2101 -625/8404  -64/2101  -80/2101 -100/2101]
            [-100/2101 -125/2101 -625/8404  -64/2101  -80/2101]
            [ -80/2101 -100/2101 -125/2101 -625/8404  -64/2101]
        """
        try:
            return self._helper_matrix
        except AttributeError:
            pass
        # The smallest y term of (1/j) d(x^i y^j) is constant for all j.
        L = []
        x, y = self.base_ring().gens()
        n = self.degree()
        for i in range(n):
            L.append((y*x**i).diff().extract_pow_y(0))
        A = matrix(L).transpose()
        if not isinstance(A.base_ring(), IntegralDomain):
            # must be using integer_mod or something to approximate
            # Invert over QQ, then map back to the approximating ring.
            self._helper_matrix = (~A.change_ring(QQ)).change_ring(A.base_ring())
        else:
            self._helper_matrix = ~A
        return self._helper_matrix
# Backwards-compatible alias for the old class name.
MonskyWashnitzerDifferentialRing_class = MonskyWashnitzerDifferentialRing
class MonskyWashnitzerDifferential(ModuleElement):
def __init__(self, parent, val=0, offset=0):
r"""
Create an element of the Monsky-Washnitzer ring of differentials, of
the form `F dx/2y`.
INPUT:
- ``parent`` -- Monsky-Washnitzer differential ring (instance of class
:class:`~MonskyWashnitzerDifferentialRing`
- ``val`` -- element of the base ring, or list of coefficients
- ``offset`` -- if non-zero, shift val by `y^\text{offset}` (default 0)
EXAMPLES::
sage: R.<x> = QQ['x']
sage: C = HyperellipticCurve(x^5 - 4*x + 4)
sage: x,y = C.monsky_washnitzer_gens()
sage: MW = C.invariant_differential().parent()
sage: sage.schemes.hyperelliptic_curves.monsky_washnitzer.MonskyWashnitzerDifferential(MW, x)
x dx/2y
sage: sage.schemes.hyperelliptic_curves.monsky_washnitzer.MonskyWashnitzerDifferential(MW, y)
y*1 dx/2y
sage: sage.schemes.hyperelliptic_curves.monsky_washnitzer.MonskyWashnitzerDifferential(MW, x, 10)
y^10*x dx/2y
"""
ModuleElement.__init__(self, parent)
if isinstance(val, MonskyWashnitzerDifferential):
val = val._coeff
self._coeff = self.parent().base_ring()(val, offset)
def _add_(left, right):
"""
Returns the sum of left and right, both elements of the
Monsky-Washnitzer ring of differentials.
EXAMPLES::
sage: R.<x> = QQ['x']
sage: C = HyperellipticCurve(x^5-4*x + 4)
sage: x,y = C.monsky_washnitzer_gens()
sage: w = C.invariant_differential()
sage: w + w
2*1 dx/2y
sage: x*w + w
(1 + x) dx/2y
sage: x*w + y*w
(y*1 + x) dx/2y
"""
return MonskyWashnitzerDifferential(left.parent(),
left._coeff + right._coeff)
def _sub_(left, right):
"""
Returns the difference of left and right, both elements of the
Monsky-Washnitzer ring of differentials.
EXAMPLES::
sage: R.<x> = QQ['x']
sage: C = HyperellipticCurve(x^5-4*x+4)
sage: x,y = C.monsky_washnitzer_gens()
sage: w = C.invariant_differential()
sage: w-w
0 dx/2y
sage: x*w-w
(-1 + x) dx/2y
sage: w - x*w - y*w
((1-y)*1 - x) dx/2y
"""
return MonskyWashnitzerDifferential(left.parent(),
left._coeff - right._coeff)
def __neg__(self):
"""
Returns the additive inverse of self.
EXAMPLES::
sage: R.<x> = QQ['x']
sage: C = HyperellipticCurve(x^5-4*x+4)
sage: x,y = C.monsky_washnitzer_gens()
sage: w = C.invariant_differential()
sage: -w
-1 dx/2y
sage: -((y-x)*w)
(-y*1 + x) dx/2y
"""
return MonskyWashnitzerDifferential(self.parent(), -self._coeff)
def _lmul_(self, a):
"""
Returns `self * a`.
EXAMPLES::
sage: R.<x> = QQ['x']
sage: C = HyperellipticCurve(x^5-4*x+4)
sage: x,y = C.monsky_washnitzer_gens()
sage: w = C.invariant_differential()
sage: w*x
x dx/2y
sage: (w*x)*2
2*x dx/2y
sage: w*y
y*1 dx/2y
sage: w*(x+y)
(y*1 + x) dx/2y
"""
return MonskyWashnitzerDifferential(self.parent(), self._coeff * a)
def _rmul_(self, a):
"""
Returns `a * self`.
EXAMPLES::
sage: R.<x> = QQ['x']
sage: C = HyperellipticCurve(x^5-4*x+4)
sage: x,y = C.monsky_washnitzer_gens()
sage: w = C.invariant_differential()
sage: x*w
x dx/2y
sage: 2*(x*w)
2*x dx/2y
sage: y*w
y*1 dx/2y
sage: (x+y)*w
(y*1 + x) dx/2y
"""
return MonskyWashnitzerDifferential(self.parent(), a * self._coeff)
    def coeff(self):
        r"""
        Returns `A`, where this element is `A dx/2y`.
        EXAMPLES::
            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: x,y = C.monsky_washnitzer_gens()
            sage: w = C.invariant_differential()
            sage: w
            1 dx/2y
            sage: w.coeff()
            1
            sage: (x*y*w).coeff()
            y*x
        """
        # Direct accessor for the stored coefficient; no copy is made.
        return self._coeff
def __bool__(self):
"""
EXAMPLES::
sage: R.<x> = QQ['x']
sage: C = HyperellipticCurve(x^5-4*x+4)
sage: x,y = C.monsky_washnitzer_gens()
sage: w = C.invariant_differential()
sage: not w
False
sage: not 0*w
True
sage: not x*y*w
False
"""
return not not self._coeff
__nonzero__ = __bool__
def _repr_(self):
"""
EXAMPLES::
sage: R.<x> = QQ['x']
sage: C = HyperellipticCurve(x^5-4*x+4)
sage: x,y = C.monsky_washnitzer_gens()
sage: w = C.invariant_differential()
sage: w
1 dx/2y
sage: (2*x+y)*w
(y*1 + 2*x) dx/2y
"""
s = self._coeff._repr_()
if s.find("+") != -1 or s.find("-") > 0:
s = "(%s)" % s
return s + " dx/2y"
def _latex_(self):
"""
Returns the latex representation of self.
EXAMPLES::
sage: R.<x> = QQ['x']
sage: C = HyperellipticCurve(x^5-4*x+4)
sage: x,y = C.monsky_washnitzer_gens()
sage: w = C.invariant_differential()
sage: latex(w)
1 \frac{dx}{2y}
sage: latex(x*w)
x \frac{dx}{2y}
"""
s = self._coeff._latex_()
if s.find("+") != -1 or s.find("-") > 0:
s = "\\left(%s\\right)" % s
return s + " \\frac{dx}{2y}"
    def extract_pow_y(self, k):
        """
        Returns the power of `y` in `A` where self is `A dx/2y`.
        EXAMPLES::
            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = C.monsky_washnitzer_gens()
            sage: A = y^5 - x*y^3
            sage: A.extract_pow_y(5)
            [1, 0, 0, 0, 0]
            sage: (A * C.invariant_differential()).extract_pow_y(5)
            [1, 0, 0, 0, 0]
        """
        # Delegates to the coefficient A; returns the list of x-coefficients
        # attached to y^k in A.
        return self._coeff.extract_pow_y(k)
    def min_pow_y(self):
        """
        Returns the minimum power of `y` in `A` where self is `A dx/2y`.
        EXAMPLES::
            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = C.monsky_washnitzer_gens()
            sage: w = y^5 * C.invariant_differential()
            sage: w.min_pow_y()
            5
            sage: w = (x^2*y^4 + y^5) * C.invariant_differential()
            sage: w.min_pow_y()
            4
        """
        # Delegates to the coefficient A of A dx/2y.
        return self._coeff.min_pow_y()
    def max_pow_y(self):
        """
        Returns the maximum power of `y` in `A` where self is `A dx/2y`.
        EXAMPLES::
            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = C.monsky_washnitzer_gens()
            sage: w = y^5 * C.invariant_differential()
            sage: w.max_pow_y()
            5
            sage: w = (x^2*y^4 + y^5) * C.invariant_differential()
            sage: w.max_pow_y()
            5
        """
        # Delegates to the coefficient A of A dx/2y.
        return self._coeff.max_pow_y()
    def reduce_neg_y(self):
        """
        Use homology relations to eliminate negative powers of `y`.
        EXAMPLES::
            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = C.monsky_washnitzer_gens()
            sage: (y^-1).diff().reduce_neg_y()
            ((y^-1)*1, 0 dx/2y)
            sage: (y^-5*x^2+y^-1*x).diff().reduce_neg_y()
            ((y^-1)*x + (y^-5)*x^2, 0 dx/2y)
        """
        S = self.parent().base_ring()
        R = S.base_ring()
        M = self.parent().helper_matrix()
        p = S._p
        n = S.degree()
        x, y = S.gens()
        f = S(0)
        reduced = self
        # Walk from the most negative power of y up toward y^0, cancelling
        # the y^(j-1) row of ``reduced`` at each step.
        for j in range(self.min_pow_y()+1, 0):
            if p is not None and p.divides(j):
                # Divide coefficients individually when p | j (inverting
                # j in R would fail / lose p-adic precision).
                cs = [a/j for a in reduced.extract_pow_y(j-1)]
            else:
                j_inverse = ~R(j)
                cs = [a*j_inverse for a in reduced.extract_pow_y(j-1)]
            # Solve for the exact form g = sum_i lin_comb[i] x^i y^j whose
            # differential cancels these coefficients.
            lin_comb = M * vector(M.base_ring(), cs)
            g = self.parent().base_ring()(0)
            if not lin_comb.is_zero():
                for i in range(n):
                    if lin_comb[i] != 0:
                        g += S.monomial(i, j, lin_comb[i])
                if not g.is_zero():
                    f += g
                    reduced -= g.diff()
        # Invariant: self == f.diff() + reduced, and ``reduced`` has no
        # negative powers of y.
        return f, reduced
    def reduce_neg_y_fast(self, even_degree_only=False):
        """
        Use homology relations to eliminate negative powers of `y`.
        EXAMPLES::
            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^5-3*x+1)
            sage: x, y = E.monsky_washnitzer_gens()
            sage: (y^-1).diff().reduce_neg_y_fast()
            ((y^-1)*1, 0 dx/2y)
            sage: (y^-5*x^2+y^-1*x).diff().reduce_neg_y_fast()
            ((y^-1)*x + (y^-5)*x^2, 0 dx/2y)
        It leaves non-negative powers of `y` alone::
            sage: y.diff()
            (-3*1 + 5*x^4) dx/2y
            sage: y.diff().reduce_neg_y_fast()
            (0, (-3*1 + 5*x^4) dx/2y)
        """
        # prof = Profiler()
        # prof("reduce setup")
        S = self.parent().base_ring()
        R = S.base_ring()
        M = self.parent().helper_matrix()
        # prof("extract coeffs")
        # Work on the raw coefficient vectors rather than ring elements;
        # coeffs[k] is the vector of x-coefficients of y^(k+offset).
        coeffs, offset = self.coeffs(R)
        V = coeffs[0].parent()
        if offset == 0:
            # No negative powers of y present; nothing to reduce.
            return S(0), self
        # prof("loop %s"%self.min_pow_y())
        forms = []
        p = S._p
        for j in range(self.min_pow_y()+1, 0):
            if (even_degree_only and j % 2 == 0) or coeffs[j-offset-1].is_zero():
                forms.append(V(0))
            else:
                # this is a total hack to deal with the fact that we're using
                # rational numbers to approximate fixed precision p-adics
                if p is not None and j % 3 == 1:
                    try:
                        v = coeffs[j-offset-1]
                        for kk in range(len(v)):
                            a = v[kk]
                            ppow = p**max(-a.valuation(S._p), 0)
                            v[kk] = ((a * ppow) % S._prec_cap) / ppow
                    except AttributeError:
                        pass
                lin_comb = ~R(j) * (M * coeffs[j-offset-1])
                forms.append(lin_comb)
                for i in lin_comb.nonzero_positions():
                    # g = lin_comb[i] x^i y^j
                    # self -= dg
                    coeffs[j-offset+1] -= lin_comb[i] * S.monomial_diff_coeffs(i, j)[1]
        # prof("recreate forms")
        f = S(forms, offset+1)
        # Only the non-negative rows of coeffs remain; rebuild the residue.
        reduced = S._monsky_washnitzer(coeffs[-1-offset:], -1)
        return f, reduced
    def reduce_neg_y_faster(self, even_degree_only=False):
        """
        Use homology relations to eliminate negative powers of `y`.
        EXAMPLES::
            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-3*x+1)
            sage: x,y = C.monsky_washnitzer_gens()
            sage: (y^-1).diff().reduce_neg_y()
            ((y^-1)*1, 0 dx/2y)
            sage: (y^-5*x^2+y^-1*x).diff().reduce_neg_y_faster()
            ((y^-1)*x + (y^-5)*x^2, 0 dx/2y)
        """
        # Timings indicate that this is not any faster after all...
        S = self.parent().base_ring()
        R = S.base_ring()
        M = self.parent().helper_matrix()
        coeffs, offset = self.coeffs(R)
        V = coeffs[0].parent()
        zeroV = V(0)
        if offset == 0:
            # No negative powers of y present; nothing to reduce.
            return S(0), self
        # See monomial_diff_coeffs
        # this is the B_i and x_to_i contributions respectively for all i
        d_mat_1, d_mat_2 = S.monomial_diff_coeffs_matrices()
        forms = []
        for j in range(self.min_pow_y()+1, 0):
            if coeffs[j-offset-1].is_zero():
                forms.append(zeroV)
            else:
                # this is a total hack to deal with the fact that we're using
                # rational numbers to approximate fixed precision p-adics
                if j % 3 == 0:
                    try:
                        v = coeffs[j-offset-1]
                        for kk in range(len(v)):
                            a = v[kk]
                            ppow = S._p**max(-a.valuation(S._p), 0)
                            v[kk] = ((a * ppow) % S._prec_cap) / ppow
                    except AttributeError:
                        pass
                j_inverse = ~R(j)
                lin_comb = (M * coeffs[j-offset-1])
                forms.append(j_inverse * lin_comb)
                # Update the y^(j+1) row in one matrix product instead of
                # per-monomial calls.
                coeffs[j-offset+1] -= (d_mat_1 + j_inverse * d_mat_2) * lin_comb
        f = S(forms, offset+1)
        reduced = S._monsky_washnitzer(coeffs[-1-offset:], -1)
        # reduced = self - f.diff()
        return f, reduced
    def reduce_pos_y(self):
        """
        Use homology relations to eliminate positive powers of `y`.
        EXAMPLES::
            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^3-4*x+4)
            sage: x,y = C.monsky_washnitzer_gens()
            sage: (y^2).diff().reduce_pos_y()
            (y^2*1, 0 dx/2y)
            sage: (y^2*x).diff().reduce_pos_y()
            (y^2*x, 0 dx/2y)
            sage: (y^92*x).diff().reduce_pos_y()
            (y^92*x, 0 dx/2y)
            sage: w = (y^3 + x).diff()
            sage: w += w.parent()(x)
            sage: w.reduce_pos_y_fast()
            (y^3*1 + x, x dx/2y)
        """
        S = self.parent().base_ring()
        n = S.Q().degree()
        x, y = S.gens()
        f = S(0)
        reduced = self
        # Descend from the highest power of y, cancelling one (i, j)
        # coefficient of ``reduced`` at a time.
        for j in range(self.max_pow_y(), 0, -1):
            for i in range(n-1, -1, -1):
                c = reduced.extract_pow_y(j)[i]
                if c != 0:
                    # For the top x-degree use d(y^(j+1)); otherwise use
                    # d(x^(i+1) y^(j-1)) — each has leading term x^i y^j.
                    g = S.monomial(0, j+1) if i == n-1 else S.monomial(i+1, j-1)
                    dg = g.diff()
                    denom = dg.extract_pow_y(j)[i]
                    c /= denom
                    c = g.parent()(c)
                    f += c * g
                    reduced -= c * dg
        # Invariant: self == f.diff() + reduced.
        return f, reduced
    def reduce_pos_y_fast(self, even_degree_only=False):
        """
        Use homology relations to eliminate positive powers of `y`.
        EXAMPLES::
            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^3-4*x+4)
            sage: x, y = E.monsky_washnitzer_gens()
            sage: y.diff().reduce_pos_y_fast()
            (y*1, 0 dx/2y)
            sage: (y^2).diff().reduce_pos_y_fast()
            (y^2*1, 0 dx/2y)
            sage: (y^2*x).diff().reduce_pos_y_fast()
            (y^2*x, 0 dx/2y)
            sage: (y^92*x).diff().reduce_pos_y_fast()
            (y^92*x, 0 dx/2y)
            sage: w = (y^3 + x).diff()
            sage: w += w.parent()(x)
            sage: w.reduce_pos_y_fast()
            (y^3*1 + x, x dx/2y)
        """
        S = self.parent().base_ring()
        R = S.base_ring()
        n = S.Q().degree()
        # Work on raw coefficient vectors; coeffs[k] holds the
        # x-coefficients of y^(k+offset).
        coeffs, offset = self.coeffs(R)
        V = coeffs[0].parent()
        zeroV = V(0)
        forms = [V(0), V(0)]
        for j in range(self.max_pow_y(), -1, -1):
            if (even_degree_only and j % 2 == 1) or (j > 0 and coeffs[j-offset].is_zero()):
                forms.append(zeroV)
                continue
            form = V(0)
            i = n-1
            c = coeffs[j-offset][i]
            if c != 0:
                # Cancel the top x-degree term with d(y^{j+1}).
                dg_coeffs = S.monomial_diff_coeffs(0, j+1)[0]
                c /= dg_coeffs[i]
                forms[len(forms)-2][0] = c
                # self -= c d(y^{j+1})
                coeffs[j-offset] -= c*dg_coeffs
            if j == 0:
                # the others are basis elements
                break
            for i in range(n-2, -1, -1):
                c = coeffs[j-offset][i]
                if c != 0:
                    dg_coeffs = S.monomial_diff_coeffs(i+1, j-1)
                    denom = dg_coeffs[1][i]
                    c /= denom
                    form[i+1] = c
                    # self -= c d(x^{i+1} y^{j-1})
                    coeffs[j-offset] -= c*dg_coeffs[1]
                    coeffs[j-offset-2] -= c*dg_coeffs[0]
            forms.append(form)
        # forms was accumulated highest power first; S() wants lowest first.
        forms.reverse()
        f = S(forms)
        reduced = self.parent()(coeffs[:1-offset], offset)
        return f, reduced
    def reduce(self):
        """
        Use homology relations to find `a` and `f` such that this element is
        equal to `a + df`, where `a` is given in terms of the `x^i dx/2y`.
        EXAMPLES::
            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: x,y = C.monsky_washnitzer_gens()
            sage: w = (y*x).diff()
            sage: w.reduce()
            (y*x, 0 dx/2y)
            sage: w = x^4 * C.invariant_differential()
            sage: w.reduce()
            (1/5*y*1, 4/5*1 dx/2y)
            sage: w = sum(QQ.random_element() * x^i * y^j for i in [0..4] for j in [-3..3]) * C.invariant_differential()
            sage: f, a = w.reduce()
            sage: f.diff() + a - w
            0 dx/2y
        """
        n = self.parent().base_ring().Q().degree()
        # First clear negative powers of y, then positive ones.
        f1, a = self.reduce_neg_y()
        f2, a = a.reduce_pos_y()
        f = f1 + f2
        # One non-basis term (x^(n-1) dx/2y) may remain at y^0; cancel it
        # against dy, which has leading term of that x-degree.
        c = a.extract_pow_y(0)[n-1]
        if c != 0:
            x, y = self.parent().base_ring().gens()
            g = y
            dg = g.diff()
            c = g.parent()(c/dg.extract_pow_y(0)[n-1])
            f += c * g
            a -= c * dg
        # Invariant: self == f.diff() + a, with a spanned by x^i dx/2y.
        return f, a
    def reduce_fast(self, even_degree_only=False):
        """
        Use homology relations to find `a` and `f` such that this element is
        equal to `a + df`, where `a` is given in terms of the `x^i dx/2y`.
        EXAMPLES::
            sage: R.<x> = QQ['x']
            sage: E = HyperellipticCurve(x^3-4*x+4)
            sage: x, y = E.monsky_washnitzer_gens()
            sage: x.diff().reduce_fast()
            (x, (0, 0))
            sage: y.diff().reduce_fast()
            (y*1, (0, 0))
            sage: (y^-1).diff().reduce_fast()
            ((y^-1)*1, (0, 0))
            sage: (y^-11).diff().reduce_fast()
            ((y^-11)*1, (0, 0))
            sage: (x*y^2).diff().reduce_fast()
            (y^2*x, (0, 0))
        """
        # Clear negative powers first, then positive powers.
        f1, reduced = self.reduce_neg_y_fast(even_degree_only)
        f2, reduced = reduced.reduce_pos_y_fast(even_degree_only)
        # f1, reduced = self.reduce_neg_y()
        # f2, reduced = reduced.reduce_pos_y()
        # The residue lives at y^0; drop the top (non-basis) entry and
        # return the rest as a plain vector over the base ring.
        v = reduced.extract_pow_y(0)
        v.pop()
        V = FreeModule(self.base_ring().base_ring(), len(v))
        return f1+f2, V(v)
    def coeffs(self, R=None):
        """
        Used to obtain the raw coefficients of a differential, see
        :meth:`SpecialHyperellipticQuotientElement.coeffs`
        INPUT:
        - R -- An (optional) base ring in which to cast the coefficients
        OUTPUT:
        The raw coefficients of $A$ where self is $A dx/2y$.
        EXAMPLES::
            sage: R.<x> = QQ['x']
            sage: C = HyperellipticCurve(x^5-4*x+4)
            sage: x,y = C.monsky_washnitzer_gens()
            sage: w = C.invariant_differential()
            sage: w.coeffs()
            ([(1, 0, 0, 0, 0)], 0)
            sage: (x*w).coeffs()
            ([(0, 1, 0, 0, 0)], 0)
            sage: (y*w).coeffs()
            ([(0, 0, 0, 0, 0), (1, 0, 0, 0, 0)], 0)
            sage: (y^-2*w).coeffs()
            ([(1, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0)], -2)
        """
        # Delegates to the coefficient A of A dx/2y.
        return self._coeff.coeffs(R)
    def coleman_integral(self, P, Q):
        r"""
        Compute the definite integral of ``self`` from `P` to `Q`.
        INPUT:
        - `P`, `Q` -- two points on the underlying curve
        OUTPUT:
        `\int_P^Q \text{self}`
        EXAMPLES::
            sage: K = pAdicField(5,7)
            sage: E = EllipticCurve(K,[-31/3,-2501/108]) #11a
            sage: P = E(K(14/3), K(11/2))
            sage: w = E.invariant_differential()
            sage: w.coleman_integral(P,2*P)
            O(5^6)
            sage: Q = E([3,58332])
            sage: w.coleman_integral(P,Q)
            2*5 + 4*5^2 + 3*5^3 + 4*5^4 + 3*5^5 + O(5^6)
            sage: w.coleman_integral(2*P,Q)
            2*5 + 4*5^2 + 3*5^3 + 4*5^4 + 3*5^5 + O(5^6)
            sage: (2*w).coleman_integral(P, Q) == 2*(w.coleman_integral(P, Q))
            True
        """
        # Delegates to the curve's Coleman integration implementation.
        return self.parent().base_ring().curve().coleman_integral(self, P, Q)
    # Alias: integrate is the same computation under a shorter name.
    integrate = coleman_integral
|
{"hexsha": "13363c1170ad9334f17283350f78d2ead7b07831", "size": 124605, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sage/schemes/hyperelliptic_curves/monsky_washnitzer.py", "max_stars_repo_name": "Blues1998/sage", "max_stars_repo_head_hexsha": "b5c9cf037cbce672101725f269470135b9b2c5c4", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-19T22:34:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-19T22:34:03.000Z", "max_issues_repo_path": "src/sage/schemes/hyperelliptic_curves/monsky_washnitzer.py", "max_issues_repo_name": "Blues1998/sage", "max_issues_repo_head_hexsha": "b5c9cf037cbce672101725f269470135b9b2c5c4", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sage/schemes/hyperelliptic_curves/monsky_washnitzer.py", "max_forks_repo_name": "Blues1998/sage", "max_forks_repo_head_hexsha": "b5c9cf037cbce672101725f269470135b9b2c5c4", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-03-29T17:13:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-03T18:11:28.000Z", "avg_line_length": 34.8740554156, "max_line_length": 1689, "alphanum_fraction": 0.5277637334, "include": true, "reason": "from sage", "num_tokens": 37471}
|
import unittest
import hierarch.stats
from hierarch.power import DataSimulator
import scipy.stats as stats
import numpy as np
import pandas as pd
class TestPreprocessData(unittest.TestCase):
    def test_label_encoding(self):
        """_preprocess_data should label-encode string columns to a numeric
        dtype while leaving numeric columns untouched."""
        # check that strings get encoded
        data = np.array(["a", "b", "c"]).reshape((3, 1))
        processed = hierarch.stats._preprocess_data(data)
        # BUG FIX: assertTrue(x, y) treats y as the failure *message*, so the
        # original check passed unconditionally. Use assertEqual to actually
        # compare the dtype.
        self.assertEqual(processed.dtype, np.float64)
        self.assertEqual(data.shape, processed.shape)
        # check that floats do not get encoded
        data = np.arange(10, step=0.5, dtype='object').reshape((10, 2))
        processed = hierarch.stats._preprocess_data(data)
        for idx, v in enumerate(processed.flat):
            self.assertEqual(v, data.flat[idx])
        # check that when there is a mix of numerical and string columns,
        # the numerical columns do not get encoded
        data = np.arange(3, step=0.5, dtype='object').reshape((3, 2))
        data[:, 0] = np.array(["a", "b", "c"])
        processed = hierarch.stats._preprocess_data(data)
        self.assertEqual(processed.dtype, np.float64)
        for idx, v in enumerate(processed[:, 1]):
            self.assertEqual(v, data[:, 1][idx])
class TestStudentizedCovariance(unittest.TestCase):
    def test_cov(self):
        '''
        Checks studentized_covariance against expected value.
        '''
        # Two-column fixture: column 0 is a binary treatment indicator,
        # column 1 the response; expected value is a precomputed constant.
        x = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                      [1, 2, 3, 4, 5, 2, 3, 4, 5, 6]]).T
        self.assertAlmostEqual(hierarch.stats.studentized_covariance(x[:,0], x[:,1]), 1.0039690353154482)
class TestWelch(unittest.TestCase):
    def test_welch(self):
        '''
        Checks welch_statistic against expected value from scipy.stats.ttest_ind.
        '''
        # Random inputs are fine here: both implementations are evaluated
        # on the same samples, so the comparison is deterministic per run.
        a = np.random.randint(10, size=10)
        b = np.random.randint(10, size=10)
        self.assertAlmostEqual(hierarch.stats.welch_statistic(a, b), stats.ttest_ind(a, b, equal_var=False)[0])
class TestHypothesisTest(unittest.TestCase):
    # Shared fixture: simulated 3-level hierarchical data with a two-group
    # treatment effect, built once at class-definition time.
    import scipy.stats as stats
    paramlist = [[0, 2], [stats.norm], [stats.norm]]
    hierarchy = [2, 4, 3]
    datagen = DataSimulator(paramlist, random_state=2)
    datagen.fit(hierarchy)
    data = datagen.generate()
    def test_corr_vs_means(self):
        # With two groups, 'corr' and 'means' statistics should give the
        # same p-value for both the exact and approximate permutation tests.
        # check exact test
        corr_p = hierarch.stats.hypothesis_test(self.data, treatment_col=0, compare='corr', bootstraps=1000, permutations='all', random_state=1)
        t_p = hierarch.stats.hypothesis_test(self.data, treatment_col=0, compare='means', bootstraps=1000, permutations='all', random_state=1)
        self.assertAlmostEqual(corr_p, t_p)
        # check approximate test
        corr_p = hierarch.stats.hypothesis_test(self.data, treatment_col=0, compare='corr', bootstraps=1000, permutations=70, random_state=1)
        t_p = hierarch.stats.hypothesis_test(self.data, treatment_col=0, compare='means', bootstraps=1000, permutations=70, random_state=1)
        self.assertAlmostEqual(corr_p, t_p)
    def test_hypothesis_exceptions(self):
        # Each bad input should raise (or warn) with the documented message.
        with self.assertRaises(TypeError) as raises:
            hierarch.stats.hypothesis_test("ah", 0)
        self.assertIn("Input data must be ndarray or DataFrame.", str(raises.exception))
        with self.assertWarns(Warning) as warning:
            hierarch.stats.hypothesis_test(self.data, 0, skip=[0])
        self.assertIn("No need to include columns before treated columns in skip.", str(warning.warning))
        with self.assertRaises(TypeError) as raises:
            hierarch.stats.hypothesis_test(self.data, 0, bootstraps=1.5)
        self.assertIn("bootstraps must be an integer greater than 0", str(raises.exception))
        with self.assertRaises(TypeError) as raises:
            hierarch.stats.hypothesis_test(self.data, 0, permutations='a')
        self.assertIn("permutations must be 'all' or an integer greater than 0", str(raises.exception))
        with self.assertRaises(TypeError) as raises:
            hierarch.stats.hypothesis_test(self.data, 0, permutations=1.5)
        self.assertIn("permutations must be 'all' or an integer greater than 0", str(raises.exception))
        with self.assertRaises(AttributeError) as raises:
            hello = 5
            hierarch.stats.hypothesis_test(self.data, 0, compare=hello)
        self.assertIn("Custom test statistics must be callable.", str(raises.exception))
        with self.assertWarns(Warning) as warning:
            hierarch.stats.hypothesis_test(self.data, 1)
        self.assertIn("No levels to bootstrap. Setting bootstraps to zero.", str(warning.warning))
class TestMultiTest(unittest.TestCase):
    # Shared fixture: four treatment groups so there are C(4,2) = 6
    # pairwise hypotheses.
    import scipy.stats as stats
    paramlist = [[0, 2, 4, 6], [stats.norm], [stats.norm]]
    hierarchy = [4, 4, 3]
    datagen = DataSimulator(paramlist, random_state=2)
    datagen.fit(hierarchy)
    data = datagen.generate()
    def test_get_comparisons(self):
        # check that all hypotheses are grabbed
        test = hierarch.stats._get_comparisons(self.data, 0)
        self.assertEqual(len(test), 6)
        # check that every hypothesis is tested
        out = hierarch.stats.multi_sample_test(self.data, 0).to_numpy()
        self.assertEqual(len(out), 6)
    def test_fdr_adjustment(self):
        # Benjamini-Hochberg adjusted values for evenly spaced p-values,
        # compared against precomputed constants.
        p_vals = np.arange(0.05, 1.05, step=0.1)
        adjusted = hierarch.stats._false_discovery_adjust(p_vals)
        standard = np.array([0.5, 0.75, 0.83333, 0.875, 0.9, 0.91667, 0.92857,
                             0.9375, 0.94444, 0.95])
        for idx, v in enumerate(adjusted):
            self.assertAlmostEqual(v, standard[idx])
    def test_exceptions(self):
        with self.assertRaises(KeyError) as raises:
            hierarch.stats.multi_sample_test(self.data, 0, correction="ben")
        self.assertIn("ben is not a valid multiple comparisons correction", str(raises.exception))
        with self.assertRaises(TypeError) as raises:
            hierarch.stats.multi_sample_test("hi", 0)
        self.assertIn("Input data must be ndarray or DataFrame", str(raises.exception))
class TestConfidenceInterval(unittest.TestCase):
    # Shared fixture: simulated 3-level hierarchical data with two groups.
    paramlist = [[0, 2], [stats.norm], [stats.norm]]
    hierarchy = [2, 4, 3]
    datagen = DataSimulator(paramlist, random_state=2)
    datagen.fit(hierarchy)
    data = datagen.generate()
    def test_conf(self):
        interval_95 = hierarch.stats.confidence_interval(self.data, 0, interval=95)
        self.assertEqual(len(interval_95), 2)
        interval_68 = hierarch.stats.confidence_interval(self.data, 0, interval=68)
        # check that a 95% interval is wider than a 68% interval
        self.assertLess(interval_95[0], interval_68[0])
        self.assertGreater(interval_95[1], interval_68[1])
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "7a46a848cc229a5718c8002f307b3c0676b9beff", "size": 6746, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_stats.py", "max_stars_repo_name": "rishi-kulkarni/Hierarch", "max_stars_repo_head_hexsha": "d159a9605b48041cf87287dd44c0aa0ce434d12c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-06T01:35:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-01T11:22:50.000Z", "max_issues_repo_path": "tests/test_stats.py", "max_issues_repo_name": "rishi-kulkarni/Hierarch", "max_issues_repo_head_hexsha": "d159a9605b48041cf87287dd44c0aa0ce434d12c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 44, "max_issues_repo_issues_event_min_datetime": "2021-05-09T18:34:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-27T03:45:27.000Z", "max_forks_repo_path": "tests/test_stats.py", "max_forks_repo_name": "rishi-kulkarni/hierarch", "max_forks_repo_head_hexsha": "d159a9605b48041cf87287dd44c0aa0ce434d12c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.5225806452, "max_line_length": 144, "alphanum_fraction": 0.6683960866, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1695}
|
"""This where implementations of individual operations live"""
from ..coreOperation import *
from ..coreNode import broadcast_shape, reduce_shape
from .twoInputOperations import DivideOperation
import numpy as np
class SumAllOperation(SingleInputOperation):
    """Sum all elements together
    Attributes
    ----------
    name : str
        Name of the operation
    result : np.array
        Output of the operation
    testing : bool
        Flag specifying if the operation is in testing (making prefictions: True)
        or training (optimizing parameters: False) mode
    gradA : np.array
        gradient with respect to inputA
    inputA : ga.Operation
        Operation feeding data A into this operation
    shape : tuple
        shape of the output
    """
    name = "SumAllOperation"
    def setShape(self):
        """Set the output shape"""
        # A full reduction always yields a single scalar.
        self.shape = (1, )
    def perform(self, a):
        """Summ all elements of the input
        Parameters
        ----------
        a : np.array
            Input data
        Returns
        -------
        np.array
            Output data
        """
        return np.sum(a)
    def performGradient(self, input=None):
        """Find out the gradient with respect to the parameter
        Parameters
        ----------
        input : int
            placeholder variable since this operation has only one input
        Returns
        -------
        np.array
            Gradient propagated through this operation
        """
        # Accumulate the upstream gradient (ones if this is the end node),
        # then broadcast it: d(sum(a))/da_i == 1 for every element.
        if (self.endNode):
            grad = np.ones(self.inputA.shape)
        else:
            grad = np.zeros(self.inputA.shape)
            for out in self.outputs:
                grad += out.getGradient(self)
        return grad * np.ones(self.inputA.shape)
class SumAxisOperation(SingleInputOperation):
    """Sum all elements together along a given axis
    Attributes
    ----------
    name : str
        Name of the operation
    result : np.array
        Output of the operation
    testing : bool
        Flag specifying if the operation is in testing (making prefictions: True)
        or training (optimizing parameters: False) mode
    gradA : np.array
        gradient with respect to inputA
    inputA : ga.Operation
        Operation feeding data A into this operation
    shape : tuple
        shape of the output
    axis : int
        Axis over which to perform the sum
    """
    # BUG FIX: was "SumAllOperation" (copy-paste from the class above);
    # the name should identify this operation.
    name = "SumAxisOperation"
    def __init__(self, inputA=None, axis=0):
        self.axis = axis
        super().__init__(inputA)
        self.setShape()
    def setShape(self):
        """Set the output shape"""
        # Reducing over ``axis`` removes that dimension from the shape.
        self.shape = np.delete(self.inputA.shape, self.axis)
    def perform(self, a):
        """Sum all elements along the given axis
        Parameters
        ----------
        a : np.array
            Input data
        Returns
        -------
        np.array
            Output data
        """
        return np.sum(a, axis=self.axis)
    def performGradient(self, input=None):
        """Find out the gradient with respect to the parameter
        Parameters
        ----------
        input : int
            placeholder variable since this operation has only one input
        Returns
        -------
        np.array
            Gradient propagated through this operation
        Raises
        ------
        NotImplementedError
            If ``axis`` is neither 0 nor 1.
        """
        # Accumulate the upstream gradient (ones if this is the end node).
        if (self.endNode):
            grad = np.ones(self.inputA.shape)
        else:
            grad = np.zeros(self.inputA.shape)
            for out in self.outputs:
                grad += out.getGradient(self)
        if (self.axis == 0):
            return (grad * np.ones(self.inputA.shape))
        elif (self.axis == 1):
            return (grad * np.ones(self.inputA.shape)).T
        else:
            # BUG FIX: ``raise NotImplemented(...)`` raises a TypeError
            # (NotImplemented is a sentinel value, not callable); the
            # proper exception is NotImplementedError.
            raise NotImplementedError("Must investigate this gradient further")
class SumSquaredOperation(SingleInputOperation):
    """Sum all elements together
    Attributes
    ----------
    name : str
        Name of the operation
    result : np.array
        Output of the operation
    testing : bool
        Flag specifying if the operation is in testing (making prefictions: True)
        or training (optimizing parameters: False) mode
    gradA : np.array
        gradient with respect to inputA
    inputA : ga.Operation
        Operation feeding data A into this operation
    shape : tuple
        shape of the output
    """
    name = "SumSquaresOperation"
    def setShape(self):
        """Set the output shape"""
        # A full reduction always yields a single scalar.
        self.shape = (1, )
    def perform(self, a):
        """Sum all squared elements
        Parameters
        ----------
        a : np.array
            Input data
        Returns
        -------
        np.array
            Output data
        """
        return np.sum(np.square(a))
    def performGradient(self, input=None):
        """Find out the gradient with respect to the parameter
        Parameters
        ----------
        input : int
            placeholder variable since this operation has only one input
        Returns
        -------
        np.array
            Gradient propagated through this operation
        """
        if (self.endNode):
            grad = np.ones(self.inputA.shape)
        else:
            grad = np.zeros(self.inputA.shape)
            for out in self.outputs:
                grad += out.getGradient(self)
        # d(sum(a^2))/da = 2a, scaled by the accumulated upstream gradient.
        return grad * self.inputA.getValue() * 2
class ExpOperation(SingleInputOperation):
    """Apply exponential function to all of the elements
    Attributes
    ----------
    name : str
        Name of the operation
    result : np.array
        Output of the operation
    testing : bool
        Flag specifying if the operation is in testing (making prefictions: True)
        or training (optimizing parameters: False) mode
    gradA : np.array
        gradient with respect to inputA
    inputA : ga.Operation
        Operation feeding data A into this operation
    shape : tuple
        shape of the output
    """
    name = "ExpOperation"
    def setShape(self):
        """Set the output shape"""
        # Element-wise op: output shape matches the input shape.
        self.shape = self.inputA.shape
    def perform(self, a):
        """Calculate the exponens element-wise
        Parameters
        ----------
        a : np.array
            Input data
        Returns
        -------
        np.array
            Output data
        """
        return np.exp(a)
    def performGradient(self, input=None):
        """Find out the gradient with respect to the parameter
        Parameters
        ----------
        input : int
            placeholder variable since this operation has only one input
        Returns
        -------
        np.array
            Gradient propagated through this operation
        """
        if (self.endNode):
            grad = np.ones(self.inputA.shape)
        else:
            grad = np.zeros(self.inputA.shape)
            for out in self.outputs:
                grad += out.getGradient(self)
        # d(exp(a))/da = exp(a), which is exactly this op's cached output.
        return grad * self.getValue()
class DropoutOperation(SingleInputOperation):
    """Drops out some of the elements to prevent overfitting
    In default, the operation is active (performing dropout).
    For testing purposes (asking for prediction) the self.testing
    flag should be set to True to disable dropout and use all the
    neurons for prediction
    Attributes
    ----------
    name : str
        Name of the operation
    result : np.array
        Output of the operation
    testing : bool
        Flag specifying if the operation is in testing (making prefictions: True)
        or training (optimizing parameters: False) mode
    gradA : np.array
        gradient with respect to inputA
    inputA : ga.Operation
        Operation feeding data A into this operation
    shape : tuple
        shape of the output
    """
    name = "DropoutOperation"
    def __init__(self, inputA=None, dropoutRate=0):
        super().__init__(inputA)
        self.setShape()
        # Fraction of neurons to zero out each time the mask is generated.
        self.dropoutRate = dropoutRate
        self.generateMask()
        self.testing = False
    def generateMask(self):
        """Generate dropout mask"""
        if (self.testing):
            # Inference mode: identity mask, all neurons active.
            self.dropoutMask = np.ones(self.shape[1:])
        else:
            # Training mode: zero a random subset of neurons and scale the
            # survivors by 1/(1 - rate) (inverted dropout keeps the
            # expected activation unchanged).
            self.dropoutMask = np.ones(self.shape[1:])
            nNeurons = np.size(self.dropoutMask)
            nNeuronsToDrop = int(nNeurons * self.dropoutRate)
            weightingFactor = 1.0 / (1 - self.dropoutRate)
            neuronsToDrop = np.random.choice(nNeurons, nNeuronsToDrop, replace=False)
            # ravel() returns a view here, so this writes into dropoutMask.
            self.dropoutMask.ravel()[neuronsToDrop] = 0
            # NOTE(review): reshape() returns a new array and its result is
            # discarded — this line is a no-op (the mask already has the
            # right shape); confirm before removing.
            self.dropoutMask.reshape(self.shape[1:])
            self.dropoutMask *= weightingFactor
    def reset(self):
        """Reset the values and gradients held by this operation"""
        self.result = None
        self.gradA = None
        self.setShape()
        # A fresh mask is drawn on every reset, i.e. every forward pass.
        self.generateMask()
    def setShape(self):
        """Set the output shape"""
        # Element-wise op: output shape matches the input shape.
        self.shape = self.inputA.shape
    def perform(self, a):
        """Perform dropout
        Parameters
        ----------
        a : np.array
            Input data
        Returns
        -------
        np.array
            Output data
        """
        return np.multiply(a, self.dropoutMask)
    def performGradient(self, input=None):
        """Find out the gradient with respect to the parameter
        Parameters
        ----------
        input : int
            placeholder variable since this operation has only one input
        Returns
        -------
        np.array
            Gradient propagated through this operation
        """
        if (self.endNode):
            grad = np.ones(self.inputA.shape)
        else:
            grad = np.zeros(self.inputA.shape)
            for out in self.outputs:
                grad += out.getGradient(self)
        # Dropped neurons pass no gradient; survivors keep the same scaling
        # factor that was applied in the forward pass.
        return np.multiply(grad, self.dropoutMask)
|
{"hexsha": "ff32c39890017422a7d4686fe4d29739ffa321c3", "size": 9847, "ext": "py", "lang": "Python", "max_stars_repo_path": "graphAttack/operations/singleInputOperations.py", "max_stars_repo_name": "jgolebiowski/graphAttack", "max_stars_repo_head_hexsha": "ec8488444b44d0bd54498bf917ee42d821643ee8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2017-08-16T13:04:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T09:10:30.000Z", "max_issues_repo_path": "graphAttack/operations/singleInputOperations.py", "max_issues_repo_name": "jgolebiowski/graphAttack", "max_issues_repo_head_hexsha": "ec8488444b44d0bd54498bf917ee42d821643ee8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphAttack/operations/singleInputOperations.py", "max_forks_repo_name": "jgolebiowski/graphAttack", "max_forks_repo_head_hexsha": "ec8488444b44d0bd54498bf917ee42d821643ee8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2017-09-27T01:10:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-05T09:44:56.000Z", "avg_line_length": 26.6856368564, "max_line_length": 85, "alphanum_fraction": 0.5715446329, "include": true, "reason": "import numpy", "num_tokens": 2026}
|
[STATEMENT]
lemma import_test:
"-y * x \<le> x * -y \<Longrightarrow> -y * x\<^sup>\<circ> = -y * (-y * x)\<^sup>\<circ>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - y * x \<le> x * - y \<Longrightarrow> - y * x\<^sup>\<circ> = - y * (- y * x)\<^sup>\<circ>
[PROOF STEP]
by (simp add: circ_import tests_dual.sub_bot_least)
|
{"llama_tokens": 141, "file": "Correctness_Algebras_Test_Iterings", "length": 1}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The read_write module contains tools for accessing *.fcs files and
relies on the Python library FlowIO by Scott White. This is used by
Experiment to populate FileGroups.
Projects also house the subjects (represented by the Subject class;
see cytopy.data.subject) of an analysis which can contain multiple
meta-data.
Copyright 2020 Ross Burton
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from multiprocessing import Pool, cpu_count
import flowio
import dateutil.parser as date_parser
import numpy as np
import pandas as pd
import json
import os
__author__ = "Ross Burton"
__copyright__ = "Copyright 2020, cytopy"
__credits__ = ["Ross Burton", "Scott White", "Simone Cuff", "Andreas Artemiou", "Matthias Eberl"]
__license__ = "MIT"
__version__ = "2.0.0"
__maintainer__ = "Ross Burton"
__email__ = "burtonrj@cardiff.ac.uk"
__status__ = "Production"
def filter_fcs_files(fcs_dir: str,
                     exclude_comps: bool = True,
                     exclude_dir: str = 'DUPLICATES') -> list:
    """
    Walk a directory tree and collect the paths of every *.fcs file found.

    Parameters
    ----------
    fcs_dir: str
        root directory to walk
    exclude_comps: bool
        when True, any file whose name contains 'comp' (case-insensitive)
        is skipped
    exclude_dir: str (default = 'DUPLICATES')
        directories with this exact name are skipped entirely

    Returns
    --------
    List
        paths of the matching fcs files
    """
    found = []
    for root, dirs, files in os.walk(fcs_dir):
        if os.path.basename(root) == exclude_dir:
            continue
        candidates = [f for f in files if f.lower().endswith('.fcs')]
        if exclude_comps:
            candidates = [f for f in candidates if f.lower().find('comp') == -1]
        found.extend(os.path.join(root, f) for f in candidates)
    return found
def get_fcs_file_paths(fcs_dir: str,
                       control_names: list,
                       ctrl_id: str,
                       ignore_comp: bool = True,
                       exclude_dir: str = "DUPLICATE") -> dict:
    """
    Build a standard dictionary describing the fcs files found in a directory.

    Parameters
    -----------
    fcs_dir: str
        target directory for search
    control_names: list
        names of expected control files; each must appear somewhere in the filename
    ctrl_id: str
        global identifier that marks a file as a control e.g. 'FMO'
        (must appear in filenames)
    ignore_comp: bool, (default=True)
        when True, compensation files (name containing 'comp') are skipped
    exclude_dir: str (default = "DUPLICATE")
        directories with this name are ignored

    Returns
    --------
    dict
        {'primary': [...], 'controls': {control name: [matching paths]}}
    """
    all_files = filter_fcs_files(fcs_dir, exclude_comps=ignore_comp, exclude_dir=exclude_dir)
    controls = [f for f in all_files if f.find(ctrl_id) != -1]
    primary = [f for f in all_files if f.find(ctrl_id) == -1]
    file_tree = dict(primary=[], controls={})
    for c_name in control_names:
        matches = [f for f in controls if f.find(c_name) != -1]
        if not matches:
            print(f'Warning: no file found for {c_name} control')
            continue
        if len(matches) > 1:
            print(f'Warning: multiple files found for {c_name} control')
        file_tree['controls'][c_name] = matches
    if len(primary) > 1:
        print('Warning! Multiple non-control (primary) files found in directory. Check before proceeding.')
    file_tree['primary'] = primary
    return file_tree
def chunks(df_list: list,
           n: int):
    """
    Yield successive chunks of size ``n`` from a list.
    ref: https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks

    Note: the original signature annotated the return as ``pd.DataFrame``,
    which was incorrect -- this is a generator yielding list slices.

    Parameters
    -----------
    df_list: list
        list to split into chunks
    n: int
        maximum size of each chunk (the final chunk may be shorter)

    Returns
    --------
    generator
        Yields successive slices of at most ``n`` items
    """
    for i in range(0, len(df_list), n):
        yield df_list[i:i + n]
def fcs_mappings(path: str) -> list or None:
    """
    Fetch the channel mappings from an fcs file.

    Parameters
    ------------
    path: str
        path to the fcs file

    Returns
    --------
    List or None
        Channel mappings, or None when the file cannot be loaded.
    """
    try:
        fcs = FCSFile(path)
    except ValueError as e:
        print(f'Failed to load file {path}; {e}')
        return None
    return fcs.channel_mappings
def explore_channel_mappings(fcs_dir: str,
                             exclude_comps: bool = True) -> list:
    """
    Given a directory, explore all fcs files and find all unique
    channel/marker mappings.

    Parameters
    ----------
    fcs_dir: str
        root directory to search
    exclude_comps: bool, (default=True)
        exclude compensation files (must have 'comp' in filename)

    Returns
    --------
    List
        list of all unique channel/marker mappings
    """
    fcs_files = filter_fcs_files(fcs_dir, exclude_comps)
    with Pool(cpu_count()) as pool:
        mappings = list(pool.map(fcs_mappings, fcs_files))
    # BUG FIX: the docstring promises *unique* mappings but the original
    # returned one (possibly duplicated) entry per file. Serialise each
    # mapping to JSON (lists/dicts are unhashable) and deduplicate while
    # preserving first-seen order.
    seen = set()
    unique = []
    for m in mappings:
        key = json.dumps(m)
        if key not in seen:
            seen.add(key)
            unique.append(json.loads(key))
    return unique
def _get_spill_matrix(matrix_string: str) -> pd.DataFrame:
"""
Generate pandas dataframe for the fluorochrome spillover matrix used for compensation calc
Code is modified from: https://github.com/whitews/FlowUtils
Pedersen NW, Chandran PA, Qian Y, et al. Automated Analysis of Flow Cytometry
Data to Reduce Inter-Lab Variation in the Detection of Major Histocompatibility
Complex Multimer-Binding T Cells. Front Immunol. 2017;8:858.
Published 2017 Jul 26. doi:10.3389/fimmu.2017.00858
Parameters
-----------
matrix_string: str
string value extracted from the 'spill' parameter of the FCS file
Returns
--------
Pandas.DataFrame
"""
matrix_list = matrix_string.split(',')
n = int(matrix_list[0])
header = matrix_list[1:(n+1)]
header = [i.strip().replace('\n', '') for i in header]
values = [i.strip().replace('\n', '') for i in matrix_list[n+1:]]
matrix = np.reshape(list(map(float, values)), (n, n))
matrix_df = pd.DataFrame(matrix)
matrix_df = matrix_df.rename(index={k: v for k, v in zip(matrix_df.columns.to_list(), header)},
columns={k: v for k, v in zip(matrix_df.columns.to_list(), header)})
return matrix_df
def _get_channel_mappings(fluoro_dict: dict) -> list:
"""
Generates a list of dictionary objects that describe the fluorochrome mappings in this FCS file
Parameters
-----------
fluoro_dict: dict
dictionary object from the channels param of the fcs file
Returns
--------
List
List of dict obj with keys 'channel' and 'marker'. Use to map fluorochrome channels to
corresponding marker
"""
fm = [(int(k), x) for k, x in fluoro_dict.items()]
fm = [x[1] for x in sorted(fm, key=lambda x: x[0])]
mappings = []
for fm_ in fm:
channel = fm_['PnN'].replace('_', '-')
if 'PnS' in fm_.keys():
marker = fm_['PnS'].replace('_', '-')
else:
marker = ''
mappings.append({'channel': channel, 'marker': marker})
return mappings
class FCSFile:
    """
    Utilising FlowIO to generate an object for representing an FCS file

    Attributes
    ----------
    filepath: str
        location of fcs file to parse
    comp_matrix: str
        csv file containing compensation matrix (optional, not required if a
        spillover matrix is already linked to the file)
    """
    def __init__(self, filepath, comp_matrix=None):
        fcs = flowio.FlowData(filepath)
        # Header metadata, with sensible fallbacks when keys are missing.
        self.filename = fcs.text.get('fil', 'Unknown_filename')
        self.sys = fcs.text.get('sys', 'Unknown_system')
        self.total_events = int(fcs.text.get('tot', 0))
        self.tube_name = fcs.text.get('tube name', 'Unknown')
        self.exp_name = fcs.text.get('experiment name', 'Unknown')
        self.cytometer = fcs.text.get('cyt', 'Unknown')
        self.creator = fcs.text.get('creator', 'Unknown')
        self.operator = fcs.text.get('export user name', 'Unknown')
        self.channel_mappings = _get_channel_mappings(fcs.channels)
        self.cst_pass = False
        self.data = fcs.events
        # Events arrive as a flat sequence; reshape to (n_events, n_channels).
        self.event_data = np.reshape(np.array(fcs.events, dtype=np.float32), (-1, fcs.channel_count))
        if 'threshold' in fcs.text.keys():
            # Threshold text alternates channel,value,channel,value,...
            self.threshold = [{'channel': c, 'threshold': v} for c, v in chunks(fcs.text["threshold"].split(','), 2)]
        else:
            self.threshold = 'Unknown'
        try:
            self.processing_date = date_parser.parse(fcs.text['date'] +
                                                     ' ' + fcs.text['etim']).isoformat()
        except KeyError:
            self.processing_date = 'Unknown'
        if comp_matrix is not None:
            # Explicit compensation matrix supplied; takes precedence over
            # any embedded spillover text.
            self.spill = pd.read_csv(comp_matrix)
            self.spill_txt = None
        else:
            if 'spill' in fcs.text.keys():
                self.spill_txt = fcs.text['spill']
            elif 'spillover' in fcs.text.keys():
                self.spill_txt = fcs.text['spillover']
            else:
                self.spill_txt = None
            if self.spill_txt is not None:
                if(len(self.spill_txt)) < 1:
                    print("""Warning: no spillover matrix found, please provide
                    path to relevant csv file with 'comp_matrix' argument if compensation is necessary""")
                    self.spill = None
                else:
                    self.spill = _get_spill_matrix(self.spill_txt)
            else:
                self.spill = None
        # BUG FIX: the original guarded on 'cst_setup_status' but then read
        # 'cst setup status', raising KeyError whenever the guard passed.
        # Read the same key that is checked (assumed to be the underscored
        # form -- TODO confirm against real cytometer exports).
        if fcs.text.get('cst_setup_status') == 'SUCCESS':
            self.cst_pass = True

    def compensate(self):
        """
        Apply compensation to event data

        Returns
        -------
        None
        """
        assert self.spill is not None, 'Unable to locate spillover matrix, please provide a compensation matrix'
        channel_idx = [i for i, x in enumerate(self.channel_mappings) if x['marker'] != '']
        if len(channel_idx) == 0:
            # No markers defined in file; fall back to every channel that is
            # not a scatter or time channel.
            channel_idx = [i for i, x in enumerate(self.channel_mappings) if all([z not in x['channel'].lower()
                                                                                 for z in ['fsc', 'ssc', 'time']])]
        comp_data = self.event_data[:, channel_idx]
        # Solve spill^T @ compensated^T = raw^T instead of inverting.
        comp_data = np.linalg.solve(self.spill.values.T, comp_data.T).T
        self.event_data[:, channel_idx] = comp_data
|
{"hexsha": "f92211c502c0626398340f11d07aa25743417d16", "size": 12059, "ext": "py", "lang": "Python", "max_stars_repo_path": "cytopy/data/read_write.py", "max_stars_repo_name": "JANHMS/CytoPy", "max_stars_repo_head_hexsha": "8537d707fa25645b55b4ec1e25fff9f19847fb1b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 41, "max_stars_repo_stars_event_min_datetime": "2020-04-08T11:01:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T17:17:18.000Z", "max_issues_repo_path": "cytopy/data/read_write.py", "max_issues_repo_name": "JANHMS/CytoPy", "max_issues_repo_head_hexsha": "8537d707fa25645b55b4ec1e25fff9f19847fb1b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2020-04-07T14:59:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T20:43:34.000Z", "max_forks_repo_path": "cytopy/data/read_write.py", "max_forks_repo_name": "JANHMS/CytoPy", "max_forks_repo_head_hexsha": "8537d707fa25645b55b4ec1e25fff9f19847fb1b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-04-28T15:16:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T19:02:14.000Z", "avg_line_length": 36.6534954407, "max_line_length": 117, "alphanum_fraction": 0.6259225475, "include": true, "reason": "import numpy", "num_tokens": 2845}
|
# -*- coding: utf-8 -*-
"""
NEURAL NETWORKS AND DEEP LEARNING
ICT FOR LIFE AND HEALTH - Department of Information Engineering
A.A. 2019/20
Giulia Bressan
Homework 4
"""
import torch
import matplotlib.pyplot as plt
import random
from torch import nn
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from torchvision.datasets import MNIST
import numpy as np
# Fix the RNG so the random sample plot below is reproducible.
random.seed(3)
#%% Define paths
data_root_dir = '../datasets'
#%% Create dataset
# Both splits only need conversion to tensors (no augmentation/normalisation).
train_transform = transforms.Compose([
    transforms.ToTensor(),
])
test_transform = transforms.Compose([
    transforms.ToTensor(),
])
train_dataset = MNIST(data_root_dir, train=True, download=True, transform=train_transform)
test_dataset = MNIST(data_root_dir, train=False, download=True, transform=test_transform)
### Plot some sample
# Sanity check: show a 5x5 grid of random training digits with their labels.
plt.close('all')
fig, axs = plt.subplots(5, 5, figsize=(8,8))
for ax in axs.flatten():
    img, label = random.choice(train_dataset)
    ax.imshow(img.squeeze().numpy(), cmap='gist_gray')
    ax.set_title('Label: %d' % label)
    ax.set_xticks([])
    ax.set_yticks([])
plt.tight_layout()
#%% Subset division
# Split the training indices into contiguous thirds for 3-fold
# cross-validation: each train_set_i is two thirds of the data and the
# matching val_set_i is the remaining third.
kfold_ind = list(range(0, len(train_dataset)))
train_set_1 = Subset(train_dataset, kfold_ind[int(len(kfold_ind)/3):])
train_set_2 = Subset(train_dataset, kfold_ind[:int(len(kfold_ind)/3)]+kfold_ind[2*int(len(kfold_ind)/3):])
train_set_3 = Subset(train_dataset, kfold_ind[:2*int(len(kfold_ind)/3)])
val_set_1 = Subset(train_dataset, kfold_ind[:int(len(kfold_ind)/3)])
val_set_2 = Subset(train_dataset, kfold_ind[int(len(kfold_ind)/3):2*int(len(kfold_ind)/3)])
val_set_3 = Subset(train_dataset, kfold_ind[2*int(len(kfold_ind)/3):])
train_set_list = [train_set_1, train_set_2, train_set_3]
val_set_list = [val_set_1, val_set_2, val_set_3]
#%% Define the network architecture
class Autoencoder(nn.Module):
    """Convolutional autoencoder for 28x28 single-channel MNIST digits.

    The encoder compresses an image down to ``encoded_space_dim`` values;
    the decoder mirrors it back to a (1, 28, 28) image with outputs
    squashed into [0, 1] by a sigmoid.
    """

    def __init__(self, encoded_space_dim):
        super().__init__()
        # Encoder: three strided convolutions (spatial size 28 -> 14 -> 7 -> 3)
        # followed by two linear layers down to the code dimension.
        self.encoder_cnn = nn.Sequential(
            nn.Conv2d(1, 8, 3, stride=2, padding=1),
            nn.ReLU(True),
            nn.Conv2d(8, 16, 3, stride=2, padding=1),
            nn.ReLU(True),
            nn.Conv2d(16, 32, 3, stride=2, padding=0),
            nn.ReLU(True)
        )
        self.encoder_lin = nn.Sequential(
            nn.Linear(3 * 3 * 32, 64),
            nn.ReLU(True),
            nn.Linear(64, encoded_space_dim)
        )
        # Decoder: mirror image of the encoder.
        self.decoder_lin = nn.Sequential(
            nn.Linear(encoded_space_dim, 64),
            nn.ReLU(True),
            nn.Linear(64, 3 * 3 * 32),
            nn.ReLU(True)
        )
        self.decoder_conv = nn.Sequential(
            nn.ConvTranspose2d(32, 16, 3, stride=2, output_padding=0),
            nn.ReLU(True),
            nn.ConvTranspose2d(16, 8, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(True),
            nn.ConvTranspose2d(8, 1, 3, stride=2, padding=1, output_padding=1)
        )

    def forward(self, x):
        # Full round trip: image -> latent code -> reconstruction.
        return self.decode(self.encode(x))

    def encode(self, x):
        # Convolve, flatten, then project to the latent code.
        features = self.encoder_cnn(x)
        flat = features.view([features.size(0), -1])
        return self.encoder_lin(flat)

    def decode(self, x):
        # Expand the code, reshape to feature maps, deconvolve, squash to [0, 1].
        expanded = self.decoder_lin(x)
        maps = expanded.view([-1, 32, 3, 3])
        return torch.sigmoid(self.decoder_conv(maps))
#%% Network training
### Training function
def train_epoch(net, dataloader, loss_fn, optimizer):
    """Run one training epoch of ``net`` over ``dataloader``.

    Parameters
    ----------
    net : torch.nn.Module
        network mapping a batch of images to reconstructions of themselves
    dataloader : torch.utils.data.DataLoader
        yields batches whose first element is the image tensor
    loss_fn : callable
        reconstruction loss, e.g. ``torch.nn.MSELoss()``
    optimizer : torch.optim.Optimizer
        optimizer updating ``net``'s parameters

    Returns
    -------
    None
    """
    # BUG FIX: the original body stepped the module-level ``optim`` and
    # moved data to the module-level ``device``, silently ignoring the
    # ``optimizer`` argument. Use the arguments, and infer the device
    # from the network's own parameters.
    device = next(net.parameters()).device
    net.train()
    for sample_batch in dataloader:
        # Extract data and move tensors to the network's device
        image_batch = sample_batch[0].to(device)
        # Forward pass
        output = net(image_batch)
        loss = loss_fn(output, image_batch)
        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
### Testing function
def test_epoch(net, dataloader, loss_fn, optimizer):
# Validation
net.eval() # Evaluation mode (e.g. disable dropout)
with torch.no_grad(): # No need to track the gradients
conc_out = torch.Tensor().float()
conc_label = torch.Tensor().float()
for sample_batch in dataloader:
# Extract data and move tensors to the selected device
image_batch = sample_batch[0].to(device)
# Forward pass
out = net(image_batch)
# Concatenate with previous outputs
conc_out = torch.cat([conc_out, out.cpu()])
conc_label = torch.cat([conc_label, image_batch.cpu()])
# Evaluate global loss
val_loss = loss_fn(conc_out, conc_label)
return val_loss.data
#%% K-fold cross-validation
# Parameters grid
params = {
    'lr':[1e-3, 1e-4, 1e-5],
    'encoded_space_dim': [2, 4, 6, 8],
    'batch_size': [256, 512, 1024],
    }
# Best (lowest) average validation loss found so far and the associated
# hyper-parameters; loss_opt starts at a sentinel "infinity".
loss_opt = 1000000000
lr_opt = 0
encoded_space_dim_opt = 0
batch_size_opt = 0
# Cross-validation
# Full grid search: for every hyper-parameter combination, train on each
# of the 3 folds and average the final validation losses.
for encoded_space_dim in params['encoded_space_dim']:
    for batch_size in params['batch_size']:
        for lr in params['lr']:
            loss_temp = [] # to save the MSE for each fold
            for folds in range(3):
                print('FOLD %d/%d' % (folds + 1, 3))
                ### Initialize the network
                # Fresh network per fold so folds do not share weights.
                net = Autoencoder(encoded_space_dim=encoded_space_dim)
                ### Define dataloader
                train_dataloader = DataLoader(train_set_list[folds], batch_size=batch_size, shuffle=True)
                val_dataloader = DataLoader(val_set_list[folds], batch_size=batch_size, shuffle=False)
                ### Define a loss function
                loss_fn = torch.nn.MSELoss()
                ### Define an optimizer
                optim = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=1e-5)
                ### If cuda is available set the device to GPU
                if torch.cuda.is_available():
                    device = torch.device("cuda")
                else:
                    device = torch.device("cpu")
                # Move all the network parameters to the selected device (if they are already on that device nothing happens)
                net.to(device)
                ### Training cycle
                training = True
                num_epochs = 10
                if training:
                    for epoch in range(num_epochs):
                        print('\t ## EPOCH %d' % (epoch + 1))
                        ### Training
                        train_epoch(net, dataloader=train_dataloader, loss_fn=loss_fn, optimizer=optim)
                        ### Validation
                        val_loss = test_epoch(net, dataloader=val_dataloader, loss_fn=loss_fn, optimizer=optim)
                        # Print Validationloss
                        print('\t\t VALIDATION - loss: %f' % (val_loss))
                ### Save current loss (last loss obtained)
                # Only the final epoch's validation loss is kept per fold.
                loss_temp.append(val_loss)
            ### Compare with the previous results
            # Average over the 3 folds; keep this combination if it beats
            # (or ties) the best seen so far.
            loss_avg = np.mean(np.array(loss_temp))
            if (loss_avg<=loss_opt):
                loss_opt = loss_avg
                lr_opt = lr
                encoded_space_dim_opt = encoded_space_dim
                batch_size_opt = batch_size
            ### Print results
            params_set = {
                'lr':[lr],
                'encoded_space_dim': [encoded_space_dim],
                'batch_size': [batch_size],
                }
            print('SET OF PARAMETERS: ' + str(params_set))
            print('AVG LOSS FOR THIS SET: ' + str(loss_avg))
params_set_opt = {
    'lr':[lr_opt],
    'encoded_space_dim': [encoded_space_dim_opt],
    'batch_size': [batch_size_opt],
    }
print('OPTIMAL SET OF PARAMETERS: ' + str(params_set_opt))
print('AVG LOSS FOR OPTIMAL SET: ' + str(loss_opt))
|
{"hexsha": "5e70ff5404a5c20dadecdf43cff02b25d9313dd3", "size": 8256, "ext": "py", "lang": "Python", "max_stars_repo_path": "autoencoders for digit reconstruction/code/hw4_crossval.py", "max_stars_repo_name": "gitBress/NNDL", "max_stars_repo_head_hexsha": "b73b42f9c421922626c6e74afdb71ff0c7cadc46", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "autoencoders for digit reconstruction/code/hw4_crossval.py", "max_issues_repo_name": "gitBress/NNDL", "max_issues_repo_head_hexsha": "b73b42f9c421922626c6e74afdb71ff0c7cadc46", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autoencoders for digit reconstruction/code/hw4_crossval.py", "max_forks_repo_name": "gitBress/NNDL", "max_forks_repo_head_hexsha": "b73b42f9c421922626c6e74afdb71ff0c7cadc46", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6324110672, "max_line_length": 125, "alphanum_fraction": 0.5744912791, "include": true, "reason": "import numpy", "num_tokens": 1944}
|
import sncosmo
from astropy.tests.helper import remote_data
@remote_data
def test_hst_bands():
    """ check that the HST and JWST bands are accessible """
    band_names = ('f606w', 'uvf606w', 'f125w', 'f127m',
                  'f115w')  # last entry is jwst nircam
    for name in band_names:
        sncosmo.get_bandpass(name)
@remote_data
def test_jwst_miri_bands():
    """Check that the JWST MIRI band is accessible."""
    for name in ('f1130w',):
        sncosmo.get_bandpass(name)
|
{"hexsha": "89ebd824dcaea02146d22d6c15d129bbbd1c20fe", "size": 418, "ext": "py", "lang": "Python", "max_stars_repo_path": "sncosmo/tests/test_builtins.py", "max_stars_repo_name": "rbiswas4/sncosmo", "max_stars_repo_head_hexsha": "813b707044fd21e8e35e7a1cdc650b48417f0ebc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-27T09:46:46.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-27T09:46:46.000Z", "max_issues_repo_path": "sncosmo/tests/test_builtins.py", "max_issues_repo_name": "jasminelujia/sncosmo", "max_issues_repo_head_hexsha": "6ca3be6a52f7a096b874e181c21b93f711610f12", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sncosmo/tests/test_builtins.py", "max_forks_repo_name": "jasminelujia/sncosmo", "max_forks_repo_head_hexsha": "6ca3be6a52f7a096b874e181c21b93f711610f12", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-02-08T16:07:46.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-08T16:07:46.000Z", "avg_line_length": 23.2222222222, "max_line_length": 61, "alphanum_fraction": 0.6626794258, "include": true, "reason": "from astropy", "num_tokens": 131}
|
[STATEMENT]
lemma higher_plus: "higher (p + q) v = higher p v + higher q v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. higher (p + q) v = higher p v + higher q v
[PROOF STEP]
by (rule poly_mapping_eqI, simp add: lookup_add lookup_higher)
|
{"llama_tokens": 97, "file": "Polynomials_MPoly_Type_Class_Ordered", "length": 1}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Includes the EPrime data source class."""
# TODO(janmtl): Provide an interface that deuglifies the _0 and channels in this
# interface
import pandas as pd
import io
import numpy as np
from data_source import DataSource
from schema import Schema, Or
from utils import merge_and_rename_columns
def _idem(x, pos, label_bin):
return x.values[0]
class EPrime(DataSource):
    """Data source for EPrime key-value export files.

    Parses the UTF-16 'LogFrame' text format written by EPrime and exposes
    the records as samples/labels DataFrames via the DataSource interface.
    """

    def __init__(self, config, schedule):
        """Set up one identity-aggregation panel per data channel."""
        # Call the parent class init
        super(EPrime, self).__init__(config, schedule)
        # Every configured key except the ID/Condition bookkeeping keys is
        # a data channel.
        # BUG FIX: dict.keys() is a view in Python 3 and has no .remove();
        # build a list instead (also works under Python 2).
        channels = [k for k in self.config.keys()
                    if k not in ('ID', 'Condition')]
        self.panels = {channel: {'VAL': _idem}
                       for channel in channels}

    def load(self, file_paths):
        """Load Keyvalue-format edat file."""
        with io.open(file_paths['samples'], 'r', encoding="utf-16") as kv_file:
            raw = kv_file.read()
        raw = raw.replace('\t', '')
        raw = raw.replace('*** LogFrame End ***', '')
        # Each '*** LogFrame Start ***' marker begins one record.
        arr = raw.split('*** LogFrame Start ***')
        frames = []
        for frame in arr:
            lines = frame.split('\n')
            # Split each 'key: value' line on the first colon only, so
            # values may themselves contain colons.
            lines = [line.split(':', 1) for line in lines]
            d = {line[0].strip(' '): line[1].strip(' ')
                 for line in lines
                 if len(line) == 2}
            frames.append(d)
        self.data['samples'] = pd.DataFrame.from_dict(frames)

    def merge_data(self):
        """Clean the EPrime file data."""
        # Assemble samples
        self.data['samples'] = self._clean_samples(self.data['samples'])
        self.data['samples'].loc[:, 'pos'] = True
        # Assemble labels from the ID/Condition columns; Label is unused.
        self.data['labels'] = self.data['samples'].loc[:, ['ID', 'Condition']]
        self.data['labels'].loc[:, 'Label'] = None

    def create_label_bins(self, labels):
        """Construct the dummy label_bins dataframe.

        Each label row becomes a unit-width bin anchored at its index.
        """
        label_bins = labels
        label_bins.loc[:, 'Order'] = labels.index.values - 1
        label_bins.loc[:, 'Bin_Order'] = labels.index.values
        label_bins.loc[:, 'Start_Time'] = labels.index.values
        label_bins.loc[:, 'End_Time'] = labels.index.values + 1
        label_bins.loc[:, 'Bin_Index'] = 0
        return label_bins

    def _clean_samples(self, samples):
        """
        Create the columns from the keys and names that were passed into the
        configuration.
        """
        samples.replace("nan", np.nan, inplace=True)
        # BUG FIX: dict.iteritems() does not exist in Python 3; .items()
        # behaves equivalently on both major versions.
        for key, names in self.config.items():
            samples = merge_and_rename_columns(samples, key, names)
        # Pick out only the columns of interest (materialise the key view
        # as a list for pandas .loc under Python 3)
        samples = samples.loc[:, list(self.config.keys())]
        # Pick out only the rows that are not all nan
        samples.dropna(how='all', axis=0, inplace=True)
        return samples

    @staticmethod
    def _validate_config(raw):
        """
        Validate the label configuration dict passed to the Data Source.
        Args:
            raw (dict): must match the following schema
                {'ID': key name or list of keys,
                 'Condition': key name or list of keys,
                 'Channels': list of keys}}
        """
        schema = Schema({'ID': Or([str], str),
                         'Condition': Or([str], str),
                         str: Or([str], str)})
        return schema.validate(raw)

    @staticmethod
    def _validate_schedule(raw):
        """
        Validate the schedule configuration dict passed to the Data Source.
        Args:
            raw (dict): must match the following schema
                {file_type (str): pattern (str)}
        """
        schema = Schema({str: str})
        return schema.validate(raw)
|
{"hexsha": "70d8b83a49c788693208e0072e637bbb67226a3b", "size": 3785, "ext": "py", "lang": "Python", "max_stars_repo_path": "pypsych/data_sources/eprime.py", "max_stars_repo_name": "janmtl/pypsych", "max_stars_repo_head_hexsha": "1c606342dbdb984bc06aa9fd26963f3ce0a378b1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pypsych/data_sources/eprime.py", "max_issues_repo_name": "janmtl/pypsych", "max_issues_repo_head_hexsha": "1c606342dbdb984bc06aa9fd26963f3ce0a378b1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pypsych/data_sources/eprime.py", "max_forks_repo_name": "janmtl/pypsych", "max_forks_repo_head_hexsha": "1c606342dbdb984bc06aa9fd26963f3ce0a378b1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4955752212, "max_line_length": 80, "alphanum_fraction": 0.5669749009, "include": true, "reason": "import numpy", "num_tokens": 837}
|
#include "readline_buffer.h"
#include <readline/readline.h>
#include <readline/history.h>
#include <iostream>
#include <boost/thread/mutex.hpp>
#include <boost/thread/lock_guard.hpp>
#include <boost/algorithm/string.hpp>
// Forward declarations for the readline callback plumbing defined below.
static bool same_as_last_line(const std::string&);
static void install_line_handler();
static void remove_line_handler();
// Guards all interaction with the (non-thread-safe) readline globals.
static boost::mutex sync_mutex;
// Result of the last callback invocation: partial, full or empty (EOF).
static rdln::linestatus line_stat;
// Completed line handed over by handle_line(); freed in get_line().
static char *the_line;
namespace
{
  // The single process-wide readline buffer, if one has been constructed.
  rdln::readline_buffer* current = NULL;
}
// RAII helper: pauses the active readline session (if any) on construction
// and resumes it on destruction, so other code can write to the terminal.
rdln::suspend_readline::suspend_readline()
: m_buffer(NULL), m_restart(false)
{
  m_buffer = current;
  if(!m_buffer)
    return;
  // Only restart later if the buffer was actually running now.
  m_restart = m_buffer->is_running();
  if(m_restart)
    m_buffer->stop();
}
rdln::suspend_readline::~suspend_readline()
{
  if(!m_buffer)
    return;
  if(m_restart)
    m_buffer->start();
}
// Lazily-initialised global list of commands offered by tab completion;
// "exit" is always present.
std::vector<std::string>& rdln::readline_buffer::completion_commands()
{
  static std::vector<std::string> commands = {"exit"};
  return commands;
}
// On construction the buffer registers itself as the process-wide current
// readline buffer (used by suspend_readline above).
rdln::readline_buffer::readline_buffer()
: std::stringbuf(), m_cout_buf(NULL), m_prompt_length(0)
{
  current = this;
}
// Redirect std::cout through this buffer and install the readline
// callback handler. No-op if already started.
void rdln::readline_buffer::start()
{
  boost::lock_guard<boost::mutex> lock(sync_mutex);
  if(m_cout_buf != NULL)
    return;
  m_cout_buf = std::cout.rdbuf();
  std::cout.rdbuf(this);
  install_line_handler();
}
// Restore std::cout's original buffer and remove the readline handler.
// No-op if not started.
void rdln::readline_buffer::stop()
{
  boost::lock_guard<boost::mutex> lock(sync_mutex);
  if(m_cout_buf == NULL)
    return;
  std::cout.rdbuf(m_cout_buf);
  m_cout_buf = NULL;
  remove_line_handler();
}
// Pump one character through readline's callback interface.
// Returns rdln::full with the completed line in `line`, rdln::empty on EOF,
// or rdln::partial while the user is still typing.
rdln::linestatus rdln::readline_buffer::get_line(std::string& line) const
{
  boost::lock_guard<boost::mutex> lock(sync_mutex);
  line_stat = rdln::partial;
  if (!m_cout_buf)
  {
    // Not started: report an empty completed line.
    line = "";
    return rdln::full;
  }
  rl_callback_read_char();
  if (line_stat == rdln::full)
  {
    // handle_line() stored the completed input in the_line; copy it out
    // and release the readline-allocated buffer.
    line = the_line;
    free(the_line);
    the_line = NULL;
  }
  return line_stat;
}
// Replace the current prompt. The old prompt is first overwritten with
// spaces of equal length so a shorter new prompt leaves no stale
// characters on screen.
void rdln::readline_buffer::set_prompt(const std::string& prompt)
{
  boost::lock_guard<boost::mutex> lock(sync_mutex);
  if(m_cout_buf == NULL)
    return;
  rl_set_prompt(std::string(m_prompt_length, ' ').c_str());
  rl_redisplay();
  rl_set_prompt(prompt.c_str());
  rl_redisplay();
  m_prompt_length = prompt.size();
}
// Register a command for tab completion; duplicates are ignored.
void rdln::readline_buffer::add_completion(const std::string& command)
{
  if(std::find(completion_commands().begin(), completion_commands().end(), command) != completion_commands().end())
    return;
  completion_commands().push_back(command);
}
// Read-only view of the registered completion commands.
const std::vector<std::string>& rdln::readline_buffer::get_completions()
{
  return completion_commands();
}
// std::streambuf flush hook: forwards the buffered output to the real cout
// buffer while temporarily hiding the in-progress readline input line,
// then redraws the prompt and line afterwards.
int rdln::readline_buffer::sync()
{
  boost::lock_guard<boost::mutex> lock(sync_mutex);
  if (m_cout_buf == nullptr)
  {
    return -1;
  }
#if RL_READLINE_VERSION < 0x0700
  // Older readline lacks rl_clear_visible_line(); emulate it by swapping
  // in an empty line buffer and saving the prompt.
  char lbuf[2] = {0,0};
  char *line = NULL;
  int end = 0, point = 0;
#endif
  if (rl_end || (rl_prompt && *rl_prompt))
  {
#if RL_READLINE_VERSION >= 0x0700
    rl_clear_visible_line();
#else
    line = rl_line_buffer;
    end = rl_end;
    point = rl_point;
    rl_line_buffer = lbuf;
    rl_end = 0;
    rl_point = 0;
    rl_save_prompt();
    rl_redisplay();
#endif
  }
  // Drain this buffer's characters into the underlying cout buffer.
  do
  {
    m_cout_buf->sputc( this->sgetc() );
  }
  while ( this->snextc() != EOF );
#if RL_READLINE_VERSION < 0x0700
  if (end || (rl_prompt && *rl_prompt))
  {
    // Restore the user's in-progress line and prompt.
    rl_restore_prompt();
    rl_line_buffer = line;
    rl_end = end;
    rl_point = point;
  }
#endif
  rl_on_new_line();
  rl_redisplay();
  return 0;
}
// readline callback invoked when the user completes a line (or on EOF).
// Stores the line for get_line(), maintains history, and clears the
// prompt when the user asked to exit.
static void handle_line(char* line)
{
  bool exit = false;
  if (line)
  {
    line_stat = rdln::full;
    the_line = line;
    std::string test_line = line;
    boost::trim_right(test_line);
    if(!test_line.empty())
    {
      // Only record non-empty lines, and skip consecutive duplicates.
      if (!same_as_last_line(test_line))
      {
        add_history(test_line.c_str());
        history_set_pos(history_length);
      }
      if (test_line == "exit" || test_line == "q")
        exit = true;
    }
  } else
  /* EOF */
  {
    line_stat = rdln::empty;
    exit = true;
  }
  // Tell readline the current read is finished.
  rl_done = 1;
  if (exit)
    rl_set_prompt("");
  return;
}
// same_as_last_line returns true, if the last line in the history is
// equal to test_line.
static bool same_as_last_line(const std::string& test_line)
{
  // Note that state->offset == state->length, when a new line was entered.
  HISTORY_STATE* state = history_get_history_state();
  const bool same = state->length > 0
      && test_line.compare(state->entries[state->length-1]->line) == 0;
  // BUG FIX: history_get_history_state() allocates a fresh HISTORY_STATE
  // on every call; the original leaked it once per entered line. Free the
  // state struct itself (the entries remain owned by the history list).
  free(state);
  return same;
}
// Generator for readline completion: when state == 0 the scan restarts;
// each call returns a malloc'd copy of the next registered command that
// starts with `text`, or NULL when the list is exhausted.
static char* completion_matches(const char* text, int state)
{
  // Persist the scan position and prefix length across calls.
  static size_t list_index;
  static size_t len;
  if(state == 0)
  {
    list_index = 0;
    len = strlen(text);
  }
  const std::vector<std::string>& completions = rdln::readline_buffer::get_completions();
  for(; list_index<completions.size(); )
  {
    const std::string& cmd = completions[list_index++];
    if(cmd.compare(0, len, text) == 0)
    {
      // readline takes ownership of the returned copy.
      return strdup(cmd.c_str());
    }
  }
  return NULL;
}
// Hook for rl_attempted_completion_function: disable readline's default
// filename completion and complete from the registered command list only.
static char** attempted_completion(const char* text, int start, int end)
{
  rl_attempted_completion_over = 1;
  return rl_completion_matches(text, completion_matches);
}
// Set up tab completion, install the non-blocking line callback with an
// empty prompt, and cap the history at 500 entries.
static void install_line_handler()
{
  rl_attempted_completion_function = attempted_completion;
  rl_callback_handler_install("", handle_line);
  stifle_history(500);
}
// Clear any in-progress input and uninstall the callback handler.
static void remove_line_handler()
{
  rl_replace_line("", 0);
  rl_set_prompt("");
  rl_redisplay();
  rl_callback_handler_remove();
}
// Clear the terminal via readline's clear-screen command.
void rdln::clear_screen()
{
  rl_clear_screen(0, 0);
}
|
{"hexsha": "1047d16968feffd23a0f5481cc701827c32ba584", "size": 5419, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "contrib/epee/src/readline_buffer.cpp", "max_stars_repo_name": "glazedtorus/monero", "max_stars_repo_head_hexsha": "105e2304a2b6344cbfeb382d0e8950de7f7385e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7521.0, "max_stars_repo_stars_event_min_datetime": "2016-09-03T10:52:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T23:27:34.000Z", "max_issues_repo_path": "contrib/epee/src/readline_buffer.cpp", "max_issues_repo_name": "glazedtorus/monero", "max_issues_repo_head_hexsha": "105e2304a2b6344cbfeb382d0e8950de7f7385e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4486.0, "max_issues_repo_issues_event_min_datetime": "2016-09-03T10:53:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T17:57:13.000Z", "max_forks_repo_path": "contrib/epee/src/readline_buffer.cpp", "max_forks_repo_name": "indigoPreserve55/monero", "max_forks_repo_head_hexsha": "440035e6184eaf5f0260eae3c82637c16b929db2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3851.0, "max_forks_repo_forks_event_min_datetime": "2016-09-03T15:44:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T00:53:22.000Z", "avg_line_length": 20.9227799228, "max_line_length": 115, "alphanum_fraction": 0.6748477579, "num_tokens": 1481}
|
[STATEMENT]
lemma rbt_sorted_param[param]:
"(ord.rbt_sorted,ord.rbt_sorted)\<in>(Rk\<rightarrow>Rk\<rightarrow>Id)\<rightarrow>\<langle>Rk,Rv\<rangle>rbt_rel\<rightarrow>Id"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (ord.rbt_sorted, ord.rbt_sorted) \<in> (Rk \<rightarrow> Rk \<rightarrow> bool_rel) \<rightarrow> \<langle>Rk, Rv\<rangle>rbt_rel \<rightarrow> bool_rel
[PROOF STEP]
unfolding ord.rbt_sorted_def[abs_def]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>less. rec_rbt True (\<lambda>c l k v r la ra. ord.rbt_less less k l \<and> ord.rbt_greater less k r \<and> la \<and> ra), \<lambda>less. rec_rbt True (\<lambda>c l k v r la ra. ord.rbt_less less k l \<and> ord.rbt_greater less k r \<and> la \<and> ra)) \<in> (Rk \<rightarrow> Rk \<rightarrow> bool_rel) \<rightarrow> \<langle>Rk, Rv\<rangle>rbt_rel \<rightarrow> bool_rel
[PROOF STEP]
by (parametricity)
|
{"llama_tokens": 361, "file": "Collections_GenCF_Impl_Impl_RBT_Map", "length": 2}
|
"""v1 - Try just replicating some noiseless sweep data taking runs."""
from datetime import datetime
import time
import qsimcirq
import cirq
import numpy as np
from z2_sim.src.QuantumCircuits.Cirq_Code.Z2GaugeCirq import make_trotter_circuit
from z2_sim.src.QuantumCircuits.Cirq_Code import util
grid_sizes = [3, 4, 5]
N_TROTTER_STEPS = 20
EFIELD = 10
MFIELD = 1 / EFIELD
DT = 0.05
obc = True
N_THREADS = 8
FUSION = 4 # previously optimized
TRAJECTORIES = 10
sim_types = [
{'r': TRAJECTORIES, 'f': FUSION, 'g': True}, # GPU
{'r': TRAJECTORIES, 'f': FUSION, 't': N_THREADS}, # CPU
]
timestamp = datetime.today().strftime('%Y%m%d')
time_results = np.zeros((2, len(grid_sizes)))
for i, qsim_options in enumerate(sim_types):
if i == 0:
print("GPU:")
else:
print("CPU")
for j, n in enumerate(grid_sizes):
print("\t grid size: {}".format(n))
qsim_simulator = qsimcirq.QSimSimulator(qsim_options=qsim_options)
circuit, (source, ancilla) = make_trotter_circuit(n, n, n_timesteps=N_TROTTER_STEPS, Jcoup=EFIELD, Gamma=MFIELD, dt=DT, obc=obc, decompbasis='MS')
noisy_circuit = circuit.with_noise(cirq.phase_damp(0.01))
observables = util.make_all_plaq_observables(source, ancilla, circuit)
start = time.time()
expectation = qsim_simulator.simulate_expectation_values_sweep(noisy_circuit, observables=observables, params=None)[0]
end = time.time()
time_results[i, j] = end - start
print("\t", end - start)
np.save(f"results_gpu_bench_{timestamp}.npy", time_results)
|
{"hexsha": "005e28967db80b866f0e2845c106d4833887d5f3", "size": 1584, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmarks/gpu_bench_v1.py", "max_stars_repo_name": "Fermilab-Quantum-Science/Z2Sim-public", "max_stars_repo_head_hexsha": "dfbefffd933aa2e39a0cb9f668b424596dfa7d35", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-02T17:28:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T17:14:02.000Z", "max_issues_repo_path": "benchmarks/gpu_bench_v1.py", "max_issues_repo_name": "Fermilab-Quantum-Science/Z2Sim-public", "max_issues_repo_head_hexsha": "dfbefffd933aa2e39a0cb9f668b424596dfa7d35", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "benchmarks/gpu_bench_v1.py", "max_forks_repo_name": "Fermilab-Quantum-Science/Z2Sim-public", "max_forks_repo_head_hexsha": "dfbefffd933aa2e39a0cb9f668b424596dfa7d35", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-23T16:12:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T16:12:16.000Z", "avg_line_length": 32.3265306122, "max_line_length": 154, "alphanum_fraction": 0.6919191919, "include": true, "reason": "import numpy", "num_tokens": 482}
|
from __future__ import print_function
import argparse
import os
import numpy as np
from datetime import datetime
#import matplotlib
#import matplotlib.pyplot as plt
from astrometry.util.fits import fits_table, merge_tables
def ccd_cuts_inplace(ccds, use_blacklist=True, brick=None):
    """Apply the DR4 survey CCD cuts to `ccds` in place (mirrors runbrick).

    Args:
        ccds: a CCDs fits_table; rows failing a cut are removed via .cut().
        use_blacklist: if True, also apply the survey blacklist cut.
        brick: brick object passed to survey.ccds_for_fitting(); if None,
            that final cut receives None (previously this read an undefined
            global `brick` and raised NameError -- BUGFIX).

    Returns:
        None; `ccds` is mutated in place.
    """
    # coadd/ccd.fits file has extra white space
    ccds.camera= np.char.strip(ccds.camera)
    bands= ['g','r','z']
    # Continue like runbrick
    from legacypipe.runs import get_survey
    survey= get_survey('dr4v2',survey_dir=os.getenv('LEGACY_SURVEY_DIR'),output_dir='./')
    if use_blacklist:
        I = survey.apply_blacklist(ccds)
        ccds.cut(I)
    # Sort images by band -- this also eliminates images whose
    # *filter* string is not in *bands*.
    ccds.cut(np.hstack([np.flatnonzero(ccds.filter==band) for band in bands]))
    I = survey.photometric_ccds(ccds)
    ccds.cut(I)
    I = survey.bad_exposures(ccds)
    ccds.cut(I)
    I = survey.other_bad_things(ccds)
    ccds.cut(I)
    # BUGFIX: `brick` was an undefined global; it is now an explicit argument.
    I = survey.ccds_for_fitting(brick, ccds)
    if I is not None:
        ccds.cut(I)
def get_dr4b_drc4_dicts(rint):
    '''Build index maps for comparing one random row between DR4b and DR4c.

    rint -- random int between 0 and len of tractor catalogue

    Returns (B, C, mapper) where B maps DR4b column name -> index tuple,
    C maps DR4c column name -> index tuple, and mapper translates DR4b
    column names to their DR4c equivalents.
    '''
    # DR4b decam columns are indexed [row, band]; the tuples below use
    # band index 1 (paired with the *_g DR4c columns via `mapper`).
    decam_cols = ['decam_flux', 'decam_flux_ivar', 'decam_rchi2',
                  'decam_allmask', 'decam_depth']
    # DR4b wise columns use band index 0 (paired with *_w1 columns).
    wise_cols = ['wise_flux', 'wise_flux_ivar', 'wise_rchi2', 'wise_mask']
    B = {name: (1, rint) for name in decam_cols}
    B.update({name: (0, rint) for name in wise_cols})
    # Aperture fluxes carry an extra aperture index.
    B['decam_apflux'] = (1, 4, rint)
    B['decam_apflux_ivar'] = (1, 4, rint)
    # DR4b column name -> equivalent DR4c column name.
    mapper = dict(decam_flux='flux_g',
                  decam_flux_ivar='flux_ivar_g',
                  decam_rchi2='rchisq_g',
                  decam_allmask='allmask_g',
                  decam_depth='psfdepth_g',
                  wise_flux='flux_w1',
                  wise_flux_ivar='flux_ivar_w1',
                  wise_rchi2='rchisq_w1',
                  wise_mask='wisemask_w1',
                  decam_apflux='apflux_g',
                  decam_apflux_ivar='apflux_ivar_g')
    # DR4c columns are per-band, so only the row index is needed.
    scalar_c_cols = ['flux_g', 'flux_ivar_g', 'rchisq_g', 'allmask_g',
                     'psfdepth_g', 'flux_w1', 'flux_ivar_w1', 'rchisq_w1',
                     'wisemask_w1']
    C = {name: (rint,) for name in scalar_c_cols}
    C['apflux_g'] = (4, rint)
    C['apflux_ivar_g'] = (4, rint)
    return B, C, mapper
def parse_coords(s):
    '''Parse an "x,y" string into a tuple of two ints (argparse type helper).

    stackoverflow:
    https://stackoverflow.com/questions/9978880/python-argument-parser-list-of-list-or-tuple-of-tuples
    '''
    try:
        # Exactly two comma-separated fields, each an integer.
        left, right = s.split(',')
        return int(left), int(right)
    except:
        raise argparse.ArgumentTypeError("Coordinates must be x,y")
# Command-line interface: --dowhat picks the accounting task handled by the
# if/elif chain below; --fn is the input file/brick list; --line_start and
# --line_end optionally slice that list.
# NOTE(review): choice 'bricks_notdone' has no matching branch below -- confirm.
parser = argparse.ArgumentParser(description='Generate a legacypipe-compatible CCDs file from a set of reduced imaging.')
parser.add_argument('--dowhat', choices=['dr4_stats','blobs','dr4c_vs_dr4b','bricks_notdone','time_per_brick','nersc_time','sanity_tractors','num_grz','badastrom','count_objects'],action='store', default=True)
parser.add_argument('--fn', action='store', default=False)
parser.add_argument('--line_start', type=int,default=None, help='first line in fn list to use')
parser.add_argument('--line_end', type=int,default=None, help='last line in fn list to use')
args = parser.parse_args()
if args.dowhat == 'dr4_stats':
    # RUN: python job_accounting.py --dowhat dr4_stats --fn dr4_tractors_done.tmp
    # Per-brick stats: bricks with Inf/NaN (which cols, written to file),
    # total number of sources, N in primary brick, psf, simp, exp, dev, comp.
    ncols= 165  # NOTE(review): unused in this branch -- confirm before relying on it
    fns=np.loadtxt(args.fn,dtype=str)
    assert(len(fns) > 0)
    print(args)
    # BUGFIX: output bookkeeping files are now always defined; previously
    # `fils` only existed when --line_start/--line_end were both given.
    suff= 'all'
    if args.line_start and args.line_end:
        fns= fns[args.line_start:args.line_end+1]
        suff= '%d_%d' % (args.line_start,args.line_end)
    # Remove file lists for clean slate
    fils= dict(readerr='%s_readerr_%s.txt' % (args.dowhat,suff),
               stats='%s_stats_%s.txt' % (args.dowhat,suff),
               infnan='%s_infnan_%s.txt' % (args.dowhat,suff),
               # BUGFIX: 'unexpectedcol' is written below but was missing here
               unexpectedcol='%s_unexpectedcol_%s.txt' % (args.dowhat,suff))
    for key,outfn in fils.items():
        print(outfn)
        if os.path.exists(outfn):
            os.remove(outfn)
    # Loop over completed Tractor Cats
    for ith,fn in enumerate(fns):
        if ith % 100 == 0: print('%d/%d' % (ith+1,len(fns)))
        brick= os.path.basename(fn)
        brick= brick.replace('tractor-','').replace('.fits','')
        try:
            t=fits_table(fn)
        except:
            # Report any read errors
            print('error reading %s' % fn)
            with open(fils['readerr'],'a') as foo:
                foo.write('%s\n' % fn)
            continue  # BUGFIX: `t` is undefined/stale after a failed read
        # Book keeping
        bk={}
        bk['rows']= len(t)
        bk['cols']= len(t.get_columns())
        bk['prim']= len(t[t.brick_primary])
        typ= np.char.strip(t.type)
        models= ['psf','simp','exp','dev','comp']
        for model in models:
            bk[model]= len(t[typ == model.upper()])
        # Write: brick, rows, cols, prim, model 1, 2, 3, 4, 5
        line= '%s %d %d %d' % (brick,bk['rows'],bk['cols'],bk['prim'])
        for model in models:
            line += ' %d' % bk[model]
        line += '\n'
        with open(fils['stats'],'a') as foo:
            foo.write(line)
        # Write Infs/Nans to file
        for col in t.get_columns():
            try:
                ind= np.isfinite(t.get(col)) == False
                # This col has a non-finite value
                if np.any(ind):
                    with open(fils['infnan'],'a') as foo:
                        foo.write('%s %s %f\n' % (brick,col,t.get(col)[ind][0]))
            except TypeError:
                # np.isfinite cannot be applied to these data types
                if col in ['brickname','type','wise_coadd_id']:
                    pass
                # report col if this error occurs for a col not in the above
                else:
                    with open(fils['unexpectedcol'],'a') as foo:
                        foo.write('%s %s\n' % (brick,col))
elif args.dowhat == 'blobs':
# RUN: python job_accounting.py --dowhat sanity_tractors --fn dr4_tractors_done.tmp
# %sources affected by galdepth (694 pixels)
fns=np.loadtxt(args.fn,dtype=str)
assert(len(fns) > 0)
print(args)
if args.line_start and args.line_end:
fns= fns[args.line_start:args.line_end+1]
# Remove file lists for clean slate
suff= '%d_%d' % (args.line_start,args.line_end)
fils= dict(readerr='%s_readerr_%s.txt' % (args.dowhat,suff),
blobs='%s_blobs_%s.txt' % (args.dowhat,suff))
for outfn in fils.keys():
if os.path.exists(outfn):
os.remove(outfn)
# Loop over completed Tractor Cats
for ith,fn in enumerate(fns):
if ith % 100 == 0: print('%d/%d' % (ith+1,len(fns)))
brick= os.path.basename(fn)
brick= brick.replace('tractor-','').replace('.fits','')
try:
t=fits_table(fn)
except:
# Report any read errors
print('error reading %s' % fn)
with open(fils['readerr'],'a') as foo:
foo.write('%s\n' % fn)
# Large blobs
SZ= 694
has_blob= np.any((t.blob_width > SZ,t.blob_height > SZ),axis=0)
nblobs= np.where(has_blob)[0].size
# Primary
nblobs_prim= np.where( (has_blob)*(t.brick_primary) )[0].size
rows= len(t)
rows_prim= len(t[t.brick_primary])
with open(fils['blobs'],'a') as foo:
foo.write('%s %d %d %d %d\n' % (brick,rows_prim,rows,nblobs,nblobs_prim))
elif args.dowhat == 'dr4c_vs_dr4b':
ncols= dict(dr4c=165,dr4b=73)
# RUN: python job_accounting.py --dowhat dr4c_vs_dr4b --fn dr4_tractors_done.tmp
fns=np.loadtxt(args.fn,dtype=str)
assert(len(fns) > 0)
print(args)
if args.line_start and args.line_end:
fns= fns[args.line_start:args.line_end]
# Remove file lists for clean slate
fils= dict(readerr='%s_readerr.txt' % args.dowhat,
ncolswrong='%s_ncolswrong.txt' % args.dowhat,
wrongvals='%s_wrongvals.txt' % args.dowhat)
if args.line_start and args.line_end:
suff= '%d_%d' % (args.line_start, args.line_end)
fils= dict(readerr='%s_readerr_%s.txt' % (args.dowhat,suff),
ncolswrong='%s_ncolswrong_%s.txt' % (args.dowhat,suff),
wrongvals='%s_wrongvals_%s.txt' % (args.dowhat,suff))
for outfn in fils.keys():
if os.path.exists(outfn):
os.remove(outfn)
# Loop over completed Tractor Cats
for ith,fn_c in enumerate(fns):
if ith % 100 == 0: print('%d/%d' % (ith+1,len(fns)))
try:
c=fits_table(fn_c)
fn_b= fn_c.replace('/global/projecta/projectdirs/cosmo/work/dr4c',
'/global/cscratch1/sd/desiproc/dr4/data_release/dr4_fixes')
b=fits_table(fn_b)
except:
# Report any read errors
print('error reading %s OR %s ' % (fn_c,fn_b))
with open(fils['readerr'],'a') as foo:
foo.write('%s\n' % fn_c)
# Number of columns
if len(c.get_columns()) != ncols['dr4c']:
with open(fils['ncolswrong'],'a') as foo:
foo.write('%s %d\n' % (fn_c,len(c.get_columns())))
if len(b.get_columns()) != ncols['dr4b']:
with open(fils['ncolswrong'],'a') as foo:
foo.write('%s %d\n' % (fn_b,len(b.get_columns())))
# Decimal values DR4b to c
rint= np.random.randint(low=0,high=len(c),size=1)[0]
B,C,mapper= get_dr4b_drc4_dicts(rint)
for key in B.keys():
# DR4b value
tup= B[key]
if len(tup) == 2:
iband,i= tup
data_b= b.get(key)[i,iband]
elif len(tup) == 3:
iband,iap,i= tup
data_b= b.get(key)[i,iband,iap]
# DR4c value
key_c= mapper[key]
tup= C[key_c]
if len(tup) == 1:
i,= tup
data_c= c.get(key_c)[i]
elif len(tup) == 2:
iap,i= tup
data_c= c.get(key_c)[i,iap]
#print('%s|%s %.2f|%.2f for index=%d' % (key,key_c,data_b,data_c,rint))
if data_b != data_c:
with open(fils['wrongvals'],'a') as foo:
foo.write('%f %f %s %s %s %s\n' % (data_b,data_c,key,key_c,fn_b,fn_c))
elif args.dowhat == 'time_per_brick':
    # Compute per-brick wall-clock runtimes (minutes) from a "startend"
    # listing whose columns are: logfile, start date, start time, end date,
    # end time. Writes a "<prefix>dt.txt" file next to the input.
    fns,start1,start2,end1,end2=np.loadtxt(args.fn,dtype=str,unpack=True)
    # Remove extraneous digits
    # Handle single log file
    # NOTE(review): np.string_ was removed in NumPy 2.0 (and dtype checks
    # via type() are fragile) -- confirm the NumPy version this runs under.
    if type(fns) == np.string_:
        fns= [fns]
        start1= [start1]
        end1= [end1]
        start2= [start2]
        end2= [end2]
    # Truncate time strings to "HH:MM:SS.ff" so strptime's %f parses.
    start2=np.array([val[:11] for val in start2])
    end2=np.array([val[:11] for val in end2])
    sumdt=0
    out=args.fn.replace('startend.txt','dt.txt')
    with open(out,'w') as foo:
        foo.write('# %s\n' % (os.path.dirname(args.fn),) )
        for fn,s1,s2,e1,e2 in zip(fns,start1,start2,end1,end2):
            name=os.path.basename(fn)
            start=datetime.strptime("%s %s" % (s1,s2), "%Y-%m-%d %H:%M:%S.%f")
            end=datetime.strptime("%s %s" % (e1,e2), "%Y-%m-%d %H:%M:%S.%f")
            # Write dt to file (elapsed minutes)
            dt= end - start
            dt= dt.total_seconds() / 60.
            foo.write('%s %s\n' % (name,dt) )
            # Sum
            sumdt+= dt
        foo.write('# Total(min) %.1f\n' % sumdt)
    print('Wrote %s' % out)
elif args.dowhat == 'nersc_time':
    # Histogram per-brick runtimes and estimate total charged NERSC hours.
    # NOTE(review): time_hist uses `plt`, but the matplotlib import at the
    # top of this file is commented out -- confirm before running this branch.
    def time_hist(hrs,name='hist.png'):
        # Plot a histogram of runtimes (hours) with the median marked.
        print('Making hist')
        bins=np.linspace(0,2,num=100)
        fig,ax= plt.subplots()
        ax.hist(hrs,bins=bins)
        # Median and 90% of everything
        med=np.percentile(hrs,q=50)
        ax.plot([med]*2,ax.get_ylim(),'r--')
        print('med=%f' % med)
        xlab=ax.set_xlabel('Hours (Wall Clock)')
        #ax.set_xlim(xlim)
        plt.savefig(name)
        plt.close()
        print('Wrote %s' % name)
    def nersc_time(hrs):
        # Convert summed node-hours into charged NERSC hours.
        tot= np.sum(hrs)
        nersc= 2*2*24*np.sum(hrs) # 24 cores/node, 2x queue factor, 2x machine factor
        print('total time nodes used [hrs]=%f' % tot)
        print('total NERSC time [hrs]=%f' % nersc)
    # Cache the runtimes of finished ("don") bricks in bricks_time_don.txt.
    out='bricks_time_don.txt'
    if not os.path.exists(out):
        bricks,dt=np.loadtxt('bricks_time.txt',dtype=str,unpack=True)
        dt= dt.astype(float)
        don=np.loadtxt('don.txt',dtype=str)
        data={}
        with open(out,'w') as foo:
            print('Looping over don bricks')
            for b in don:
                data[b]= dt[bricks == b]
                # only write bricks that appear exactly once
                if len( data[b] ) == 1:
                    foo.write('%s %s\n' % (b,str(data[b][0])) )
        print('Wrote %s' % out)
    # ALL DR4 bricks
    bricks,dt=np.loadtxt('bricks_time.txt',dtype=str,unpack=True)
    hrs=dt.astype(float)/3600
    time_hist(hrs,name='hist_bricks_all.png')
    print('ALL bricks')
    nersc_time(hrs)
    # Finished bricks
    bricks,dt=np.loadtxt(out,dtype=str,unpack=True)
    hrs= dt.astype(float)/3600.
    time_hist(hrs,name='hist_bricks_done.png')
    print('Done bricks')
    nersc_time(hrs)
elif args.dowhat == 'sanity_tractors':
# RUN: python job_accounting.py --dowhat sanity_tractors --fn dr4_tractors_done.tmp
# Read each finished Tractor Catalogue
# Append name to file if:
# -- error reading it
# -- no wise flux
fns=np.loadtxt(args.fn,dtype=str)
assert(len(fns) > 0)
# Remove file lists for clean slate
fils= dict(readerr='sanity_tractors_readerr.txt',\
nowise='sanity_tractors_nowise.txt',\
nolc='sanity_tractors_nolc.txt',\
badastrom='sanity_tractors_badastromccds.txt',\
ccds='sanity_tractors_hascutccds.txt')
for outfn in fils.keys():
if os.path.exists(outfn):
os.remove(outfn)
# Loop over completed Tractor Cats
for fn in fns:
try:
t=fits_table(fn)
except:
print('error reading %s' % fn)
with open(fils['readerr'],'a') as foo:
foo.write('%s\n' % fn)
# No wise info
if not 'wise_flux' in t.get_columns():
print('wise_flux not in %s' % fn)
with open(fils['nowise'],'a') as foo:
foo.write('%s\n' % fn)
elif not 'wise_lc_flux' in t.get_columns():
print('wise LCs not in %s' % fn)
with open(fils['nolc'],'a') as foo:
foo.write('%s\n' % fn)
# CCDs
# tractor/120/tractor-1201p715.fits
# coadd/120/1201p715/legacysurvey-1201p715-ccds.fits
brick= os.path.basename(fn).replace('tractor-','').replace('.fits','')
ccdfn= os.path.join( os.path.dirname(fn).replace('tractor','coadd'),\
brick, \
'legacysurvey-%s-ccds.fits' % brick )
ccds= fits_table(ccdfn)
# Bad Astrometry
flag= (np.sqrt(ccds.ccdrarms**2 + ccds.ccddecrms**2) >= 0.1)*(ccds.ccdphrms >= 0.1)
if np.where(flag)[0].size > 0:
with open(fils['badastrom'],'a') as foo:
foo.write('%s\n' % ccdfn)
#ccds2= ccds.copy()
#ccd_cuts_inplace(ccds)
#if len(ccds) != len(ccds2):
# print('has ccds that should have been removed: %s' % ccdfn)
# with open(fils['ccds'],'a') as foo:
# foo.write('%s\n' % ccdfn)
elif args.dowhat == 'badastrom':
# RUN: python job_accounting.py --dowhat sanity_tractors --fn dr4_tractors_done.tmp
# Read each finished Tractor Catalogue
# Append name to file if:
# -- error reading it
# -- no wise flux
bricks=np.loadtxt(args.fn,dtype=str)
assert(len(bricks) > 0)
# Remove file lists for clean slate
fils= dict(readerr='sanity_ccds_readerr.txt',\
badastrom='sanity_ccds_have_badastrom.txt')
for outfn in fils.keys():
if os.path.exists(outfn):
os.remove(outfn)
for cnt,brick in enumerate(bricks):
if cnt % 100 == 0: print('reading %d/%d' % (cnt,len(bricks)))
bri= brick[:3]
fn= '/scratch1/scratchdirs/desiproc/DRs/data-releases/dr4_fixes/coadd/%s/%s/legacysurvey-%s-ccds.fits' % (bri,brick,brick)
try:
ccds=fits_table(fn)
# Bad Astrometry
flag= np.any((np.sqrt(ccds.ccdrarms**2 + ccds.ccddecrms**2) > 0.1,
ccds.ccdphrms > 0.2), axis=0)
if np.where(flag)[0].size > 0:
with open(fils['badastrom'],'a') as foo:
foo.write('%s %s\n' % (brick,fn))
except:
print('error reading %s' % fn)
with open(fils['readerr'],'a') as foo:
foo.write('%s\n' % fn)
elif args.dowhat == 'count_objects':
# RUN: python job_accounting.py --dowhat sanity_tractors --fn dr4_tractors_done.tmp
# Read each finished Tractor Catalogue
# Append name to file if:
# -- error reading it
# -- no wise flux
fns=np.loadtxt(args.fn,dtype=str)
assert(len(fns) > 0)
# Remove file lists for clean slate
fils= dict(readerr='sanity_tractors_readerr.txt',\
counts='sanity_tractors_counts.txt')
for outfn in fils.keys():
if os.path.exists(outfn):
os.remove(outfn)
# Loop over completed Tractor Cats
for fn in fns:
try:
t=fits_table(fn)
except:
print('error reading %s' % fn)
with open(fils['readerr'],'a') as foo:
foo.write('%s\n' % fn)
with open(fils['counts'],'a') as foo:
foo.write('%d %s\n' % (len(t),fn))
elif args.dowhat == 'num_grz':
# record number of grz for a list of bricks
bricklist= np.loadtxt= np.loadtxt(args.fn,dtype=str)
nccds= []
d={}
for i in range(1,10,2):
d['%d <= grz' % i]=[]
for cnt,brick in enumerate(bricklist):
if cnt % 100 == 0:
print('Reading %d/%d' % (cnt+1,len(bricklist)))
# Try reading
try:
fn= "/scratch1/scratchdirs/desiproc/DRs/data-releases/dr4/coadd/%s/%s/legacysurvey-%s-ccds.fits" % (brick[:3],brick,brick)
#fn= "/scratch1/scratchdirs/desiproc/DRs/data-releases/dr4_fixes/coadd/%s/%s/legacysurvey-%s-ccds.fits" % (brick[:3],brick,brick)
a=fits_table(fn)
except IOError:
print('Cannot find %s' % fn)
continue
# Save info
nccds.append( len(a) )
for i in range(1,10,2):
g= np.where(a.filter == 'g')[0].size
r= np.where(a.filter == 'r')[0].size
z= np.where(a.filter == 'z')[0].size
if (g >= i) & (r >= i) & (z >= i):
d['%d <= grz' % i].append( brick )
# Print
for i in range(1,10,2):
key= '%d <= grz' % i
print('%d bricks with %s' % (len(d[key]),key))
nccds= np.array(nccds)
inds= np.argsort(nccds)[::-1]
for brick,ccd in zip(bricklist[inds][:10],nccds[inds][:10]):
print('%d ccds in %s' % (ccd,brick))
with open('grz_ge_1.txt','w') as foo:
for brick in d['1 <= grz']:
foo.write('%s\n' % brick)
print('wrote %s' % 'grz_ge_1.txt')
|
{"hexsha": "844eb9febf40e64f6b44df89d4648fa3ba1510cb", "size": 19494, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/dr4/job_accounting.py", "max_stars_repo_name": "manera/legacypipe", "max_stars_repo_head_hexsha": "64dfe164fe1def50f5ad53784edd9a63321b0d45", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2015-08-25T00:25:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T06:35:54.000Z", "max_issues_repo_path": "bin/dr4/job_accounting.py", "max_issues_repo_name": "manera/legacypipe", "max_issues_repo_head_hexsha": "64dfe164fe1def50f5ad53784edd9a63321b0d45", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 644, "max_issues_repo_issues_event_min_datetime": "2015-07-08T16:26:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T19:09:10.000Z", "max_forks_repo_path": "bin/dr4/job_accounting.py", "max_forks_repo_name": "manera/legacypipe", "max_forks_repo_head_hexsha": "64dfe164fe1def50f5ad53784edd9a63321b0d45", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2015-08-24T18:27:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-04T03:10:42.000Z", "avg_line_length": 40.1111111111, "max_line_length": 210, "alphanum_fraction": 0.5596080845, "include": true, "reason": "import numpy", "num_tokens": 5720}
|
# This script process the simulation data using non-linear least squares fitting
# for both the original Reference Region Model (NRRM) and the Constrained approach (CNRRM)
# Run this script by launching julia then entering: include("path/to/e01_2a_simProcessNLSQ.jl")
# Estimated runtimes:
# ~2.6 hours for NRRM
# ~1.2 hours for CNRRM
# So total run time is almost 4 hours
# Note: These runtimes were obtained using 4 threads.
# If only a single core is used, then the runtime could be 4 times longer

# Pick the choice of parameters for the reference tissue parameters
# (This should match the choice from step e01_1)
refName = "refY"
# The temporal resolution (in seconds) to process over
listTRes = [1,5,10,15,30,60]
# The choice of Contrast-Noise Ratios
# (This does not change noise, rather it is only here to remind the code what the CNRs are. Should match values from simMaker.)
listCNR = collect(5:5:50)

# First, figure out the current file path
curFilePath = Base.source_path() # Path to current file
workDir = dirname(curFilePath) # Our working directory (directory of current file)
## If the above doesn't work, then user has to manually enter the location of the CRRM directory
## i.e. wordDir = "/path/to/CLRRM/Directory"

# Spawn one worker per physical core (detected with Hwloc).
# NOTE(review): on Julia >= 0.7, nprocs/addprocs require `using Distributed`
# -- confirm the Julia version this script targets.
import Hwloc
topology = Hwloc.topology_load()
counts = Hwloc.histmap(topology)
nCores = counts[:Core]
if nprocs()<nCores
    addprocs(nCores-nprocs()+1)
end

## Load packages
using DCEMRI # Needed for the levenberg-marquardt fitting
using MAT # Needed for loading/saving .mat files

# Some required directories/files
matDir = workDir * "/data/simData/$refName/rawData"
warmupFile = workDir * "/data/QIBA-ToftsV6/QIBAv6-Mini4Jl-Warmup.mat"
auxCode = workDir * "/jlfiles/auxCode.jl"
refRegCode = workDir * "/jlfiles/refRegionFunction.jl"
outDir = workDir * "/data/simData/$refName/NRRM"
# Make the output directory if it doesn't already exist
if !(isdir(outDir))
    mkdir(outDir)
end

# Load pre-requisite code
include(auxCode)

## Warmup run to make sure all is well
println("==== Warmup Run ====")
results = fitdata(datafile=warmupFile, models=[2])
rm("output.mat") # Delete output since it isn't needed

# If warmup succeeds, then proceed by loading Reference Region Model
include(refRegCode)

# Define the reference tissue parameters
# Default is 'refY'
refKTrans = 0.1;
refVe = 0.1;
if (refName == "refWS")
    refKTrans = 0.07;
    refVe = 0.14;
elseif (refName == "refP")
    refKTrans = 0.137;
    refVe = 0.115;
end
refKep = refKTrans/refVe

# Get the names of the mat files containing simulated data
matFiles = readdir(matDir)
numFiles = length(matFiles)
println("")
println("===============================")
println("==== Start processing data ====")
# Loop through the simulated datasets
for q=1:numFiles
    # Load the simulated data for each CNR
    curCNR = listCNR[q]
    println("")
    println("------------")
    println("Processing CNR = $curCNR")
    curFile = matDir * "/Sim-CNR-$curCNR.mat"
    matData = matread(curFile)
    cnrInd = listCNR[q]  # NOTE(review): appears unused below -- confirm
    for i=1:length(listTRes)
        # Appropriately downsample simulated data to obtain desired temporal resolutions
        curTRes = listTRes[i]
        println("")
        println("---")
        println("Temporal Resolution = $curTRes s")
        # Downsample the data
        Ct = downsample(matData["CtNoisy"],curTRes)
        Crr = downsample(vec(matData["CrrClean"]),curTRes)
        t = downsample(vec(matData["T"]),curTRes)
        # Build a dummy mask (ones everywhere)
        (nT, nV) = size(Ct)
        # Non-Linear Reference Region Model
        # (Unconstrained and Constrained are both run if estKepRR is set to zero)
        (pkParamsCN, residCN, medianKepRR, pkParamsN, residN, runtimeN, runtimeC) = fitCNRRM(Ct, Crr, t, estKepRR=0.0, doTime=true)
        # Output results to a .mat file for this (CNR, TRes) combination
        results = Dict()
        results["pkParamsN"] = pkParamsN
        results["residN"] = residN
        results["pkParamsCN"] = pkParamsCN
        results["residCN"] = residCN
        results["runtimeN"] = runtimeN
        results["runtimeC"] = runtimeC
        results["medianKepRR"] = medianKepRR
        matwrite(outDir * "/Sim-CNR-$curCNR-TRes-$curTRes.mat", results)
    end
end
println("==== Finished ====")
println("==================")
|
{"hexsha": "0681020f638e4a85221b797b2fec1c10a1828727", "size": 4137, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "e01_2a_simProcessNLSQ.jl", "max_stars_repo_name": "MPUmri/CRRM", "max_stars_repo_head_hexsha": "75249121d7c98c57b8b0c01013e86963707113e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-24T11:14:32.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-24T11:14:32.000Z", "max_issues_repo_path": "e01_2a_simProcessNLSQ.jl", "max_issues_repo_name": "MPUmri/CRRM", "max_issues_repo_head_hexsha": "75249121d7c98c57b8b0c01013e86963707113e9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-08-21T02:30:08.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-21T02:30:08.000Z", "max_forks_repo_path": "e01_2a_simProcessNLSQ.jl", "max_forks_repo_name": "MPUmri/CRRM", "max_forks_repo_head_hexsha": "75249121d7c98c57b8b0c01013e86963707113e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-07T16:14:07.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-07T16:14:07.000Z", "avg_line_length": 33.3629032258, "max_line_length": 127, "alphanum_fraction": 0.7067923616, "num_tokens": 1204}
|
[STATEMENT]
lemma lset_llist_of_tllist [simp]:
"lset (llist_of_tllist xs) = tset xs" (is "?lhs = ?rhs")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lset (llist_of_tllist xs) = tset xs
[PROOF STEP]
proof(intro set_eqI iffI)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x. x \<in> lset (llist_of_tllist xs) \<Longrightarrow> x \<in> tset xs
2. \<And>x. x \<in> tset xs \<Longrightarrow> x \<in> lset (llist_of_tllist xs)
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x. x \<in> lset (llist_of_tllist xs) \<Longrightarrow> x \<in> tset xs
2. \<And>x. x \<in> tset xs \<Longrightarrow> x \<in> lset (llist_of_tllist xs)
[PROOF STEP]
assume "x \<in> ?lhs"
[PROOF STATE]
proof (state)
this:
x \<in> lset (llist_of_tllist xs)
goal (2 subgoals):
1. \<And>x. x \<in> lset (llist_of_tllist xs) \<Longrightarrow> x \<in> tset xs
2. \<And>x. x \<in> tset xs \<Longrightarrow> x \<in> lset (llist_of_tllist xs)
[PROOF STEP]
thus "x \<in> ?rhs"
[PROOF STATE]
proof (prove)
using this:
x \<in> lset (llist_of_tllist xs)
goal (1 subgoal):
1. x \<in> tset xs
[PROOF STEP]
by(induct "llist_of_tllist xs" arbitrary: xs rule: llist_set_induct)(auto simp: tllist.set_sel(2))
[PROOF STATE]
proof (state)
this:
x \<in> tset xs
goal (1 subgoal):
1. \<And>x. x \<in> tset xs \<Longrightarrow> x \<in> lset (llist_of_tllist xs)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> tset xs \<Longrightarrow> x \<in> lset (llist_of_tllist xs)
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> tset xs \<Longrightarrow> x \<in> lset (llist_of_tllist xs)
[PROOF STEP]
assume "x \<in> ?rhs"
[PROOF STATE]
proof (state)
this:
x \<in> tset xs
goal (1 subgoal):
1. \<And>x. x \<in> tset xs \<Longrightarrow> x \<in> lset (llist_of_tllist xs)
[PROOF STEP]
thus "x \<in> ?lhs"
[PROOF STATE]
proof (prove)
using this:
x \<in> tset xs
goal (1 subgoal):
1. x \<in> lset (llist_of_tllist xs)
[PROOF STEP]
proof(induct rule: tllist_set_induct)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>xs. \<not> is_TNil xs \<Longrightarrow> thd xs \<in> lset (llist_of_tllist xs)
2. \<And>xs y. \<lbrakk>\<not> is_TNil xs; y \<in> tset (ttl xs); y \<in> lset (llist_of_tllist (ttl xs))\<rbrakk> \<Longrightarrow> y \<in> lset (llist_of_tllist xs)
[PROOF STEP]
case (find xs)
[PROOF STATE]
proof (state)
this:
\<not> is_TNil xs
goal (2 subgoals):
1. \<And>xs. \<not> is_TNil xs \<Longrightarrow> thd xs \<in> lset (llist_of_tllist xs)
2. \<And>xs y. \<lbrakk>\<not> is_TNil xs; y \<in> tset (ttl xs); y \<in> lset (llist_of_tllist (ttl xs))\<rbrakk> \<Longrightarrow> y \<in> lset (llist_of_tllist xs)
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
\<not> is_TNil xs
goal (1 subgoal):
1. thd xs \<in> lset (llist_of_tllist xs)
[PROOF STEP]
by(cases xs) auto
[PROOF STATE]
proof (state)
this:
thd xs \<in> lset (llist_of_tllist xs)
goal (1 subgoal):
1. \<And>xs y. \<lbrakk>\<not> is_TNil xs; y \<in> tset (ttl xs); y \<in> lset (llist_of_tllist (ttl xs))\<rbrakk> \<Longrightarrow> y \<in> lset (llist_of_tllist xs)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>xs y. \<lbrakk>\<not> is_TNil xs; y \<in> tset (ttl xs); y \<in> lset (llist_of_tllist (ttl xs))\<rbrakk> \<Longrightarrow> y \<in> lset (llist_of_tllist xs)
[PROOF STEP]
case step
[PROOF STATE]
proof (state)
this:
\<not> is_TNil xs_
y_ \<in> tset (ttl xs_)
y_ \<in> lset (llist_of_tllist (ttl xs_))
goal (1 subgoal):
1. \<And>xs y. \<lbrakk>\<not> is_TNil xs; y \<in> tset (ttl xs); y \<in> lset (llist_of_tllist (ttl xs))\<rbrakk> \<Longrightarrow> y \<in> lset (llist_of_tllist xs)
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
\<not> is_TNil xs_
y_ \<in> tset (ttl xs_)
y_ \<in> lset (llist_of_tllist (ttl xs_))
goal (1 subgoal):
1. y_ \<in> lset (llist_of_tllist xs_)
[PROOF STEP]
by(auto simp add: ltl_llist_of_tllist[symmetric] simp del: ltl_llist_of_tllist dest: in_lset_ltlD)
[PROOF STATE]
proof (state)
this:
y_ \<in> lset (llist_of_tllist xs_)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x \<in> lset (llist_of_tllist xs)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1996, "file": "Coinductive_TLList", "length": 19}
|
program unum_type_conversion_test
  ! Smoke test for unum <-> IEEE double conversion: sets up a (4,5) unum
  ! environment, prints several special unum values, then round-trips a
  ! double-precision value through the unum representation.
  use ISO_FORTRAN_ENV, only : REAL64
  use unum_env_mod
  use unum_to_float_mod
  use real_to_unum_mod
  use display_unum_as_real_mod
  use display_masks_mod
  use unum_t_mod
  use display_env_mod, only : Display_unum => Display_env,&
                              Display_posinfu,&
                              Display_neginfu
  implicit none
  real (REAL64) :: real_temp       ! value to round-trip through a unum
  real (REAL64) :: temp_result     ! NOTE(review): currently unused -- confirm
  type (unum_t) :: unum_temp
  !integer (INT64) :: unum_pi = b'011001001000011111101111' !(1,4) env
  ! Configure the unum environment with esizesize=4, fsizesize=5.
  call Set_unum_env (4,5)
  !print *, 'esize mask'
  !call Display_esizemask ()
  !print *, 'fsize mask'
  !call Display_fsizemask ()
  !print *, 'efsize mask'
  !call Display_efsizemask ()
  !print *, 'ubit mask'
  !call Display_ubitmask ()
  !print *, 'utag mask'
  !call Display_utagmask ()
  ! Report basic environment quantities.
  print *, 'Max ubits = ', Get_maxubits ()
  print *, 'Maxreal = ', Get_maxreal ()
  print *, 'smallnormal'
  print *, U2f (Get_smallnormalu ())
  !print *, 'Approximate Pi'
  !print *, '(', u2f (unum_pi), ',', u2f (unum_pi + Get_ulpu ()), ')'
  ! Display the infinities both as raw unums and converted to doubles.
  print *, 'Positive infinity'
  call Display_posinfu ()
  print *, U2f (Get_posinfu ())
  !print *, 'Signbigu'
  !call Display_unum (Get_signbigu ())
  print *, 'Negative infinity'
  call Display_neginfu ()
  print *, U2f (Get_neginfu ())
  ! Round-trip a sample value: double -> unum -> double.
  real_temp = 16.12
  print *, 'Value in double precision is: ', real_temp
  print *, 'Convert to unum'
  unum_temp = X2u (real_temp)
  call Display_unum (unum_temp)
  print *, 'Convert back to real'
  call Display_unum_as_real (unum_temp)
end program unum_type_conversion_test
|
{"hexsha": "aa8cab7fc4b1513e7fc1220d649ef07c909f6250", "size": 1616, "ext": "f08", "lang": "FORTRAN", "max_stars_repo_path": "unum_type_conversion_test.f08", "max_stars_repo_name": "CrashBurnRepeat/fUnums", "max_stars_repo_head_hexsha": "ef0b621bc0aab5d7915f6c1366d671f02293af30", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-05-10T08:27:17.000Z", "max_stars_repo_stars_event_max_datetime": "2017-05-15T22:34:48.000Z", "max_issues_repo_path": "unum_type_conversion_test.f08", "max_issues_repo_name": "CrashBurnRepeat/fUnums", "max_issues_repo_head_hexsha": "ef0b621bc0aab5d7915f6c1366d671f02293af30", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "unum_type_conversion_test.f08", "max_forks_repo_name": "CrashBurnRepeat/fUnums", "max_forks_repo_head_hexsha": "ef0b621bc0aab5d7915f6c1366d671f02293af30", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-29T07:57:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-29T07:57:15.000Z", "avg_line_length": 24.1194029851, "max_line_length": 74, "alphanum_fraction": 0.6577970297, "num_tokens": 502}
|
import math
import numpy as np
import os
import pandas as pd
import torch
from tqdm import tqdm as tqdm
from spacy import displacy
from spacy.util import is_in_jupyter
from transformers import AutoTokenizer
from typing import Dict
from thermostat.data import get_local_explanations
from thermostat.utils import delistify, detach_to_list, read_path
class ColorToken:
    """One token of a heatmap: text, attribution score, and an RGB color."""

    def __init__(self, token, attribution, text_field, token_index, thermounit_vars: Dict):
        self.token = token
        self.attribution = attribution
        self.text_field = text_field
        self.token_index = token_index
        # Copy over the Thermounit attributes, skipping the (large) raw texts.
        for name, value in thermounit_vars.items():
            if name not in ['texts']:
                setattr(self, name, value)
        # Default color is white until add_color() is called explicitly.
        self.red = '255'
        self.green = '255'
        self.blue = '255'
        self.score = None
        assert not math.isnan(self.attribution), 'Attribution of token {} is NaN'.format(self.token)

    def add_color(self, gamma, threshold=0):
        """Compute and store this token's RGB color from its attribution.

        Scores at or above ``threshold`` fade white -> red, scores below fade
        white -> blue. Must be called explicitly; also stores the rounded
        gamma-corrected score.
        """
        score = self.gamma_correction(self.attribution, gamma)
        if score >= threshold:
            r = str(int(255))
            g = str(int(255 * (1 - score)))
            b = g
        else:
            b = str(int(255))
            r = str(int(255 * (1 + score)))
            g = r
        # TODO: Add more color schemes from: https://colorbrewer2.org/#type=diverging&scheme=RdBu&n=5
        self.red, self.green, self.blue = r, g, b
        self.score = round(score, ndigits=3)

    def __repr__(self):
        # Report the colored score when add_color() ran, else the raw attribution.
        if "score" in vars(self):
            score_str = f'Score: {self.score}'
        else:
            score_str = f'Attribution: {self.attribution}'
        return f'{self.token} (Index: {self.token_index}, {score_str}, Color: {self.hex()}, ' \
               f'Text field: {self.text_field})'

    def __str__(self):
        return repr(self)

    @staticmethod
    def gamma_correction(score, gamma):
        """Sign-preserving power transform: sign(score) * |score| ** gamma."""
        return np.sign(score) * np.power(np.abs(score), gamma)

    def hex(self):
        """Return the current color as a '#rrggbb' hex string."""
        channels = (int(self.red), int(self.green), int(self.blue))
        return '#%02x%02x%02x' % channels
class TextField(list):
    """A list of ColorTokens whose repr is the whitespace-joined token text."""

    def __init__(self, color_tokens):
        super().__init__(color_tokens)

    def __repr__(self):
        tokens = [color_token.token for color_token in self]
        return ' '.join(tokens)
class Heatmap(TextField):
    """A TextField whose tokens are colored by attribution scores.

    Construction recolors every token (optionally overriding each token's
    attribution) and builds a transposed pandas table (one column per
    token index) used as the textual representation.
    """

    def __init__(self, color_tokens, attributions=None, gamma=1.0):
        super().__init__(color_tokens)
        for i in range(len(self)):
            # If explicit attributions are passed, they replace the tokens' own.
            if attributions:
                self[i].attribution = attributions[i]
            self[i].add_color(gamma=gamma)
        # Uses string indexing (__getitem__ below) to collect per-token fields.
        self.table = pd.DataFrame({
            'token_index': delistify(self['token_index']),
            'token': delistify(self['token']),
            'attribution': delistify(self['attribution']),
            'text_field': delistify(self['text_field'])}
        ).set_index('token_index').T

    def __getitem__(self, idx):
        # A string index collects that attribute from every token; any other
        # index falls through to plain list indexing.
        if isinstance(idx, str):
            """ String indexing """
            if idx in list(self[0].__dict__.keys()):
                return [getattr(u, idx) for u in self]
        return list(self)[idx]

    def __repr__(self):
        return repr(self.table)

    def render(self, labels=False):
        """ Uses the displaCy visualization tool to render a HTML from the heatmap """
        # Call this function once for every text field
        # (recurses per field when tokens span more than one field).
        if len(set([t.text_field for t in self])) > 1:
            for field in self[0].text_fields:
                print(f'Heatmap "{field}"')
                Heatmap([t for t in self if t.text_field == field]).render(labels=labels)
            return
        ents = []
        colors = {}
        # Running character offset into the concatenated token text.
        ii = 0
        for color_token in self:
            ff = ii + len(color_token.token)
            # One entity in displaCy contains start and end markers (character index) and optionally a label
            # The label can be added by setting "attribution_labels" to True
            ent = {
                'start': ii,
                'end': ff,
                'label': str(color_token.score),
            }
            ents.append(ent)
            # A "colors" dict takes care of the mapping between attribution labels and hex colors
            colors[str(color_token.score)] = color_token.hex()
            ii = ff
        to_render = {
            'text': ''.join([t.token for t in self]),
            'ents': ents,
        }
        # Two HTML templates: with or without the numeric score label shown.
        if labels:
            template = """
            <mark class="entity" style="background: {bg}; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 2;
            border-radius: 0.35em; box-decoration-break: clone; -webkit-box-decoration-break: clone">
                {text}
                <span style="font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform:
                uppercase; vertical-align: middle; margin-left: 0.5rem">{label}</span>
            </mark>
            """
        else:
            template = """
            <mark class="entity" style="background: {bg}; padding: 0.15em 0.3em; margin: 0 0.2em; line-height: 2.2;
            border-radius: 0.25em; box-decoration-break: clone; -webkit-box-decoration-break: clone">
                {text}
            </mark>
            """
        # In a notebook displaCy displays inline and returns None; otherwise
        # the rendered HTML string is returned.
        html = displacy.render(
            to_render,
            style='ent',
            manual=True,
            jupyter=is_in_jupyter(),
            options={'template': template,
                     'colors': colors,
                     }
        )
        return html if not is_in_jupyter() else None
def token_to_html(token, rgb):
    """Wrap a token in a background-colored HTML span, stripping angle brackets."""
    cleaned = token.replace('<', '').replace('>', '')
    return f"<span style=\"background-color: {rgb}\"> {cleaned} </span>"
def summarize(summary: Dict):
    """Format a key/value summary as an HTML ``<h4>`` block.

    Args:
        summary: Mapping of display names to values.

    Returns:
        HTML string of the form ``<h4>key: value <br/>...</h4>``.
    """
    # Use the unpacked value directly; the original ignored ``v`` and
    # redundantly looked the value up again via summary[k].
    body = ''.join(f"{k}: {v} <br/>" for k, v in summary.items())
    return "<h4>" + body + "</h4>"
def append_heatmap(tokens, scores, latex, gamma, caption, pad_token, formatting="colorbox", truncate_pad=True):
    """Append a LaTeX heatmap figure for (token, score) pairs to ``latex``.

    Non-negative scores are rendered red, negative ones blue, with intensity
    proportional to |score|. Only gamma == 1 is supported. Pad tokens are
    skipped when ``truncate_pad`` is set. Format options: colorbox, text.
    """
    if gamma != 1:
        raise NotImplementedError
    parts = [latex, "\n\\begin{figure}[!htb]"]
    for token, score in zip(tokens, scores):
        # Optionally drop padding tokens entirely.
        if truncate_pad and token == pad_token:
            continue
        color = "red" if score >= 0 else "blue"
        parts.append(f"\\{formatting}" + "{" + f"{color}!{abs(score) * 100}" + "}" + "{" + token + "}")
    parts.append("\\caption{" + f"{caption}" + "}")
    parts.append("\\end{figure}\n")
    return ''.join(parts)
def normalize_attributions(attributions):
    """Scale attribution scores into [-1, 1] by the largest absolute value.

    Args:
        attributions: Iterable of numeric attribution scores.

    Returns:
        List of scores divided by max(|score|). An empty input yields an
        empty list and an all-zero input yields zeros (the original raised
        ValueError / ZeroDivisionError in those cases).
    """
    attributions = list(attributions)
    if not attributions:
        return []
    max_abs_score = max(max(attributions), abs(min(attributions)))
    if max_abs_score == 0:
        return [0.0 for _ in attributions]
    return [(score / max_abs_score) for score in attributions]
def run_visualize(config: Dict, dataset=None):
    """Render heatmap HTML pages for a dataset of explanations.

    NOTE(review): deliberately disabled — the first statement raises, so
    everything below it is dead code kept for reference after the
    Heatmap/ColorToken refactoring (e.g. the ``Heatmap(words=..., scores=...)``
    call no longer matches the current Heatmap signature).
    """
    raise NotImplementedError("Deprecated due to Heatmap and ColorToken refactoring")
    tokenizer = AutoTokenizer.from_pretrained(config['model']['name'])
    visualization_config = config['visualization']
    if not dataset:
        dataset = get_local_explanations(config=visualization_config)
    # Human-readable dataset name, including the optional subset.
    dataset_name = f'{config["dataset"]["name"]}' \
                   f': {config["dataset"]["subset"]}' if 'subset' in config['dataset'] else \
        config['dataset']['name']
    str_dataset_name = f'{dataset_name} ({config["dataset"]["split"]})'
    file_out = open(read_path(config['path_html']), 'w+')
    tokenizer_str = str(type(tokenizer)).split('.')[-1].strip("'>")
    # One HTML section per instance in the dataset.
    for idx_instance in tqdm(range(len(dataset))):
        instance = dataset[idx_instance]
        html = f"<html><h3>"
        html += f"<h2>Instance: {instance['idx']} | Dataset: {str_dataset_name} |" \
                f" Model: {config['model']['name']} | Tokenizer: {tokenizer_str}"
        html += '</h3><div style=\"border:3px solid #000;\">'
        html += "<div>"
        tokens = [tokenizer.decode(token_ids=token_ids) for token_ids in instance['input_ids']]
        atts = detach_to_list(instance['attributions'])
        if visualization_config['normalize']:
            atts = normalize_attributions(atts)
        heatmap = Heatmap(words=tokens, scores=atts, gamma=visualization_config['gamma'])
        summary = {'Sum of Attribution Scores': str(sum(atts))}
        # Label names come either from a nested 'dataset' entry or from the
        # HuggingFace dataset feature metadata.
        if 'dataset' in dataset:
            label_names = dataset['dataset'][0]['label_names']
        else:
            label_names = dataset.info.features['label'].names
        if 'labels' in instance or 'label' in instance:
            if 'labels' in instance:
                label = detach_to_list(instance['labels'])
            else:
                label = instance['label']
            summary['True Label Index'] = str(label)
            summary['True Label'] = str(label_names[label])
        if 'predictions' in instance:
            preds = instance['predictions']
            summary['Logits'] = detach_to_list(preds)
            # argmax for tensors, index-of-max for plain lists.
            preds_max = torch.argmax(preds) if type(preds) == torch.Tensor else preds.index(max(preds))
            preds_max_detached = detach_to_list(preds_max)
            summary['Predicted Label'] = str(label_names[preds_max_detached])
        html += summarize(summary)
        for instance in heatmap:  # brackets to reuse iterator
            html += token_to_html(instance['token'], instance['color'])
        html += "</br></br>"
        html += "</div>"
        html += "</div></br></br></br></html>"
        file_out.write(html + os.linesep)
|
{"hexsha": "9180d100f8adc745f022633acc03c3ffaaada855", "size": 9563, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/thermostat/visualize.py", "max_stars_repo_name": "MNasert/thermostat", "max_stars_repo_head_hexsha": "ad9dbd8942eef63c2df8c0adc2a79568acf31049", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/thermostat/visualize.py", "max_issues_repo_name": "MNasert/thermostat", "max_issues_repo_head_hexsha": "ad9dbd8942eef63c2df8c0adc2a79568acf31049", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/thermostat/visualize.py", "max_forks_repo_name": "MNasert/thermostat", "max_forks_repo_head_hexsha": "ad9dbd8942eef63c2df8c0adc2a79568acf31049", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5, "max_line_length": 121, "alphanum_fraction": 0.5801526718, "include": true, "reason": "import numpy", "num_tokens": 2261}
|
[STATEMENT]
lemma TopoS_modelLibrary_yields_new_configured_SecurityInvariant:
assumes NetModelLib: "TopoS_modelLibrary m sinvar_spec"
and nPdef: "nP = nm_node_props m C"
and formalSpec: "Spec = \<lparr>
c_sinvar = (\<lambda>G. sinvar_spec G nP),
c_offending_flows = (\<lambda>G. SecurityInvariant_withOffendingFlows.set_offending_flows sinvar_spec G nP),
c_isIFS = nm_receiver_violation m
\<rparr>"
shows "new_configured_SecurityInvariant (sinvar_spec, nm_default m, nm_receiver_violation m, nP) = Some Spec"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. new_configured_SecurityInvariant (sinvar_spec, nm_default m, nm_receiver_violation m, nP) = Some Spec
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. new_configured_SecurityInvariant (sinvar_spec, nm_default m, nm_receiver_violation m, nP) = Some Spec
[PROOF STEP]
from NetModelLib
[PROOF STATE]
proof (chain)
picking this:
TopoS_modelLibrary m sinvar_spec
[PROOF STEP]
have NetModel: "SecurityInvariant sinvar_spec (nm_default m) (nm_receiver_violation m)"
[PROOF STATE]
proof (prove)
using this:
TopoS_modelLibrary m sinvar_spec
goal (1 subgoal):
1. SecurityInvariant sinvar_spec (nm_default m) (nm_receiver_violation m)
[PROOF STEP]
by(simp add: TopoS_modelLibrary_def TopoS_List_Impl_def)
[PROOF STATE]
proof (state)
this:
SecurityInvariant sinvar_spec (nm_default m) (nm_receiver_violation m)
goal (1 subgoal):
1. new_configured_SecurityInvariant (sinvar_spec, nm_default m, nm_receiver_violation m, nP) = Some Spec
[PROOF STEP]
have Spec: "\<lparr>c_sinvar = \<lambda>G. sinvar_spec G nP,
c_offending_flows = \<lambda>G. SecurityInvariant_withOffendingFlows.set_offending_flows sinvar_spec G nP,
c_isIFS = nm_receiver_violation m\<rparr> = Spec"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lparr>c_sinvar = \<lambda>G. sinvar_spec G nP, c_offending_flows = \<lambda>G. SecurityInvariant_withOffendingFlows.set_offending_flows sinvar_spec G nP, c_isIFS = nm_receiver_violation m\<rparr> = Spec
[PROOF STEP]
by(simp add: formalSpec)
[PROOF STATE]
proof (state)
this:
\<lparr>c_sinvar = \<lambda>G. sinvar_spec G nP, c_offending_flows = \<lambda>G. SecurityInvariant_withOffendingFlows.set_offending_flows sinvar_spec G nP, c_isIFS = nm_receiver_violation m\<rparr> = Spec
goal (1 subgoal):
1. new_configured_SecurityInvariant (sinvar_spec, nm_default m, nm_receiver_violation m, nP) = Some Spec
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. new_configured_SecurityInvariant (sinvar_spec, nm_default m, nm_receiver_violation m, nP) = Some Spec
[PROOF STEP]
unfolding new_configured_SecurityInvariant.simps
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (if SecurityInvariant sinvar_spec (nm_default m) (nm_receiver_violation m) then Some \<lparr>c_sinvar = \<lambda>G. sinvar_spec G nP, c_offending_flows = \<lambda>G. SecurityInvariant_withOffendingFlows.set_offending_flows sinvar_spec G nP, c_isIFS = nm_receiver_violation m\<rparr> else None) = Some Spec
[PROOF STEP]
by(simp add: NetModel Spec)
[PROOF STATE]
proof (state)
this:
new_configured_SecurityInvariant (sinvar_spec, nm_default m, nm_receiver_violation m, nP) = Some Spec
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1300, "file": "Network_Security_Policy_Verification_TopoS_Composition_Theory_impl", "length": 10}
|
# -*- coding: utf-8 -*-
"""
Plot the interpolation functions for a 4-node isoparametric
element.
@author: Nicolas Guarin-Zapata
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import rcParams
#import seaborn
# Global plot styling: serif fonts at 14 pt; close any figures left over
# from a previous interactive run.
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 14
plt.close("all")
def make_plot(x, y, N):
    """Plot one bilinear shape function over the reference square and save it.

    Parameters
    ----------
    x, y : ndarray
        Meshgrid coordinates over [-1, 1] x [-1, 1].
    N : ndarray
        Shape-function values evaluated on that grid.

    NOTE(review): relies on the module-level counter ``cont`` (1-based node
    number) for the node marker, the z-axis label, and the output filename —
    it must be set by the caller before each call.
    """
    # Corner coordinates of the 4-node reference element.
    x_cords = [-1, 1, 1, -1]
    y_cords = [-1, -1, 1, 1]
    fig = plt.figure(figsize=(8, 5))
    ax = fig.add_subplot(111, projection='3d')
    # Element outline plus a dashed vertical line at the current node.
    ax.plot([-1, 1, 1, -1, -1], [-1, -1, 1, 1, -1], "-ko", zorder=-10)
    ax.plot([x_cords[cont-1], x_cords[cont-1]],
            [y_cords[cont-1], y_cords[cont-1]], [0, 1], "--k", zorder=-10)
    ax.plot_surface(x, y, N, cstride=1, rstride=1, cmap="YlGnBu_r",
                    alpha=0.6, lw=0.5, zorder=3)
    ax.view_init(azim=-60, elev=30)
    ax.set_xlabel(r"$x$", fontsize=18)
    ax.set_ylabel(r"$y$", fontsize=18)
    ax.set_zlabel(r"$N^%i(x, y)$"%cont, fontsize=18)
    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 1)
    ax.set_zlim(0, 1)
    # Writes one PDF per node into the documentation image folder.
    plt.savefig("../../img/TheFEM/shape_func-4-nodes-%i.pdf"%cont,
                bbox_inches="tight", pad_inches=0.1, transparent=True)
# Sample the reference square with a 21x21 grid.
x, y = np.mgrid[-1:1:21j, -1:1:21j]
# The four bilinear (Q4) shape functions of the isoparametric element.
N1 = 0.25*(1 - x)*(1 - y)
N2 = 0.25*(1 + x)*(1 - y)
N3 = 0.25*(1 + x)*(1 + y)
N4 = 0.25*(1 - x)*(1 + y)
# ``cont`` is the 1-based node number read globally by make_plot().
cont = 0
for N in [N1, N2, N3, N4]:
    cont = cont + 1
    make_plot(x, y, N)
plt.show()
|
{"hexsha": "81abed26b6e93a64c5da30d7e9d09538a0df7262", "size": 1461, "ext": "py", "lang": "Python", "max_stars_repo_path": "img_src/TheFEM/Four_nodes_shape_func.py", "max_stars_repo_name": "jomorlier/FEM-Notes", "max_stars_repo_head_hexsha": "3b81053aee79dc59965c3622bc0d0eb6cfc7e8ae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-15T01:53:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-15T01:53:14.000Z", "max_issues_repo_path": "img_src/TheFEM/Four_nodes_shape_func.py", "max_issues_repo_name": "jomorlier/FEM-Notes", "max_issues_repo_head_hexsha": "3b81053aee79dc59965c3622bc0d0eb6cfc7e8ae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "img_src/TheFEM/Four_nodes_shape_func.py", "max_forks_repo_name": "jomorlier/FEM-Notes", "max_forks_repo_head_hexsha": "3b81053aee79dc59965c3622bc0d0eb6cfc7e8ae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-25T17:19:53.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-25T17:19:53.000Z", "avg_line_length": 29.8163265306, "max_line_length": 74, "alphanum_fraction": 0.5886379192, "include": true, "reason": "import numpy", "num_tokens": 566}
|
import numpy
import torch
from .isnumpy import *
from .istorch import *
def squeeze(tensor):
    """Remove every dimension of size 1 from the input tensor.

    Parameters
    ----------
    tensor : Tensor
        A NumPy array or a PyTorch tensor.

    Returns
    -------
    Tensor
        The input with all singleton dimensions removed.

    Raises
    ------
    AssertionError
        If the input is neither a NumPy array nor a PyTorch tensor.
    """
    if isnumpy(tensor):
        result = numpy.squeeze(tensor)
    elif istorch(tensor):
        result = tensor.squeeze()
    else:
        assert False, 'Unknown data type'
    return result
|
{"hexsha": "b1d24d84b0327dd122bd57b4db2616e045c40926", "size": 564, "ext": "py", "lang": "Python", "max_stars_repo_path": "ACME/utility/squeeze.py", "max_stars_repo_name": "mauriziokovacic/ACME", "max_stars_repo_head_hexsha": "2615b66dd4addfd5c03d9d91a24c7da414294308", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-10-23T23:10:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-01T07:30:14.000Z", "max_issues_repo_path": "ACME/utility/squeeze.py", "max_issues_repo_name": "mauriziokovacic/ACME-Python", "max_issues_repo_head_hexsha": "2615b66dd4addfd5c03d9d91a24c7da414294308", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ACME/utility/squeeze.py", "max_forks_repo_name": "mauriziokovacic/ACME-Python", "max_forks_repo_head_hexsha": "2615b66dd4addfd5c03d9d91a24c7da414294308", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-11T11:35:43.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-11T11:35:43.000Z", "avg_line_length": 17.625, "max_line_length": 63, "alphanum_fraction": 0.6028368794, "include": true, "reason": "import numpy", "num_tokens": 125}
|
import json
import logging
import os
import random
import time
import warnings
import numpy as np
import pandas as pd
from autogluon.core import Space
from autogluon.core.constants import QUANTILE
from autogluon.core.utils import try_import_torch
from autogluon.core.utils.exceptions import TimeLimitExceeded
from autogluon.core.models.abstract.abstract_model import AbstractNeuralNetworkModel
from .tabular_nn_model import TabularNeuralNetModel
from ..utils import fixedvals_from_searchspaces
logger = logging.getLogger(__name__)
class TabularNeuralQuantileModel(TabularNeuralNetModel):
"""
Class for neural network models that operate on tabular data for multi-quantile prediction based on PyTorch.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
if self.problem_type != QUANTILE:
raise ValueError("This neural network is only available for quantile regression")
def set_net_defaults(self, train_dataset, params):
""" Sets dataset-adaptive default values to use for our neural network """
# infer default y-range
if params['y_range'] is None:
y_vals = train_dataset.data_list[train_dataset.label_index]
min_y = float(np.min(y_vals))
max_y = float(np.max(y_vals))
std_y = np.std(y_vals)
y_ext = params['y_range_extend'] * std_y
# infer y must be non-negative
if min_y >= 0:
min_y = max(0, min_y - y_ext)
else:
min_y = min_y - y_ext
# infer y must be non-positive
if max_y <= 0:
max_y = min(0, max_y + y_ext)
else:
max_y = max_y+y_ext
params['y_range'] = (min_y, max_y)
return
    def _fit(self, X, y, X_val=None, y_val=None,
             time_limit=None, sample_weight=None, num_cpus=1, num_gpus=0, reporter=None, **kwargs):
        """Preprocess the data, build the quantile network, and train it.

        X/y may be a DataFrame/labels pair or an already-built
        TabularPyTorchDataset; X_val/y_val are the optional validation split.
        time_limit is a soft budget in seconds; reporter is an optional HPO
        callback forwarded to train_net.
        """
        try_import_torch()
        import torch
        from .tabular_nn_torch import TabularPyTorchDataset
        start_time = time.time()
        self.verbosity = kwargs.get('verbosity', 2)
        if sample_weight is not None:  # TODO: support
            logger.log(15, "sample_weight not yet supported for TabularNeuralQuantileModel,"
                           " this model will ignore them in training.")
        params = self.params.copy()
        # Collapse any remaining hyperparameter search spaces to fixed values.
        params = fixedvals_from_searchspaces(params)
        if num_cpus is not None:
            self.num_dataloading_workers = max(1, int(num_cpus/2.0))
        else:
            self.num_dataloading_workers = 1
        if self.num_dataloading_workers == 1:
            self.num_dataloading_workers = 0  # 0 is always faster and uses less memory than 1
        # NOTE(review): this unconditional override makes the num_cpus-based
        # worker computation above dead code — confirm it is intentional.
        self.num_dataloading_workers = 0
        self.max_batch_size = params['max_batch_size']
        # Batch size scales as 8 * 2**floor(log10(n)), capped at max_batch_size.
        if isinstance(X, TabularPyTorchDataset):
            self.batch_size = min(int(2 ** (3 + np.floor(np.log10(len(X))))), self.max_batch_size)
        else:
            self.batch_size = min(int(2 ** (3 + np.floor(np.log10(X.shape[0])))), self.max_batch_size)
        train_dataset, val_dataset = self.generate_datasets(X=X, y=y, params=params, X_val=X_val, y_val=y_val)
        logger.log(15, "Training data for TabularNeuralQuantileModel has: %d examples, %d features "
                       "(%d vector, %d embedding, %d language)" %
                   (train_dataset.num_examples, train_dataset.num_features,
                    len(train_dataset.feature_groups['vector']), len(train_dataset.feature_groups['embed']),
                    len(train_dataset.feature_groups['language'])))
        # Device selection: at most one GPU is used even if more are requested.
        if num_gpus is not None and num_gpus >= 1:
            if torch.cuda.is_available():
                self.device = torch.device("cuda")
                if num_gpus > 1:
                    logger.warning("TabularNeuralQuantileModel not yet configured to use more than 1 GPU."
                                   " 'num_gpus' set to >1, but we will be using only 1 GPU.")
            else:
                self.device = torch.device("cpu")
        else:
            self.device = torch.device("cpu")
        self.get_net(train_dataset, params=params)
        if time_limit is not None:
            time_elapsed = time.time() - start_time
            time_limit_orig = time_limit
            time_limit = time_limit - time_elapsed
            # if 60% of time was spent preprocessing, likely not enough time to train model
            if time_limit <= time_limit_orig * 0.4:
                raise TimeLimitExceeded
        # train network
        self.train_net(train_dataset=train_dataset,
                       params=params,
                       val_dataset=val_dataset,
                       initialize=True,
                       setup_trainer=True,
                       time_limit=time_limit,
                       reporter=reporter)
        self.params_post_fit = params
def get_net(self, train_dataset, params):
from .tabular_nn_torch import NeuralMultiQuantileRegressor
# set network params
self.set_net_defaults(train_dataset, params)
self.model = NeuralMultiQuantileRegressor(quantile_levels=self.quantile_levels,
train_dataset=train_dataset, params=params, device=self.device)
self.model = self.model.to(self.device)
if not os.path.exists(self.path):
os.makedirs(self.path)
    def train_net(self, train_dataset, params, val_dataset=None, initialize=True, setup_trainer=True, time_limit=None, reporter=None):
        """Run the update-based training loop with periodic validation.

        Trains for up to params['num_updates'] optimizer steps, evaluating on
        val_dataset every 100 updates, checkpointing the best model to a temp
        file, and early-stopping after params['updates_wo_improve'] updates
        without validation improvement. time_limit (seconds) aborts training
        early; reporter is an optional HPO progress callback.
        """
        import torch
        start_time = time.time()
        logger.log(15, "Training neural network for quantile prediction for up to %s updates..." % params['num_updates'])
        seed_value = params.get('seed_value')
        if seed_value is not None:  # Set seed
            random.seed(seed_value)
            np.random.seed(seed_value)
            torch.manual_seed(seed_value)
        if initialize:
            logging.debug("initializing neural network...")
            self.model.init_params()
            logging.debug("initialized")
        if setup_trainer:
            self.optimizer = self.setup_trainer(params=params)
        train_dataloader = train_dataset.build_loader(self.batch_size, self.num_dataloading_workers, is_test=False)
        best_val_metric = -np.inf  # higher = better
        best_val_update = 0
        val_improve_update = 0  # most recent update where validation-score strictly improved
        num_updates = params['num_updates']
        updates_wo_improve = params['updates_wo_improve']
        if val_dataset is not None:
            y_val = val_dataset.get_labels()
        else:
            y_val = None
        if self.verbosity <= 1:
            verbose_eval = False
        else:
            verbose_eval = True
        net_filename = self.path + self.temp_file_name
        if num_updates == 0:
            # use dummy training loop that stops immediately
            # useful for using NN just for data preprocessing / debugging
            # NOTE(review): despite the log message this still performs one
            # optimizer step on the first batch before saving — confirm intended.
            logger.log(20, "Not training Neural Net since num_updates == 0. Neural network architecture is:")
            # for each batch
            for batch_idx, data_batch in enumerate(train_dataloader):
                loss = self.model.compute_loss(data_batch, weight=1.0, margin=params['gamma'])
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                if batch_idx > 0:
                    break
            os.makedirs(os.path.dirname(self.path), exist_ok=True)
            torch.save(self.model, net_filename)
            logger.log(15, "untrained Quantile Neural Network saved to file")
            return
        start_fit_time = time.time()
        if time_limit is not None:
            time_limit = time_limit - (start_fit_time - start_time)
        # start training Loop:
        logger.log(15, "Start training Qunatile Neural network")
        total_updates = 0
        do_update = True
        while do_update:
            total_train_loss = 0.0
            total_train_size = 0.0
            for batch_idx, data_batch in enumerate(train_dataloader):
                # forward
                # Loss weight follows a cosine schedule from 1 down to 0 over
                # the first updates_wo_improve updates.
                weight = (np.cos(min((total_updates / float(updates_wo_improve)), 1.0) * np.pi) + 1) * 0.5
                loss = self.model.compute_loss(data_batch, weight=weight, margin=params['gamma'])
                total_train_loss += loss.item()
                total_train_size += 1
                # update
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                total_updates += 1
                # validation
                if total_updates % 100 == 0 and val_dataset is not None:
                    # compute validation score
                    val_metric = self.score(X=val_dataset, y=y_val, metric=self.stopping_metric)
                    if np.isnan(val_metric):
                        if total_updates == 1:
                            raise RuntimeError("NaNs encountered in TabularNeuralQuantileModel training. "
                                               "Features/labels may be improperly formatted, "
                                               "or NN weights may have diverged.")
                        else:
                            logger.warning("Warning: NaNs encountered in TabularNeuralQuantileModel training. "
                                           "Reverting model to last checkpoint without NaNs.")
                            break
                    # update best validation
                    # (checkpoint on ties too; strict improvement resets the
                    # early-stopping counter val_improve_update)
                    if (val_metric >= best_val_metric) or (total_updates == 1):
                        if val_metric > best_val_metric:
                            val_improve_update = total_updates
                        best_val_metric = val_metric
                        best_val_update = total_updates
                        os.makedirs(os.path.dirname(self.path), exist_ok=True)
                        torch.save(self.model, net_filename)
                    if verbose_eval:
                        logger.log(15, "Update %s. Train loss: %s, Val %s: %s" %
                                   (total_updates, total_train_loss/total_train_size, self.stopping_metric.name, val_metric))
                    if reporter is not None:
                        reporter(epoch=total_updates,
                                 validation_performance=val_metric,  # Higher val_metric = better
                                 train_loss=total_train_loss/total_train_size,
                                 eval_metric=self.eval_metric.name,
                                 greater_is_better=self.eval_metric.greater_is_better)
                    # no improvement
                    if total_updates - val_improve_update > updates_wo_improve:
                        do_update = False
                        break
                elif total_updates % 100 == 0:
                    # No validation data: every 100th update counts as "best".
                    best_val_update = total_updates
                    if verbose_eval:
                        logger.log(15, "Update %s. Train loss: %s" % (total_updates, total_train_loss/total_train_size))
                # time limit
                if time_limit is not None:
                    time_elapsed = time.time() - start_fit_time
                    if time_limit < time_elapsed:
                        logger.log(15, f"\tRan out of time, stopping training early. (Stopping on updates {total_updates})")
                        do_update = False
                        break
                # max updates
                if total_updates == num_updates:
                    logger.log(15, f"\tReached the max number of updates. ({num_updates})")
                    do_update = False
                    break
        # revert back to best model
        if val_dataset is not None:
            try:
                self.model = torch.load(net_filename)
                os.remove(net_filename)
            except FileNotFoundError:
                pass
            # evaluate one final time
            final_val_metric = self.score(X=val_dataset, y=y_val, metric=self.stopping_metric)
            if np.isnan(final_val_metric):
                final_val_metric = -np.inf
            logger.log(15, "Best model found in updates %d. Val %s: %s" %
                       (best_val_update, self.stopping_metric.name, final_val_metric))
        else:
            logger.log(15, "Best model found in updates %d" % best_val_update)
        self.params_trained['num_updates'] = best_val_update
        return
def _predict_proba(self, X, **kwargs):
""" To align predict with abstract_model API.
Preprocess here only refers to feature processing steps done by all AbstractModel objects,
not tabularNN-specific preprocessing steps.
If X is not DataFrame but instead TabularNNDataset object, we can still produce predictions,
but cannot use preprocess in this case (needs to be already processed).
"""
from .tabular_nn_torch import TabularPyTorchDataset
if isinstance(X, TabularPyTorchDataset):
return self._predict_tabular_data(new_data=X, process=False)
elif isinstance(X, pd.DataFrame):
X = self.preprocess(X, **kwargs)
return self._predict_tabular_data(new_data=X, process=True)
else:
raise ValueError("X must be of type pd.DataFrame or TabularPyTorchDataset, not type: %s" % type(X))
def _predict_tabular_data(self, new_data, process=True, predict_proba=True):
from .tabular_nn_torch import TabularPyTorchDataset
if process:
new_data = self.process_test_data(new_data, None)
if not isinstance(new_data, TabularPyTorchDataset):
raise ValueError("new_data must of of type TabularNNDataset if process=False")
val_dataloader = new_data.build_loader(self.max_batch_size, self.num_dataloading_workers, is_test=True)
preds_dataset = []
for batch_idx, data_batch in enumerate(val_dataloader):
preds_batch = self.model.predict(data_batch)
preds_dataset.append(preds_batch)
preds_dataset = np.concatenate(preds_dataset, 0)
return preds_dataset
def generate_datasets(self, X, y, params, X_val=None, y_val=None):
from .tabular_nn_torch import TabularPyTorchDataset
impute_strategy = params['proc.impute_strategy']
max_category_levels = params['proc.max_category_levels']
skew_threshold = params['proc.skew_threshold']
embed_min_categories = params['proc.embed_min_categories']
use_ngram_features = params['use_ngram_features']
if isinstance(X, TabularPyTorchDataset):
train_dataset = X
else:
X = self.preprocess(X)
train_dataset = self.process_train_data(df=X, labels=y,
impute_strategy=impute_strategy,
max_category_levels=max_category_levels,
skew_threshold=skew_threshold,
embed_min_categories=embed_min_categories,
use_ngram_features=use_ngram_features)
if X_val is not None:
if isinstance(X_val, TabularPyTorchDataset):
val_dataset = X_val
else:
X_val = self.preprocess(X_val)
val_dataset = self.process_test_data(df=X_val, labels=y_val)
else:
val_dataset = None
return train_dataset, val_dataset
def process_test_data(self, df, labels=None, **kwargs):
""" Process train or test DataFrame into a form fit for neural network models.
Args:
df (pd.DataFrame): Data to be processed (X)
labels (pd.Series): labels to be processed (y)
Returns:
Dataset object
"""
from .tabular_nn_torch import TabularPyTorchDataset
# sklearn processing n_quantiles warning
warnings.filterwarnings("ignore", module='sklearn.preprocessing')
if labels is not None and len(labels) != len(df):
raise ValueError("Number of examples in Dataframe does not match number of labels")
if (self.processor is None or self._types_of_features is None
or self.feature_arraycol_map is None or self.feature_type_map is None):
raise ValueError("Need to process training data before test data")
if self.features_to_drop:
drop_cols = [col for col in df.columns if col in self.features_to_drop]
if drop_cols:
df = df.drop(columns=drop_cols)
# self.feature_arraycol_map, self.feature_type_map have been previously set while processing training data.
df = self.processor.transform(df)
return TabularPyTorchDataset(df, self.feature_arraycol_map, self.feature_type_map, labels)
    def process_train_data(self, df, impute_strategy, max_category_levels, skew_threshold,
                           embed_min_categories, use_ngram_features, labels, **kwargs):
        """Fit the feature preprocessor on training data and build the dataset.

        Infers per-column feature types, fits the preprocessing pipeline, and
        records the feature-to-column and feature-to-type maps used later by
        process_test_data. Raises ValueError when labels are missing or their
        length does not match df.
        """
        from .tabular_nn_torch import TabularPyTorchDataset
        # sklearn processing n_quantiles warning
        warnings.filterwarnings("ignore", module='sklearn.preprocessing')
        if labels is None:
            raise ValueError("Attempting process training data without labels")
        if len(labels) != len(df):
            raise ValueError("Number of examples in Dataframe does not match number of labels")
        # dict with keys: : 'continuous', 'skewed', 'onehot', 'embed', 'language', values = column-names of df
        self._types_of_features, df = self._get_types_of_features(df, skew_threshold=skew_threshold,
                                                                  embed_min_categories=embed_min_categories,
                                                                  use_ngram_features=use_ngram_features)
        logger.log(15, "AutoGluon Qunatile Neural Network (pytorch) infers features are of the following types:")
        logger.log(15, json.dumps(self._types_of_features, indent=4))
        logger.log(15, "\n")
        self.processor = self._create_preprocessor(impute_strategy=impute_strategy,
                                                   max_category_levels=max_category_levels)
        df = self.processor.fit_transform(df)
        # OrderedDict of feature-name -> list of column-indices in df corresponding to this feature
        self.feature_arraycol_map = self._get_feature_arraycol_map(max_category_levels=max_category_levels)
        # should match number of columns in processed array
        num_array_cols = np.sum([len(self.feature_arraycol_map[key]) for key in self.feature_arraycol_map])
        if num_array_cols != df.shape[1]:
            raise ValueError("Error during one-hot encoding data processing for neural network."
                             " Number of columns in df array does not match feature_arraycol_map.")
        # OrderedDict of feature-name -> feature_type string (options: 'vector', 'embed', 'language')
        self.feature_type_map = self._get_feature_type_map()
        return TabularPyTorchDataset(df, self.feature_arraycol_map, self.feature_type_map, labels)
def setup_trainer(self, params, **kwargs):
"""
Set up optimizer needed for training.
Network must first be initialized before this.
"""
import torch
if params['optimizer'] == 'sgd':
optimizer = torch.optim.SGD(params=self.model.parameters(),
lr=params['learning_rate'],
weight_decay=params['weight_decay'])
elif params['optimizer'] == 'adam':
optimizer = torch.optim.Adam(params=self.model.parameters(),
lr=params['learning_rate'],
weight_decay=params['weight_decay'])
else:
raise ValueError("Unknown optimizer specified: %s" % params['optimizer'])
return optimizer
    def save(self, path: str = None, verbose=True) -> str:
        """Save the model to disk and return the final save path.

        The torch network is detached before pickling: ``self.model`` is set
        to None so the parent ``save`` pickles the wrapper without the
        network, and the network itself is written separately with
        ``torch.save``.  ``self.model`` is restored afterwards.
        """
        if self.model is not None:
            # Keep the architecture description so load() can rebuild the net.
            self._architecture_desc = self.model.architecture_desc
        temp_model = self.model
        self.model = None
        path_final = super().save(path=path, verbose=verbose)
        self.model = temp_model
        self._architecture_desc = None
        # Export model
        if self.model is not None:
            import torch
            params_filepath = path_final + self.params_file_name
            # TODO: Don't use os.makedirs here, have save_parameters function in tabular_nn_model that checks if local path or S3 path
            os.makedirs(os.path.dirname(path_final), exist_ok=True)
            torch.save(self.model, params_filepath)
        return path_final
    @classmethod
    def load(cls, path: str, reset_paths=True, verbose=True):
        """Load a model previously written by :meth:`save`.

        Restores the pickled wrapper first, then the torch network from the
        separate parameters file.
        """
        model: TabularNeuralQuantileModel = AbstractNeuralNetworkModel.load(path=path, reset_paths=reset_paths, verbose=verbose)
        if model._architecture_desc is not None:
            import torch
            from .tabular_nn_torch import NeuralMultiQuantileRegressor
            # recreate network from architecture description
            model.model = NeuralMultiQuantileRegressor(quantile_levels=model.quantile_levels,
                                                       architecture_desc=model._architecture_desc,
                                                       device=model.device)
            model._architecture_desc = None
            # NOTE(review): the network rebuilt just above is immediately
            # replaced by the full torch.load below, so the rebuild looks
            # redundant -- confirm (e.g. device placement) before removing.
            model.model = torch.load(model.path + model.params_file_name)
        return model
    def _hyperparameter_tune(self, X, y, X_val, y_val, scheduler_options, **kwargs):
        """Performs HPO and sets self.params to best hyperparameter values.

        Saves preprocessed train/validation datasets to disk, registers the
        training trial with the scheduler, runs all HPO jobs, and returns the
        aggregated results.

        Args:
            X, y: training features / labels.
            X_val, y_val: validation features / labels.
            scheduler_options: (scheduler_cls, scheduler_params) tuple; both
                must be non-None.
        """
        try_import_torch()
        from .tabular_nn_torch import tabular_pytorch_trial, TabularPyTorchDataset
        time_start = time.time()
        self.verbosity = kwargs.get('verbosity', 2)
        logger.log(15, "Beginning hyperparameter tuning for Neural Network...")
        # changes non-specified default hyperparams from fixed values to search-spaces.
        self._set_default_searchspace()
        scheduler_cls, scheduler_params = scheduler_options  # Unpack tuple
        if scheduler_cls is None or scheduler_params is None:
            raise ValueError("scheduler_cls and scheduler_params cannot be None for hyperparameter tuning")
        num_cpus = scheduler_params['resource']['num_cpus']
        params_copy = self.params.copy()
        # Half the available CPUs feed the data loader (at least one worker).
        self.num_dataloading_workers = max(1, int(num_cpus/2.0))
        self.max_batch_size = params_copy['max_batch_size']
        # Batch size grows with the order of magnitude of the training set,
        # capped at max_batch_size.
        self.batch_size = min(int(2 ** (3 + np.floor(np.log10(X.shape[0])))), self.max_batch_size)
        train_dataset, val_dataset = self.generate_datasets(X=X, y=y, params=params_copy, X_val=X_val, y_val=y_val)
        train_path = self.path + "train"
        val_path = self.path + "validation"
        # Datasets are persisted so each scheduled trial can reload them.
        train_dataset.save(file_prefix=train_path)
        val_dataset.save(file_prefix=val_path)
        if not np.any([isinstance(params_copy[hyperparam], Space) for hyperparam in params_copy]):
            logger.warning("Warning: Attempting to do hyperparameter optimization without any search space (all hyperparameters are already fixed values)")
        else:
            logger.log(15, "Hyperparameter search space for Neural Network: ")
            for hyperparam in params_copy:
                if isinstance(params_copy[hyperparam], Space):
                    logger.log(15, str(hyperparam)+ ": "+str(params_copy[hyperparam]))
        util_args = dict(
            train_path=train_path,
            val_path=val_path,
            model=self,
            time_start=time_start,
            time_limit=scheduler_params['time_out'],
            fit_kwargs=scheduler_params['resource'],
        )
        tabular_pytorch_trial.register_args(util_args=util_args, **params_copy)
        scheduler = scheduler_cls(tabular_pytorch_trial, **scheduler_params)
        if ('dist_ip_addrs' in scheduler_params) and (len(scheduler_params['dist_ip_addrs']) > 0):
            # TODO: Ensure proper working directory setup on remote machines
            # This is multi-machine setting, so need to copy dataset to workers:
            logger.log(15, "Uploading preprocessed data to remote workers...")
            scheduler.upload_files([
                train_path + TabularPyTorchDataset.DATAOBJ_SUFFIX,
                val_path + TabularPyTorchDataset.DATAOBJ_SUFFIX,
            ])  # TODO: currently does not work.
            logger.log(15, "uploaded")
        scheduler.run()
        scheduler.join_jobs()
        return self._get_hpo_results(scheduler=scheduler, scheduler_params=scheduler_params, time_start=time_start)
|
{"hexsha": "37ddf165edff4be1e6cd59ed8310b55ed557d6c5", "size": 25106, "ext": "py", "lang": "Python", "max_stars_repo_path": "tabular/src/autogluon/tabular/models/tabular_nn/tabular_nn_quantile.py", "max_stars_repo_name": "songqiang/autogluon", "max_stars_repo_head_hexsha": "529d7cc65fad411622072aa0349215a15e1e901c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-05-17T08:02:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T13:03:17.000Z", "max_issues_repo_path": "tabular/src/autogluon/tabular/models/tabular_nn/tabular_nn_quantile.py", "max_issues_repo_name": "songqiang/autogluon", "max_issues_repo_head_hexsha": "529d7cc65fad411622072aa0349215a15e1e901c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tabular/src/autogluon/tabular/models/tabular_nn/tabular_nn_quantile.py", "max_forks_repo_name": "songqiang/autogluon", "max_forks_repo_head_hexsha": "529d7cc65fad411622072aa0349215a15e1e901c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.7495145631, "max_line_length": 155, "alphanum_fraction": 0.6097745559, "include": true, "reason": "import numpy", "num_tokens": 5040}
|
import numpy as np
import random
from ..utils.operations import create_3D_rotations
class SemsegAugmentation():
    """Class consisting different augmentation methods for Semantic Segmentation.

    Each static method implements one augmentation driven by a config dict;
    :meth:`augment` chains them based on which keys are present in ``cfg``.
    Several methods draw random numbers, so the order of calls matters for
    reproducibility.

    Args:
        cfg: Config for augmentation.
    """
    def __init__(self, cfg):
        # Stored for callers; the static methods below take their own cfg.
        self.cfg = cfg
    @staticmethod
    def normalize(pc, feat, cfg):
        """Recenter/rescale point coordinates and (optionally) features.

        cfg may contain 'points' and/or 'feat' sub-configs with keys
        'recentering' (bool) and 'method' (only 'linear' has an effect).
        """
        if 'points' in cfg:
            cfg_p = cfg['points']
            if cfg_p.get('recentering', False):
                pc -= pc.mean(0)
            # NOTE(review): with both recentering and the default 'linear'
            # method, the mean is subtracted twice (the second subtraction is
            # a near no-op after the first) -- confirm intent.
            if cfg_p.get('method', 'linear') == 'linear':
                pc -= pc.mean(0)
                # Scale so the largest per-axis extent becomes 1.
                pc /= (pc.max(0) - pc.min(0)).max()
        if 'feat' in cfg and feat is not None:
            cfg_f = cfg['feat']
            if cfg_f.get('recentering', False):
                feat -= feat.mean(0)
            if cfg_f.get('method', 'linear') == 'linear':
                bias = cfg_f.get('bias', 0)
                scale = cfg_f.get('scale', 1)
                feat -= bias
                feat /= scale
        return pc, feat
    @staticmethod
    def rotate(pc, cfg):
        """Apply a random rotation to the point cloud.

        cfg['method'] selects 'vertical' (random rotation about the z axis,
        the default) or 'all' (random 3D rotation via create_3D_rotations).
        Any other method leaves pc multiplied by the identity.
        """
        # Initialize rotation matrix
        R = np.eye(pc.shape[1])
        method = cfg.get('method', 'vertical')
        if method == 'vertical':
            # Create random rotations
            theta = np.random.rand() * 2 * np.pi
            c, s = np.cos(theta), np.sin(theta)
            R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)
        elif method == 'all':
            # Choose two random angles for the first vector in polar coordinates
            theta = np.random.rand() * 2 * np.pi
            phi = (np.random.rand() - 0.5) * np.pi
            # Create the first vector in carthesian coordinates
            u = np.array([
                np.cos(theta) * np.cos(phi),
                np.sin(theta) * np.cos(phi),
                np.sin(phi)
            ])
            # Choose a random rotation angle
            alpha = np.random.rand() * 2 * np.pi
            # Create the rotation matrix with this vector and angle
            R = create_3D_rotations(np.reshape(u, (1, -1)),
                                    np.reshape(alpha, (1, -1)))[0]
        R = R.astype(np.float32)
        return np.matmul(pc, R)
    @staticmethod
    def scale(pc, cfg):
        """Scale the point cloud by a random factor in [min_s, max_s].

        With 'scale_anisotropic' a separate factor is drawn per axis.
        """
        # Choose random scales for each example
        scale_anisotropic = cfg.get('scale_anisotropic', False)
        min_s = cfg.get('min_s', 1.)
        max_s = cfg.get('max_s', 1.)
        if scale_anisotropic:
            scale = np.random.rand(pc.shape[1]) * (max_s - min_s) + min_s
        else:
            scale = np.random.rand() * (max_s - min_s) + min_s
        return pc * scale
    @staticmethod
    def noise(pc, cfg):
        """Add i.i.d. Gaussian noise with std cfg['noise_std'] to each coordinate."""
        noise_std = cfg.get('noise_std', 0.001)
        noise = (np.random.randn(pc.shape[0], pc.shape[1]) * noise_std).astype(
            np.float32)
        return pc + noise
    @staticmethod
    def RandomDropout(pc, feats, labels, cfg):
        """With probability dropout_ratio, keep a random (1 - dropout_ratio)
        fraction of the points (and matching feats/labels); otherwise return
        the inputs unchanged."""
        dropout_ratio = cfg.get('dropout_ratio', 0.2)
        if random.random() < dropout_ratio:
            N = len(pc)
            inds = np.random.choice(N,
                                    int(N * (1 - dropout_ratio)),
                                    replace=False)
            return pc[inds], feats[inds], labels[inds]
        return pc, feats, labels
    @staticmethod
    def RandomHorizontalFlip(pc, cfg):
        """Mirror the cloud along each configured axis with probability 0.5,
        gated by an overall 0.95 chance of applying the transform at all."""
        axes = cfg.get('axes', [0, 1])
        if random.random() < 0.95:
            for curr_ax in axes:
                if random.random() < 0.5:
                    pc_max = np.max(pc[:, curr_ax])
                    # Reflect about the axis maximum.
                    pc[:, curr_ax] = pc_max - pc[:, curr_ax]
        return pc
    @staticmethod
    def ChromaticAutoContrast(feats, cfg):
        """With probability 0.2, stretch RGB (first 3 feature columns) to the
        full 0-255 range and blend with the original colors."""
        randomize_blend_factor = cfg.get('randomize_blend_factor', True)
        blend_factor = cfg.get('blend_factor', 0.5)
        if random.random() < 0.2:
            lo = feats[:, :3].min(0, keepdims=True)
            hi = feats[:, :3].max(0, keepdims=True)
            assert hi.max(
            ) > 1, "Invalid color value. Color is supposed to be in [0-255] for ChromaticAutoContrast augmentation"
            scale = 255 / (hi - lo)
            contrast_feats = (feats[:, :3] - lo) * scale
            blend_factor = random.random(
            ) if randomize_blend_factor else blend_factor
            feats[:, :3] = (
                1 - blend_factor) * feats[:, :3] + blend_factor * contrast_feats
        return feats
    @staticmethod
    def ChromaticTranslation(feats, cfg):
        """With probability 0.95, shift RGB by a random per-channel offset of
        up to +/- 255 * trans_range_ratio, clipped to [0, 255]."""
        trans_range_ratio = cfg.get('trans_range_ratio', 0.1)
        if random.random() < 0.95:
            tr = (np.random.rand(1, 3) - 0.5) * 255 * 2 * trans_range_ratio
            feats[:, :3] = np.clip(tr + feats[:, :3], 0, 255)
        return feats
    @staticmethod
    def ChromaticJitter(feats, cfg):
        """With probability 0.95, add per-point Gaussian noise (std * 255) to
        RGB, clipped to [0, 255]."""
        std = cfg.get('std', 0.01)
        if random.random() < 0.95:
            noise = np.random.randn(feats.shape[0], 3)
            noise *= std * 255
            feats[:, :3] = np.clip(noise + feats[:, :3], 0, 255)
        return feats
    def augment(self, point, feat, labels, cfg):
        """Apply every augmentation whose key is present in cfg, in a fixed
        order, and return the (possibly modified) point/feat/labels triple."""
        if cfg is None:
            return point, feat, labels
        if 'normalize' in cfg:
            point, feat = self.normalize(point, feat, cfg['normalize'])
        if 'rotate' in cfg:
            point = self.rotate(point, cfg['rotate'])
        if 'scale' in cfg:
            point = self.scale(point, cfg['scale'])
        if 'noise' in cfg:
            point = self.noise(point, cfg['noise'])
        if 'RandomDropout' in cfg:
            point, feat, labels = self.RandomDropout(point, feat, labels,
                                                     cfg['RandomDropout'])
        if 'RandomHorizontalFlip' in cfg:
            point = self.RandomHorizontalFlip(point,
                                              cfg['RandomHorizontalFlip'])
        if 'ChromaticAutoContrast' in cfg:
            feat = self.ChromaticAutoContrast(feat,
                                              cfg['ChromaticAutoContrast'])
        if 'ChromaticTranslation' in cfg:
            feat = self.ChromaticTranslation(feat, cfg['ChromaticTranslation'])
        if 'ChromaticJitter' in cfg:
            feat = self.ChromaticJitter(feat, cfg['ChromaticJitter'])
        return point, feat, labels
class ObjdetAugmentation():
    """Class consisting different augmentation for Object Detection.

    NOTE(review): several methods reference helpers that are not defined or
    imported in this module (``in_range_bev``, ``sample_class``,
    ``remove_points_in_boxes``) -- they must be provided by the importing
    context; verify before calling these methods standalone.
    """
    @staticmethod
    def PointShuffle(data):
        """Shuffle data['point'] in place (along the first axis) and return data."""
        np.random.shuffle(data['point'])
        return data
    @staticmethod
    def ObjectRangeFilter(data, pcd_range):
        """Drop bounding boxes whose bird's-eye-view footprint lies outside
        pcd_range; points and calibration are passed through unchanged."""
        pcd_range = np.array(pcd_range)
        # [x_min, y_min, x_max, y_max] of the full 3D range.
        bev_range = pcd_range[[0, 1, 3, 4]]
        filtered_boxes = []
        for box in data['bbox_objs']:
            if in_range_bev(bev_range, box.to_xyzwhlr()):
                filtered_boxes.append(box)
        return {
            'point': data['point'],
            'bbox_objs': filtered_boxes,
            'calib': data['calib']
        }
    @staticmethod
    def ObjectSample(data, db_boxes_dict, sample_dict):
        """Paste ground-truth boxes (and their interior points) sampled from a
        database into the scene, up to sample_dict[class] boxes per class."""
        rate = 1.0
        points = data['point']
        bboxes = data['bbox_objs']
        gt_labels_3d = [box.label_class for box in data['bbox_objs']]
        # How many extra boxes each class still needs to reach its quota.
        sampled_num_dict = {}
        for class_name in sample_dict.keys():
            max_sample_num = sample_dict[class_name]
            existing = np.sum([n == class_name for n in gt_labels_3d])
            sampled_num = int(max_sample_num - existing)
            sampled_num = np.round(rate * sampled_num).astype(np.int64)
            sampled_num_dict[class_name] = sampled_num
        sampled = []
        for class_name in sampled_num_dict.keys():
            sampled_num = sampled_num_dict[class_name]
            if sampled_num < 0:
                continue
            sampled_cls = sample_class(class_name, sampled_num, bboxes,
                                       db_boxes_dict[class_name])
            sampled += sampled_cls
            bboxes = bboxes + sampled_cls
        if len(sampled) != 0:
            sampled_points = np.concatenate(
                [box.points_inside_box for box in sampled], axis=0)
            # Remove original points that fall inside the pasted boxes, then
            # add the boxes' own points.
            points = remove_points_in_boxes(points, sampled)
            points = np.concatenate([sampled_points, points], axis=0)
        return {'point': points, 'bbox_objs': bboxes, 'calib': data['calib']}
    @staticmethod
    def ObjectNoise(input,
                    trans_std=[0.25, 0.25, 0.25],
                    rot_range=[-0.15707963267, 0.15707963267],
                    num_try=100):
        # Not implemented yet; signature mirrors the mmdetection3d transform.
        raise NotImplementedError
|
{"hexsha": "bce1b48ceb0e3c92efdfd058040ae21b931cfe64", "size": 8611, "ext": "py", "lang": "Python", "max_stars_repo_path": "ml3d/datasets/augment/augmentation.py", "max_stars_repo_name": "thomasbrockmeier-ams/Open3D-ML", "max_stars_repo_head_hexsha": "1e362bbf133537668923905a12a15c540d9b689d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-08-11T02:21:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-15T19:32:04.000Z", "max_issues_repo_path": "ml3d/datasets/augment/augmentation.py", "max_issues_repo_name": "thomasbrockmeier-ams/Open3D-ML", "max_issues_repo_head_hexsha": "1e362bbf133537668923905a12a15c540d9b689d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-31T09:06:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-31T09:06:08.000Z", "max_forks_repo_path": "ml3d/datasets/augment/augmentation.py", "max_forks_repo_name": "thomasbrockmeier-ams/Open3D-ML", "max_forks_repo_head_hexsha": "1e362bbf133537668923905a12a15c540d9b689d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6174242424, "max_line_length": 115, "alphanum_fraction": 0.5348972245, "include": true, "reason": "import numpy", "num_tokens": 2065}
|
from numpy import allclose
def very_close(a, b):
    """Return True when a and b agree to (roughly) machine precision."""
    tol = 1e-14
    return allclose(a, b, rtol=tol, atol=tol)
|
{"hexsha": "6e91be2ba15e443ef310b9ba2b96c4159bdc8f71", "size": 140, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/__init__.py", "max_stars_repo_name": "computationalmodelling/fidimag", "max_stars_repo_head_hexsha": "07a275c897a44ad1e0d7e8ef563f10345fdc2a6e", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 53, "max_stars_repo_stars_event_min_datetime": "2016-02-27T09:40:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T21:37:44.000Z", "max_issues_repo_path": "tests/__init__.py", "max_issues_repo_name": "computationalmodelling/fidimag", "max_issues_repo_head_hexsha": "07a275c897a44ad1e0d7e8ef563f10345fdc2a6e", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 132, "max_issues_repo_issues_event_min_datetime": "2016-02-26T13:18:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-01T21:52:42.000Z", "max_forks_repo_path": "tests/__init__.py", "max_forks_repo_name": "computationalmodelling/fidimag", "max_forks_repo_head_hexsha": "07a275c897a44ad1e0d7e8ef563f10345fdc2a6e", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 32, "max_forks_repo_forks_event_min_datetime": "2016-02-26T13:21:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T08:54:51.000Z", "avg_line_length": 20.0, "max_line_length": 49, "alphanum_fraction": 0.6642857143, "include": true, "reason": "from numpy", "num_tokens": 43}
|
#include "<test_name>.h"
// Generated from the AFU JSON file by afu_json_mgr
#include "afu_json_info.h"
#include <unistd.h>
#include <time.h>
#include <boost/format.hpp>
#include <boost/algorithm/string.hpp>
#include <stdlib.h>
#include <sys/mman.h>
#include <chrono>
#include <thread>
#include "common.h"
#define ELEM_LINE 8
// ========================================================================
//
// Each test must provide these functions used by main to find the
// specific test instance.
//
// ========================================================================
// Returns the UUID of the AFU this test binds to.  AFU_ACCEL_UUID comes from
// afu_json_info.h, generated from the AFU JSON file by afu_json_mgr.
const char* testAFUID()
{
    return AFU_ACCEL_UUID;
}
// Register the command-line options this test understands.  Each "*_enemy"
// option enables a traffic generator on the corresponding virtual channel;
// a larger value corresponds to a higher interference frequency.
void testConfigOptions(po::options_description &desc)
{
    // Add test-specific options
    desc.add_options()
        ("repeat,r", po::value<int>()->default_value(10), "Number of repetitions")
        ("VL0_enemy", po::value<int>()->default_value(0), "Activate a VL0 enemy, a higher number corresponds to a higher frequency")
        ("VH0_enemy", po::value<int>()->default_value(0), "Activate a VH0 enemy, a higher number corresponds to a higher frequency")
        // Fixed copy-paste bug: this help text previously said "VH0 enemy".
        ("VH1_enemy", po::value<int>()->default_value(0), "Activate a VH1 enemy, a higher number corresponds to a higher frequency")
        ;
}
// Factory called by the harness main() to instantiate this test.
// <test_name> is a placeholder substituted by the template code generator.
CCI_TEST* allocTest(const po::variables_map& vm, SVC_WRAPPER& svc)
{
    return new <test_name>(vm, svc);
}
#define WR_REQ 1
#define WR_RSP 2
#define RD_REQ 3
#define RD_RSP 4
#define FN_REQ 5
#define FN_RSP 6
#define FN_REQANY 5
#define FN_RSPANY 6
#define X_ADDR 0
#define Y_ADDR 1
#define Z_ADDR 2
#define VA 0
#define VL0 1
#define VH0 2
#define VH1 3
uint64_t *r0, *r1, *r2;
<thread_declaration>
// ========================================================================
//
// <test_name>
//
// ========================================================================
// Main test routine, invoked once by the harness.  Allocates shared buffers,
// programs the AFU CSRs with their addresses and the enemy configuration,
// runs `repeat` trace captures per experiment for `experiments_repeat`
// experiments, and reports per-channel line statistics.
// Returns 0 on success, 1 if the trace was flagged invalid.
// <fpga_thread>, <cpu_threads> and <assert_test> are placeholders filled in
// by the template code generator.
int <test_name>::test()
{
    uint64_t repeat = 10000;
    uint64_t experiments_repeat = uint64_t(vm["repeat"].as<int>());
    uint64_t VL0_enemy = uint64_t(vm["VL0_enemy"].as<int>());
    uint64_t VH0_enemy = uint64_t(vm["VH0_enemy"].as<int>());
    uint64_t VH1_enemy = uint64_t(vm["VH1_enemy"].as<int>());
    // Allocate the registers for reads
    r0 = (uint64_t*) calloc(repeat, sizeof(uint64_t) );
    r1 = (uint64_t*) calloc(repeat, sizeof(uint64_t) );
    r2 = (uint64_t*) calloc(repeat, sizeof(uint64_t) );
    // Allocate memory for the x array
    auto x_buf_handle = this->allocBuffer( repeat * ELEM_LINE * sizeof(uint64_t) );
    auto x_buf = reinterpret_cast<volatile uint64_t*>(x_buf_handle->c_type());
    assert(NULL != x_buf);
    setArray(const_cast<uint64_t*>(x_buf), 42, repeat * ELEM_LINE);
    // Initialize the x array in the buffer
    // Allocate memory for the y array
    auto y_buf_handle = this->allocBuffer( repeat * ELEM_LINE * sizeof(uint64_t) );
    auto y_buf = reinterpret_cast<volatile uint64_t*>(y_buf_handle->c_type());
    assert(NULL != y_buf);
    // Initialize the y array in the buffer
    setArray(const_cast<uint64_t*>(y_buf), 42, repeat * ELEM_LINE);
    // Allocate memory for the z array
    auto z_buf_handle = this->allocBuffer( repeat * ELEM_LINE * sizeof(uint64_t) );
    auto z_buf = reinterpret_cast<volatile uint64_t*>(z_buf_handle->c_type());
    assert(NULL != z_buf);
    // Initialize the z array in the buffer
    setArray(const_cast<uint64_t*>(z_buf), 42, repeat * ELEM_LINE);
    // Allocate memory for the read_registers array
    auto read_registers_buf_handle = this->allocBuffer( repeat * ELEM_LINE * sizeof(uint64_t) );
    auto read_registers_buf = reinterpret_cast<volatile uint64_t*>(read_registers_buf_handle->c_type());
    assert(NULL != read_registers_buf);
    // Initialize the read register in the buffer
    setArray(const_cast<uint64_t*>(read_registers_buf), 42, repeat * ELEM_LINE);
    // Allocate memory for the ok array
    auto valid_buf_handle = this->allocBuffer( repeat * ELEM_LINE * sizeof(uint64_t) );
    auto valid_buf = reinterpret_cast<volatile uint64_t*>(valid_buf_handle->c_type());
    assert(NULL != valid_buf);
    // Initialize the ok array in the buffer
    setArray(const_cast<uint64_t*>(valid_buf), 42, repeat * ELEM_LINE);
    // Allocate memory for array that singals finish array
    auto finish_buf_handle = this->allocBuffer( 4* ELEM_LINE * sizeof(uint64_t) );
    auto finish_buf = reinterpret_cast<volatile uint64_t*>(finish_buf_handle->c_type());
    assert(NULL != finish_buf);
    // Initialize the finish array in the buffer
    setArray(const_cast<uint64_t*>(finish_buf), 42, 4*ELEM_LINE);
    //
    // Configure the HW test: CSRs 1-6 carry the buffer base addresses.
    //
    writeTestCSR(1, intptr_t(x_buf));
    writeTestCSR(2, intptr_t(y_buf));
    writeTestCSR(3, intptr_t(z_buf));
    writeTestCSR(4, intptr_t(read_registers_buf));
    writeTestCSR(5, intptr_t(valid_buf));
    writeTestCSR(6, intptr_t(finish_buf));
    // Snapshot channel counters so the deltas printed later reflect only
    // this test's traffic.
    uint64_t vl0_lines = readCommonCSR(CCI_TEST::CSR_COMMON_VL0_RD_LINES) +
                         readCommonCSR(CCI_TEST::CSR_COMMON_VL0_WR_LINES);
    uint64_t vh0_lines = readCommonCSR(CCI_TEST::CSR_COMMON_VH0_LINES);
    uint64_t vh1_lines = readCommonCSR(CCI_TEST::CSR_COMMON_VH1_LINES);
    uint64_t va_req_lines = readCommonCSR(CCI_TEST::CSR_COMMON_VA_REQ_LINES);
    uint64_t vl0_req_lines = readCommonCSR(CCI_TEST::CSR_COMMON_VL0_REQ_LINES);
    uint64_t vh0_req_lines = readCommonCSR(CCI_TEST::CSR_COMMON_VH0_REQ_LINES);
    uint64_t vh1_req_lines = readCommonCSR(CCI_TEST::CSR_COMMON_VH1_REQ_LINES);
    // CSRs 7-9: enemy frequencies; CSR 10: number of repetitions.
    writeTestCSR(7, VL0_enemy);
    writeTestCSR(8, VH0_enemy);
    writeTestCSR(9, VH1_enemy);
    writeTestCSR(10, repeat);
    <fpga_thread>
    cout << "x buff address " << hex << intptr_t(x_buf) << endl;
    cout << "y buff address " << hex << intptr_t(y_buf) << endl;
    cout << "z buff address " << hex << intptr_t(z_buf) << endl << endl;
    cout << "read_registers buff address " << hex << intptr_t(read_registers_buf) << endl << endl;
    cout << "valid buff address " << hex << intptr_t(valid_buf) << endl << endl;
    cout << "finish buff address " << hex << intptr_t(finish_buf) << endl << endl;
    cout << "Repeating " << dec << repeat << " times" << endl;
    cout << endl << "Spin, waiting for the value in memory to change to something non-42" << endl;
    std::chrono::high_resolution_clock::time_point time1 = std::chrono::high_resolution_clock::now();
    struct timespec pause;
    // Longer when simulating
    pause.tv_sec = (hwIsSimulated() ? 3 : 0);
    pause.tv_nsec = 2500000;
    int valid_traces = 0;
    int valid_test = 1;
    for(int exp = 0; exp<experiments_repeat; exp++) {
        // Reset every buffer to the sentinel value 42 before each experiment.
        setArray(const_cast<uint64_t*>(x_buf), 42, repeat * ELEM_LINE);
        setArray(const_cast<uint64_t*>(y_buf), 42, repeat * ELEM_LINE);
        setArray(const_cast<uint64_t*>(z_buf), 42, repeat * ELEM_LINE);
        setArray(const_cast<uint64_t*>(read_registers_buf), 42, repeat * ELEM_LINE);
        setArray(const_cast<uint64_t*>(valid_buf), 42, repeat * ELEM_LINE);
        setArray(const_cast<uint64_t*>(finish_buf), 42, 4*ELEM_LINE);
        // Start the test
        writeTestCSR(0, 0);
        <cpu_threads>
        // Poll until the AFU overwrites the sentinel in the finish buffer.
        while (42 == finish_buf[0])
        {
            nanosleep(&pause, NULL);
        }
        for(int i=0; i<repeat; i++) {
            if (valid_buf[i*ELEM_LINE]) {
                valid_traces++;
                <assert_test>
            }
        }
    }
    cout<< "I had " << valid_traces << " valid traces" << endl;
    // cout << "Printing read registers" << endl;
    // printArray(const_cast<uint64_t*>(read_registers_buf), ELEM_LINE, repeat);
    // cout << "Printing valid registers" << endl;
    // printArray(const_cast<uint64_t*>(valid_buf), ELEM_LINE, repeat);
    std::chrono::high_resolution_clock::time_point time2 = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>( time2 - time1 ).count();
    cout << "Test took " << (duration/1000) << " seconds" << endl;
    cout << endl << endl;
    uint64_t read_cnt = readTestCSR(4);
    uint64_t write_cnt = readTestCSR(5);
    uint64_t checked_read_cnt = readTestCSR(6);
    // Re-read the channel counters and print the per-channel deltas.
    uint64_t vl0_lines_n = readCommonCSR(CCI_TEST::CSR_COMMON_VL0_RD_LINES) +
                           readCommonCSR(CCI_TEST::CSR_COMMON_VL0_WR_LINES);
    uint64_t vh0_lines_n = readCommonCSR(CCI_TEST::CSR_COMMON_VH0_LINES);
    uint64_t vh1_lines_n = readCommonCSR(CCI_TEST::CSR_COMMON_VH1_LINES);
    uint64_t va_req_lines_n = readCommonCSR(CCI_TEST::CSR_COMMON_VA_REQ_LINES);
    uint64_t vl0_req_lines_n = readCommonCSR(CCI_TEST::CSR_COMMON_VL0_REQ_LINES);
    uint64_t vh0_req_lines_n = readCommonCSR(CCI_TEST::CSR_COMMON_VH0_REQ_LINES);
    uint64_t vh1_req_lines_n = readCommonCSR(CCI_TEST::CSR_COMMON_VH1_REQ_LINES);
    cout << "  VA REQ " << va_req_lines_n - va_req_lines
         << " : VL0 REQ " << vl0_req_lines_n - vl0_req_lines
         << " : VH0 REQ " << vh0_req_lines_n - vh0_req_lines
         << " : VH1 REQ " << vh1_req_lines_n - vh1_req_lines
         << endl;
    cout << "  "
         << "  VL0 RSP " << vl0_lines_n - vl0_lines
         << " : VH0 RSP " << vh0_lines_n - vh0_lines
         << " : VH1 RSP " << vh1_lines_n - vh1_lines
         << endl;
    // Reads CSRs to get some statistics
    cout << "#" << endl
         << "# AFU frequency: " << getAFUMHz() << " MHz"
         << (hwIsSimulated() ? " [simulated]" : "")
         << endl;
    if (!valid_test) {
        cout << "Trace was not valid";
        return 1;
    }
    // Stall the CPU so that it does not deallocate memory too soon
    // pause.tv_sec = 2;
    // nanosleep(&pause, NULL);
    return 0;
}
// Number of AFU clock cycles the test executed, reported to the harness.
uint64_t
<test_name>::testNumCyclesExecuted()
{
    return totalCycles;
}
|
{"hexsha": "b7da8252d8eddef229feca502ed8a751920609b4", "size": 9808, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "backend/template/sw/template.cpp", "max_stars_repo_name": "diorga/harpy", "max_stars_repo_head_hexsha": "28daf9a223527588b9e1fdc3ff9c3e92f91a03a4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2021-07-09T19:27:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-12T12:54:38.000Z", "max_issues_repo_path": "backend/template/sw/template.cpp", "max_issues_repo_name": "diorga/harpy", "max_issues_repo_head_hexsha": "28daf9a223527588b9e1fdc3ff9c3e92f91a03a4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "backend/template/sw/template.cpp", "max_forks_repo_name": "diorga/harpy", "max_forks_repo_head_hexsha": "28daf9a223527588b9e1fdc3ff9c3e92f91a03a4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1918819188, "max_line_length": 129, "alphanum_fraction": 0.6396818923, "num_tokens": 2636}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Adapted from https://github.com/priba/nmp_qc
"""
utils.py: Functions to process dataset graphs.
Usage:
"""
from __future__ import print_function
import rdkit
import torch
from joblib import Parallel, delayed
import multiprocessing
import networkx as nx
import numpy as np
import shutil
import os
__author__ = "Pedro HC Avelar, Pau Riba, Anjan Dutta"
__email__ = "phcavelar@inf.ufrgs.br, priba@cvc.uab.cat, adutta@cvc.uab.cat"
def qm9_nodes(g, hydrogen=False):
    """Build a per-node feature list for a QM9 molecule graph.

    For each node, the feature vector is: one-hot atom type (H, C, N, O, F),
    atomic number, partial charge, acceptor flag, donor flag, aromatic flag,
    one-hot hybridization (SP, SP2, SP3) and, when ``hydrogen`` is True, the
    attached-hydrogen count.
    """
    hybridizations = [rdkit.Chem.rdchem.HybridizationType.SP,
                      rdkit.Chem.rdchem.HybridizationType.SP2,
                      rdkit.Chem.rdchem.HybridizationType.SP3]
    features = []
    for _, attrs in g.nodes(data=True):
        # One-hot atom type (H, C, N, O, F).
        row = [int(attrs['a_type'] == symbol) for symbol in ['H', 'C', 'N', 'O', 'F']]
        # Atomic number, partial charge, acceptor/donor flags, aromaticity.
        row.append(attrs['a_num'])
        row.append(attrs['pc'])
        row.append(attrs['acceptor'])
        row.append(attrs['donor'])
        row.append(int(attrs['aromatic']))
        # One-hot hybridization.
        row.extend(int(attrs['hybridization'] == h) for h in hybridizations)
        if hydrogen:
            row.append(attrs['num_h'])
        features.append(row)
    return features
def qm9_edges(g, e_representation='raw_distance'):
    """Build edge features for a QM9 molecule graph.

    Depending on ``e_representation``:
      - 'chem_graph': one-based index of the bond type; edges without a bond
        are removed from the graph.
      - 'distance_bin': bond-type index when bonded, otherwise the distance
        binned into 9 buckets over [2, 6), offset by 5.
      - 'raw_distance': raw distance plus one-hot bond type; non-bonded edges
        are removed.

    Returns the (possibly edge-pruned) adjacency matrix and a dict mapping
    (n1, n2) to the edge feature list.
    """
    remove_edges = []
    e={}
    for n1, n2, d in g.edges(data=True):
        e_t = []
        # Raw distance function
        if e_representation == 'chem_graph':
            if d['b_type'] is None:
                remove_edges += [(n1, n2)]
            else:
                e_t += [i+1 for i, x in enumerate([rdkit.Chem.rdchem.BondType.SINGLE, rdkit.Chem.rdchem.BondType.DOUBLE,
                                                   rdkit.Chem.rdchem.BondType.TRIPLE, rdkit.Chem.rdchem.BondType.AROMATIC])
                        if x == d['b_type']]
        elif e_representation == 'distance_bin':
            if d['b_type'] is None:
                # Bin the raw distance into 9 equal buckets over [2, 6).
                step = (6-2)/8.0
                start = 2
                b = 9
                for i in range(0, 9):
                    if d['distance'] < (start+i*step):
                        b = i
                        break
                # Offset by 5 so distance bins do not collide with bond-type ids.
                e_t.append(b+5)
            else:
                e_t += [i+1 for i, x in enumerate([rdkit.Chem.rdchem.BondType.SINGLE, rdkit.Chem.rdchem.BondType.DOUBLE,
                                                   rdkit.Chem.rdchem.BondType.TRIPLE, rdkit.Chem.rdchem.BondType.AROMATIC])
                        if x == d['b_type']]
        elif e_representation == 'raw_distance':
            if d['b_type'] is None:
                remove_edges += [(n1, n2)]
            else:
                e_t.append(d['distance'])
                e_t += [int(d['b_type'] == x) for x in [rdkit.Chem.rdchem.BondType.SINGLE, rdkit.Chem.rdchem.BondType.DOUBLE,
                                                        rdkit.Chem.rdchem.BondType.TRIPLE, rdkit.Chem.rdchem.BondType.AROMATIC]]
        else:
            print('Incorrect Edge representation transform')
            quit()
        if e_t:
            e[(n1, n2)] = e_t
    for edg in remove_edges:
        g.remove_edge(*edg)
    return nx.to_numpy_matrix(g), e
def normalize_data(data, mean, std):
    """Standardize data: subtract the mean and divide by the std."""
    return (data - mean) / std
def get_values(obj, start, end, prop):
    """Collect per-graph statistics for dataset entries in [start, end).

    Each entry of ``obj`` is ((adjacency, node_features, edge_dict), target).
    Depending on the names in ``prop``, each returned dict may contain the
    set of node degrees, the set of edge labels, and the raw target values.
    """
    collected = []
    for idx in range(start, end):
        entry = {}
        graph_input = obj[idx][0]
        if 'degrees' in prop:
            # Column sums of the adjacency matrix give node degrees; the
            # nested tolist() output is flattened before deduplication.
            degree_rows = graph_input[0].sum(axis=0, dtype='int').tolist()
            entry['degrees'] = set(sum(degree_rows, []))
        if 'edge_labels' in prop:
            entry['edge_labels'] = set(sum(list(graph_input[2].values()), []))
        if 'target_mean' in prop or 'target_std' in prop:
            entry['params'] = obj[idx][1]
        collected.append(entry)
    return collected
def get_graph_stats(graph_obj_handle, prop='degrees'):
    """Gather dataset-level statistics over all graphs, in parallel.

    Splits the dataset into one contiguous chunk per CPU core, runs
    ``get_values`` on each chunk with joblib, and merges the results into a
    dict that may contain 'degrees', 'edge_labels', 'target_mean', and
    'target_std' depending on ``prop``.
    """
    n_workers = multiprocessing.cpu_count()
    total = len(graph_obj_handle)
    # Chunk boundaries: worker w handles indices [bounds[w], bounds[w+1]).
    bounds = [int(w * total / n_workers) for w in range(n_workers)] + [total]
    chunks = Parallel(n_jobs=n_workers)(
        delayed(get_values)(graph_obj_handle, bounds[w], bounds[w + 1], prop)
        for w in range(n_workers))
    flat = [entry for chunk in chunks for entry in chunk]
    stat_dict = {}
    if 'degrees' in prop:
        stat_dict['degrees'] = list({d for entry in flat for d in entry['degrees']})
    if 'edge_labels' in prop:
        stat_dict['edge_labels'] = list({lab for entry in flat for lab in entry['edge_labels']})
    if 'target_mean' in prop or 'target_std' in prop:
        param = np.array([entry['params'] for entry in flat])
    if 'target_mean' in prop:
        stat_dict['target_mean'] = np.mean(param, axis=0)
    if 'target_std' in prop:
        stat_dict['target_std'] = np.std(param, axis=0)
    return stat_dict
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) tensor of true class indices.
        topk: iterable of k values to report.

    Returns:
        List of tensors, one per k, each holding the top-k accuracy in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    # Indices of the top-maxk classes per sample, transposed to (maxk, batch).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    pred = pred.type_as(target)
    target = target.type_as(pred)
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # Bug fix: use reshape(-1) instead of view(-1).  correct[:k] is a
        # slice of a transposed (non-contiguous) tensor, and view() raises
        # a RuntimeError on non-contiguous tensors in modern PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def collate_g_concat_edge_data(batch):
    """Collate a batch of graphs into one block-diagonal graph with flat,
    doubled (src->tgt and tgt->src) edge arrays.

    Each batch element is ((adjacency, node_features, edge_dict), target).

    Returns:
        batch_size, plus tensors:
        G: (N, N) block-diagonal adjacency over all N nodes in the batch.
        B: (N,) graph index of each node.
        X: (N, n_d) node features.
        E_d: (2M, e_d) edge features, each undirected edge stored twice.
        E_src: (2M,) source node of each directed edge.
        E_tgt: (N, 2M) dense node-by-edge incidence (target side), built from
               a sparse tensor.
        Y: (batch_size, o_d) targets.
    """
    # Accessor lambdas for the nested batch-element structure.
    g_M = lambda g: g[0][0]
    g_n = lambda g: g_M(g).shape[0]
    g_x = lambda g: g[0][1]
    g_x_d = lambda g: len(g_x(g)[0])
    g_e = lambda g: g[0][2]
    g_m = lambda g: len(g_e(g))
    g_e_keys = lambda g: list(g_e(g).keys())
    g_e_values = lambda g: list(g_e(g).values())
    g_e_d = lambda g: len(g_e_values(g)[0])
    g_o = lambda g: g[1]
    g_o_d = lambda g: len(g[1])
    n_d, e_d, o_d = g_x_d(batch[0]), g_e_d(batch[0]), g_o_d(batch[0])
    # Total node (N) and undirected edge (M) counts across the batch.
    N = 0
    M = 0
    batch_size = len(batch)
    for g in batch:
        n = g_n(g)
        m = g_m(g)
        N += n
        M += m
    #end for
    G = np.zeros([N, N])
    B = np.zeros([N], dtype=np.int64)
    X = np.zeros([N, n_d])
    E_d = np.zeros([2*M, e_d])
    E_src = np.zeros([2*M], dtype=np.int64)
    E_tgt = np.zeros([2*M,2], dtype=np.int64)
    Y = np.zeros([batch_size, o_d])
    # Running node (n_acc) and edge (m_acc) offsets into the concatenation.
    n_acc = 0
    m_acc = 0
    for b, g in enumerate(batch):
        n = g_n(g)
        G[n_acc:n_acc+n,n_acc:n_acc+n] = g_M(g)
        B[n_acc:n_acc+n] = b
        X[n_acc:n_acc+n,:] = g_x(g)
        for edge_id, edge in enumerate(sorted(g_e_keys(g))):
            src, tgt = edge
            # Forward direction lives in the first M slots...
            src_edge_id = m_acc+edge_id
            E_d[src_edge_id,:] = g_e(g)[edge]
            E_src[src_edge_id] = src
            E_tgt[src_edge_id,:] = [tgt,src_edge_id]
            # ...and the reverse direction in the second M slots.
            tgt_edge_id = M+m_acc+edge_id
            E_d[tgt_edge_id,:] = g_e(g)[edge]
            E_src[tgt_edge_id] = tgt
            E_tgt[tgt_edge_id] = [src,tgt_edge_id]
        #end for
        Y[b] = g_o(g)
        n_acc+=n
        m_acc+=g_m(g)
    #end for
    G = torch.FloatTensor(G)
    B = torch.LongTensor(B)
    X = torch.FloatTensor(X)
    E_d = torch.FloatTensor(E_d)
    E_src = torch.LongTensor(E_src)
    E_tgt = torch.sparse.FloatTensor(torch.LongTensor(E_tgt.transpose()),torch.FloatTensor(np.ones(2*M)),torch.Size([N,2*M])).to_dense()
    Y = torch.FloatTensor(Y)
    return batch_size,G,B,X,E_d,E_src,E_tgt,Y
#end collate_g_concat
def collate_g_concat(batch):
    """Collate a batch of graphs into one block-diagonal graph with a dense
    (N, N, e_d) edge-feature tensor.

    Each batch element is ((adjacency, node_features, edge_dict), target).

    Returns tensors:
        G: (N, N) block-diagonal adjacency over all N nodes in the batch.
        B: (N,) graph index of each node.
        X: (N, n_d) node features.
        E_d: (N, N, e_d) symmetric dense edge features (global node indices).
        E_i: (M, 2) per-graph-local (min, max) endpoints of each edge.
        Y: (batch_size, o_d) targets.
    """
    # Accessor lambdas for the nested batch-element structure.
    g_M = lambda g: g[0][0]
    g_n = lambda g: g_M(g).shape[0]
    g_x = lambda g: g[0][1]
    g_x_d = lambda g: len(g_x(g)[0])
    g_e = lambda g: g[0][2]
    g_m = lambda g: len(g_e(g))
    g_e_keys = lambda g: list(g_e(g).keys())
    g_e_values = lambda g: list(g_e(g).values())
    g_e_d = lambda g: len(g_e_values(g)[0])
    g_o = lambda g: g[1]
    g_o_d = lambda g: len(g[1])
    n_d, e_d, o_d = g_x_d(batch[0]), g_e_d(batch[0]), g_o_d(batch[0])
    # Total node (N) and edge (M) counts across the batch.
    N = 0
    M = 0
    batch_size = len(batch)
    for g in batch:
        n = g_n(g)
        m = g_m(g)
        N += n
        M += m
    #end for
    G = np.zeros([N, N])
    B = np.zeros([N], dtype=np.int64)
    X = np.zeros([N, n_d])
    E_d = np.zeros([N, N, e_d])
    E_i = np.zeros([M, 2], dtype=np.int64)
    Y = np.zeros([batch_size, o_d])
    # Running node offset into the concatenation.
    n_acc = 0
    for b, g in enumerate(batch):
        n = g_n(g)
        G[n_acc:n_acc+n,n_acc:n_acc+n] = g_M(g)
        B[n_acc:n_acc+n] = b
        X[n_acc:n_acc+n,:] = g_x(g)
        for edge_id, edge in enumerate(sorted(g_e_keys(g))):
            src, tgt = edge
            # NOTE(review): E_i stores graph-local indices (no n_acc offset)
            # while E_d uses global ones -- confirm consumers expect this.
            E_i[edge_id,:] = [min(src,tgt),max(src,tgt)]
            # Store features symmetrically under global node indices.
            E_d[n_acc+src,n_acc+tgt,:] = g_e(g)[edge]
            E_d[n_acc+tgt,n_acc+src,:] = g_e(g)[edge]
        #end for
        Y[b] = g_o(g)
        n_acc+=n
    #end for
    G = torch.FloatTensor(G)
    B = torch.LongTensor(B)
    X = torch.FloatTensor(X)
    E_d = torch.FloatTensor(E_d)
    E_i = torch.LongTensor(E_i)
    Y = torch.FloatTensor(Y)
    return G,B,X,E_d,E_i,Y
#end collate_g_concat
def collate_g_concat_dict(batch):
    """Collate a batch of graphs into one block-diagonal graph with a dict of
    edge features.

    Each batch element is ``((adjacency, node_features, edge_dict), target)``.

    Returns:
        G -- (N, N) FloatTensor, block-diagonal adjacency
        B -- (N,) LongTensor, node -> graph-index map
        X -- (N, n_d) FloatTensor, concatenated node features
        E -- dict mapping (src, tgt) pairs (concatenated node numbering, both
             directions) to 1-D FloatTensors of edge features
        Y -- (batch_size, o_d) FloatTensor of targets

    Assumes every graph has at least one edge (e_d probed from batch[0]).
    """
    g_M = lambda g: g[0][0]
    g_n = lambda g: g_M(g).shape[0]
    g_x = lambda g: g[0][1]
    g_x_d = lambda g: len(g_x(g)[0])
    g_e = lambda g: g[0][2]
    g_e_keys = lambda g: list(g_e(g).keys())
    g_e_values = lambda g: list(g_e(g).values())
    g_e_d = lambda g: len(g_e_values(g)[0])
    g_o = lambda g: g[1]
    g_o_d = lambda g: len(g[1])
    # e_d is probed (validates batch[0] has edges) even though E stays a dict
    n_d, e_d, o_d = g_x_d(batch[0]), g_e_d(batch[0]), g_o_d(batch[0])
    batch_size = len(batch)
    N = 0
    for g in batch:
        N += g_n(g)  # removed unused per-graph `M = g_M(g)` binding
    #end for
    G = np.zeros([N, N])
    B = np.zeros([N], dtype=np.int64)
    X = np.zeros([N, n_d])
    E = {}
    Y = np.zeros([batch_size, o_d])
    n_acc = 0
    # removed leftover debug prints ("bla" / "ble")
    for b, g in enumerate(batch):
        n = g_n(g)
        G[n_acc:n_acc+n, n_acc:n_acc+n] = g_M(g)
        B[n_acc:n_acc+n] = b
        X[n_acc:n_acc+n, :] = g_x(g)
        for edge in g_e_keys(g):
            src, tgt = edge
            # store both directions, shifted into concatenated numbering
            E[n_acc+src, n_acc+tgt] = g_e(g)[edge]
            E[n_acc+tgt, n_acc+src] = g_e(g)[edge]
        #end for
        Y[b] = g_o(g)
        n_acc += n
    #end for
    G = torch.FloatTensor(G)
    B = torch.LongTensor(B)
    X = torch.FloatTensor(X)
    for k in E.keys():
        E[k] = torch.FloatTensor(E[k])
    #end for
    Y = torch.FloatTensor(Y)
    return G, B, X, E, Y
#end collate_g_concat_dict
def collate_g(batch):
    """Collate variable-size graphs into zero-padded dense batch tensors.

    Each batch element is ``((adjacency, node_features, edge_dict), target)``.
    All graphs are padded to the largest node count in the batch; edge
    features are padded to the largest edge-feature width.

    Returns FloatTensors: g (adjacency), h (node features), e (dense edge
    features, symmetric), target.
    """
    def _dims(pair):
        # [num_nodes, node_feat_dim, num_edges, edge_feat_dim] for one sample
        graph_input, _ = pair
        feats, edges = graph_input[1], graph_input[2]
        if edges:
            return [len(feats), len(feats[0]), len(edges), len(list(edges.values())[0])]
        return [len(feats), len(feats[0]), 0, 0]

    # per-dimension maxima across the batch
    batch_sizes = np.max(np.array([_dims(pair) for pair in batch]), axis=0)
    num_graphs = len(batch)
    g = np.zeros((num_graphs, batch_sizes[0], batch_sizes[0]))
    h = np.zeros((num_graphs, batch_sizes[0], batch_sizes[1]))
    e = np.zeros((num_graphs, batch_sizes[0], batch_sizes[0], batch_sizes[3]))
    target = np.zeros((num_graphs, len(batch[0][1])))
    for i, (graph_input, graph_target) in enumerate(batch):
        adjacency, feats, edges = graph_input[0], graph_input[1], graph_input[2]
        num_nodes = len(feats)
        # adjacency matrix, padded into the top-left corner
        g[i, 0:num_nodes, 0:num_nodes] = adjacency
        # node features
        h[i, 0:num_nodes, :] = feats
        # edge features, written symmetrically
        for (u, v), feat in edges.items():
            e[i, u, v, :] = feat
            e[i, v, u, :] = feat
        # target
        target[i, :] = graph_target
    return (torch.FloatTensor(g), torch.FloatTensor(h),
            torch.FloatTensor(e), torch.FloatTensor(target))
def save_checkpoint(state, is_best, directory):
    """Serialize `state` to `directory`/checkpoint.pth (creating the directory
    if needed); when `is_best`, also copy it to model_best.pth."""
    checkpoint_file = os.path.join(directory, 'checkpoint.pth')
    best_model_file = os.path.join(directory, 'model_best.pth')
    if not os.path.isdir(directory):
        os.makedirs(directory)
    torch.save(state, checkpoint_file)
    if is_best:
        shutil.copyfile(checkpoint_file, best_model_file)
|
{"hexsha": "49d827b18611982ce6bdabdd882d794ee52b953a", "size": 12001, "ext": "py", "lang": "Python", "max_stars_repo_path": "QC/datasets/utils.py", "max_stars_repo_name": "phcavelar/graph-odenet", "max_stars_repo_head_hexsha": "cba1224c041e53ea221e31bf9103ef950b8bd460", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-12-10T18:49:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T03:21:30.000Z", "max_issues_repo_path": "QC/datasets/utils.py", "max_issues_repo_name": "phcavelar/graph-odenet", "max_issues_repo_head_hexsha": "cba1224c041e53ea221e31bf9103ef950b8bd460", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-04T04:41:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-07T18:52:37.000Z", "max_forks_repo_path": "QC/datasets/utils.py", "max_forks_repo_name": "phcavelar/graph-odenet", "max_forks_repo_head_hexsha": "cba1224c041e53ea221e31bf9103ef950b8bd460", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-03T12:05:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-10T11:57:48.000Z", "avg_line_length": 31.6649076517, "max_line_length": 177, "alphanum_fraction": 0.5483709691, "include": true, "reason": "import numpy,import networkx", "num_tokens": 3611}
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
#
# @file filter_items.py
# @brief filter_items for parser directory
# @author Hovakim Grabski
#
# <!--------------------------------------------------------------------------
# Copyright (c) 2016-2019,Hovakim Grabski.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the molmolpy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------ -->
import numpy as np
import scipy as sp
import pandas as pd
import difflib
from molmolpy.utils import helper as hlp
def filter_neighbour_frequency(frames_data_original, total_length=None):
    """Aggregate per-frame neighbour records into per-residue frequencies.

    `frames_data_original` maps frame id -> {res_seq: record-dict}, where each
    record already carries a 'freq' field (used as the starting count).

    Returns a 3-tuple:
        res_seq_frequency_dict -- res_seq -> record with accumulated 'freq'
                                  and the list of 'frames' it appeared in
        frequency_dict         -- freq -> record (plus 'nameVIP' = res_seq and,
                                  when total_length is given, 'percentage')
        freq_stuff             -- sorted list of the frequency keys

    NOTE(review): residues sharing the same frequency overwrite one another in
    `frequency_dict` (one record per freq value) — confirm this is intended.
    """
    from copy import deepcopy
    # BUG FIX: the original shallow .copy() shared the nested per-residue
    # dicts with the caller, so the updates below mutated the caller's data.
    frames_data = deepcopy(frames_data_original)
    res_seq_frequency_dict = {}
    for frame in frames_data:
        curr_data = frames_data[frame]
        for res_seq in curr_data:
            if res_seq not in res_seq_frequency_dict:
                # first sighting: adopt the record and start its frame list
                res_seq_frequency_dict[res_seq] = curr_data[res_seq]
                res_seq_frequency_dict[res_seq]['frames'] = [frame]
            else:
                res_seq_frequency_dict[res_seq]['freq'] += 1
                res_seq_frequency_dict[res_seq]['frames'].append(frame)
    frequency_dict = {}
    for res_seq in res_seq_frequency_dict:
        freq = res_seq_frequency_dict[res_seq]['freq']
        frequency_dict[freq] = res_seq_frequency_dict[res_seq]
        frequency_dict[freq]['nameVIP'] = res_seq
        if total_length is not None:
            percentage = (freq * 100) / total_length
            frequency_dict[freq]['percentage'] = percentage
    freq_stuff = sorted(frequency_dict.keys())
    return res_seq_frequency_dict, frequency_dict, freq_stuff
def run_neighbour_analysis_parallel(neighbour_frame_index, topology, neighbours_data_frame):
    """Build a per-frame record of neighbouring residues.

    Args:
        neighbour_frame_index: trajectory frame index this record belongs to.
        topology: pandas DataFrame with at least 'resName', 'resSeq' and
                  'serial' columns, indexed positionally by atom.
        neighbours_data_frame: iterable of positional atom indices into
                               `topology`.

    Returns:
        {frame_index: {"<resName><resSeq>": {'freq': 1, 'resName', 'resSeq',
                                             'resIndex', 'frameIndex'}}}
        with one entry per distinct residue (first atom seen wins).
    """
    frame_neighbours_freq = {neighbour_frame_index: {}}
    residues = frame_neighbours_freq[neighbour_frame_index]
    for neighbour in neighbours_data_frame:
        info = topology.iloc[neighbour]
        res_name = info['resName']
        res_seq = info['resSeq']
        res_index = info['serial']
        name_all = res_name + str(res_seq)
        # BUG FIX: the original tested `name_all not in frame_neighbours_freq`
        # (whose keys are frame indices), so the guard was always true; test
        # the inner per-residue dict instead.
        if name_all not in residues:
            residues[name_all] = {'freq': 1, 'resName': res_name,
                                  'resSeq': res_seq, 'resIndex': res_index,
                                  'frameIndex': neighbour_frame_index}
    return frame_neighbours_freq
def run_neighbour_ligand_analysis_parallel(neighbour_frame_index, topology, neighbours_data_frame, ligand='QRC'):
    """Build a per-frame record of ligand atoms among the given neighbours.

    Args:
        neighbour_frame_index: trajectory frame index this record belongs to.
        topology: pandas DataFrame with 'resName', 'resSeq', 'serial' and
                  'name' columns, indexed positionally by atom.
        neighbours_data_frame: iterable of positional atom indices.
        ligand: residue name identifying the ligand (default 'QRC').

    Returns:
        {frame_index: {atom_name: {'freq': 1, 'resName', 'resSeq',
                                   'resIndex', 'frameIndex', 'atomName'}}}
        containing only atoms whose residue name equals `ligand`.
    """
    # removed leftover `test = 1` debug bindings and commented-out code
    frame_neighbours_freq = {neighbour_frame_index: {}}
    for neighbour in neighbours_data_frame:
        info = topology.iloc[neighbour]
        res_name = info['resName']
        if res_name != ligand:
            continue
        atom_name = info['name']
        frame_neighbours_freq[neighbour_frame_index][atom_name] = {
            'freq': 1,
            'resName': res_name,
            'resSeq': info['resSeq'],
            'resIndex': info['serial'],
            'frameIndex': neighbour_frame_index,
            'atomName': atom_name,
        }
    return frame_neighbours_freq
@hlp.timeit
def filter_non_nan_blocks(dataframe, axis_name=None):
    """Split column `axis_name` of `dataframe` into contiguous non-NaN runs.

    Returns a dict keyed by str(run length) -> list of the run's values.
    A trailing run (not terminated by a NaN) is included as well.

    NOTE(review): runs of equal length share the same key, so later runs
    overwrite earlier ones — confirm callers only care about one run per
    length.  Empty runs (consecutive NaNs) produce a '0': [] entry, matching
    the original behavior.
    """
    # removed leftover `test = 1` debug binding
    dictionary_data = {}
    temp = []
    for value in dataframe[axis_name]:
        if not np.isnan(value):
            temp.append(value)
        else:
            # NaN terminates the current run
            dictionary_data.update({str(len(temp)): temp})
            temp = []
    if len(temp) > 0:
        dictionary_data.update({str(len(temp)): temp})
    return dictionary_data
@hlp.timeit
def filter_similar_lists(list_of_lists):
    """Return the unique rows of `list_of_lists` as a 2-D numpy array.

    Rows are lexicographically sorted before deduplication, so the output
    order follows that sort, not the input order.
    """
    rows = np.copy(list_of_lists)
    # sort rows lexicographically so duplicates become adjacent
    order = np.lexsort(rows.T)
    rows = rows[order, :]
    # keep the first row plus every row that differs from its predecessor
    keep = np.append([True], np.any(np.diff(rows, axis=0), 1))
    return rows[keep]
@hlp.timeit
def filter_similar_lists_slow(list_of_lists):
    """Deduplicate `list_of_lists` by element-wise array equality, preserving
    first-seen order (O(n^2) reference implementation).

    BUG FIX: the original appended the candidate once for EVERY already-kept
    element it differed from, so any input with >=2 distinct kept elements
    produced duplicates; a candidate must be appended only when it matches
    none of the kept elements.
    """
    filtered_list = []
    for array_list in list_of_lists:
        if not any(np.array_equal(kept, array_list) for kept in filtered_list):
            filtered_list.append(array_list)
    return filtered_list
@hlp.timeit
def filter_similar_lists_difflib(list_of_lists):
    """Deduplicate `list_of_lists` using difflib similarity (ratio == 1.0
    means identical), preserving first-seen order.

    BUG FIX: the original appended the candidate once for EVERY kept element
    with ratio < 1.0, producing duplicates whenever more than one distinct
    element had been kept; append only when no kept element is identical.
    """
    filtered_list = []
    for array_list in list_of_lists:
        is_duplicate = any(
            difflib.SequenceMatcher(None, kept, array_list).ratio() == 1.0
            for kept in filtered_list
        )
        if not is_duplicate:
            filtered_list.append(array_list)
    return filtered_list
|
{"hexsha": "d5e982a0c431d03dbcc687eb00e0387c3a0cf8d8", "size": 7280, "ext": "py", "lang": "Python", "max_stars_repo_path": "molmolpy/utils/filter_items.py", "max_stars_repo_name": "hovo1990/molmolpy", "max_stars_repo_head_hexsha": "30a825f0142ca0bd549896fb88fe908af47ad4de", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-19T19:35:15.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-19T19:35:15.000Z", "max_issues_repo_path": "molmolpy/utils/filter_items.py", "max_issues_repo_name": "hovo1990/molmolpy", "max_issues_repo_head_hexsha": "30a825f0142ca0bd549896fb88fe908af47ad4de", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "molmolpy/utils/filter_items.py", "max_forks_repo_name": "hovo1990/molmolpy", "max_forks_repo_head_hexsha": "30a825f0142ca0bd549896fb88fe908af47ad4de", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8604651163, "max_line_length": 128, "alphanum_fraction": 0.646978022, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1599}
|
# -*- coding: utf-8 -*-
from keras import initializations
from keras.engine import Layer
from keras import backend as K
import librosa
import numpy as np
from . import backend
# Todo: Filterbank(); init with mel, log, linear, etc.
# not parameterised, just a matrix.
class Filterbank(Layer):
    '''Filterbank assumes a 2d input, i.e., `(None, n_ch, n_freqs, n_time)` (theano).

    # Shapes
        input_shape: (None, n_ch, n_freqs, n_time)
        output_shape: (None, n_ch, n_mels, n_time)

    # Arguments
        n_fbs: integer, number of filterbanks
        sr: integer, sampling rate, used to initialize freq_to_mel
        init: string. if 'mel', init with mel center frequencies and stds.
        fmin = float, min frequency fb.
        fmax = float. max frequency in fb. Defaults to sr / 2 when sr is given.
        trainable_fb: bool
    '''
    def __init__(self, n_fbs, trainable_fb, sr=None, init='mel', fmin=0., fmax=None,
                 bins_per_octave=12, **kwargs):
        self.supports_masking = True
        self.n_fbs = n_fbs
        assert init in ('mel', 'log', 'linear', 'uni_random')
        # BUG FIX: `init` and `fmin` were never stored, and `fmax` was only
        # stored when the caller passed None, but build()/get_config() read
        # self.init / self.fmin / self.fmax unconditionally.
        self.init = init
        self.fmin = fmin
        if fmax is None and sr is not None:
            fmax = sr / 2.
        self.fmax = fmax
        if init in ('mel', 'log'):
            assert sr is not None
        self.bins_per_octave = bins_per_octave
        self.sr = sr
        self.trainable_fb = trainable_fb
        super(Filterbank, self).__init__(**kwargs)

    def build(self, input_shape):
        # cache input dims according to the backend's dimension ordering
        if self.dim_ordering == 'th':
            self.n_ch = input_shape[1]
            self.n_freq = input_shape[2]
            self.n_time = input_shape[3]
        else:
            self.n_ch = input_shape[3]
            self.n_freq = input_shape[1]
            self.n_time = input_shape[2]

        if self.init == 'mel':
            self.filterbank = backend.filterbank_mel(sr=self.sr,
                                                     n_freq=self.n_freq,
                                                     n_mels=self.n_fbs,
                                                     fmin=self.fmin,
                                                     fmax=self.fmax)
        elif self.init == 'log':
            # BUG FIX: was `sr=sr` — `sr` is undefined here (NameError);
            # the stored sampling rate is self.sr.
            self.filterbank = backend.filterbank_log(sr=self.sr,
                                                     n_freq=self.n_freq,
                                                     n_bins=self.n_fbs,
                                                     bins_per_octave=self.bins_per_octave,
                                                     fmin=self.fmin)

        if self.trainable_fb:
            self.trainable_weights.append(self.filterbank)
        else:
            self.non_trainable_weights.append(self.filterbank)
        self.built = True

    def get_output_shape_for(self, input_shape):
        if self.dim_ordering == 'th':
            return (input_shape[0], self.n_ch, self.n_fbs, self.n_time)
        else:
            return (input_shape[0], self.n_fbs, self.n_time, self.n_ch)

    def call(self, x, mask=None):
        # move the frequency axis last, project through the filterbank,
        # then restore the original axis order
        if self.dim_ordering == 'th':
            x = K.permute_dimensions(x, [0, 1, 3, 2])
        else:
            x = K.permute_dimensions(x, [0, 3, 2, 1])
        output = K.dot(x, self.filterbank)
        if self.dim_ordering == 'th':
            return K.permute_dimensions(output, [0, 1, 3, 2])
        else:
            return K.permute_dimensions(output, [0, 3, 2, 1])

    def get_config(self):
        config = {'n_fbs': self.n_fbs,
                  'sr': self.sr,
                  'init': self.init,
                  'fmin': self.fmin,
                  'fmax': self.fmax,
                  'bins_per_octave': self.bins_per_octave,
                  'trainable_fb': self.trainable_fb}
        base_config = super(Filterbank, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
|
{"hexsha": "17b42c2fad38d4494d1a1034a2d2da7f4ce1ac71", "size": 3881, "ext": "py", "lang": "Python", "max_stars_repo_path": "extractor/lib/kapre/kapre/filterbank.py", "max_stars_repo_name": "rtjfarrimond/spotify-recommender", "max_stars_repo_head_hexsha": "798a0b2bb500fcc1a7165071b8d9583b69a57ae0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-09T13:18:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T13:18:57.000Z", "max_issues_repo_path": "extractor/lib/kapre/kapre/filterbank.py", "max_issues_repo_name": "rtjfarrimond/spotify-recommender", "max_issues_repo_head_hexsha": "798a0b2bb500fcc1a7165071b8d9583b69a57ae0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "extractor/lib/kapre/kapre/filterbank.py", "max_forks_repo_name": "rtjfarrimond/spotify-recommender", "max_forks_repo_head_hexsha": "798a0b2bb500fcc1a7165071b8d9583b69a57ae0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9351851852, "max_line_length": 90, "alphanum_fraction": 0.5243493945, "include": true, "reason": "import numpy", "num_tokens": 921}
|
import odradio.viewfactors.viewfactors as vf
from odradio.viewfactors.viewfactors import Point,Segment
import numpy as np
from numpy import linalg as LA
from collections import namedtuple
# machine epsilon for float64
eps = np.finfo(float).eps
# Per-region metadata record for the radiosity simulator.
# BUG FIX: the `verbose` keyword of collections.namedtuple was deprecated in
# Python 3.6 and removed in 3.7 — passing it raises TypeError on modern
# interpreters, so it is dropped here.
Regiondata = namedtuple('Regiondata', ['name', 'start', 'end', 'area', 'source', 'issource'])
def jacobi(A, b, Nmax, x=None):
    """Solve A x = b with Jacobi iteration.

    Iterates x <- (b - R x) / diag(A) (R = off-diagonal part of A) until the
    relative residual ||b - A x|| / ||b|| drops below 1e-13 or Nmax sweeps
    have run.  Returns (solution, iterations_used); iterations_used == Nmax
    signals non-convergence.  Starts from the zero vector when x is None.
    """
    if x is None:
        x = np.zeros(len(A[0]))
    diagonal = np.diag(A)
    off_diagonal = A - np.diagflat(diagonal)
    b_norm = LA.norm(b)
    for iteration in range(Nmax):
        residual = b - np.dot(A, x)
        if LA.norm(residual) / b_norm < 1e-13:
            return x, iteration
        x = (b - np.dot(off_diagonal, x)) / diagonal
    return x, Nmax
class Simulator:
    """Axisymmetric radiosity simulator.

    Workflow: AddRegion() for each boundary segment, SetupLSE() to assemble
    the view-factor linear system, Simulate() to solve it, then
    ExtractResults() to locate a region's element range.
    """

    def __init__(self, name):
        """Create an empty simulator; all result fields start as None and are
        populated by SetupLSE()/Simulate()."""
        self.name = name
        self.regiondata = []   # Regiondata records, one per region
        self.region = []       # per-region lists of Segment elements
        self.source = []       # per-region lists of element source strengths
        self.stick = []        # per-region lists of sticking coefficients
        self.n = None          # total element count
        self.F = None          # (n, n) view-factor matrix
        self.rho = None        # reflectivity per element (1 - sticking)
        self.src = None        # flattened source vector
        self.s = None          # flattened sticking vector
        self.RHO = None        # diag(rho)
        self.RHOF_T = None     # (RHO @ F)^T
        self.A = None          # system matrix I - (RHO F)^T
        self.rhs = None        # right-hand side F^T src
        self.area = None       # element areas
        self.abs = None        # absorbed flux per element
        self.absn = None       # absorbed flux per unit area
        self.absnsrc = None    # absorbed flux normalized by source flux
        self.rc = None         # element centroid r-coordinates
        self.zc = None         # element centroid z-coordinates
        self.dirn = None       # direct (incident) flux per unit area
        self.dirnsrc = None    # direct flux normalized by source flux

    def AddRegion(self, name, start, end, sticking, source, nelem):
        """Discretize the straight segment start->end into `nelem` elements.

        `sticking` is applied uniformly to every element; `source` is the
        region's total source strength, distributed by element area.
        """
        areatotal = vf.Area(Segment(start, end))
        self.regiondata.append(Regiondata(name=name, start=start, end=end,
                                          area=areatotal, source=source,
                                          issource=source > 0))
        r = np.linspace(start.r, end.r, nelem + 1)
        z = np.linspace(start.z, end.z, nelem + 1)
        reg = [Segment(Point(z[i], r[i]), Point(z[i + 1], r[i + 1]))
               for i in range(nelem)]
        # distribute the region source over elements proportionally to area
        src = [vf.Area(seg) / areatotal * source for seg in reg]
        self.source.append(src)
        self.region.append(reg)
        self.stick.append(list(np.ones(nelem) * sticking))

    def SetupLSE(self):
        """Assemble the radiosity linear system A x = rhs.

        Computes pairwise view factors (symmetric fill, lower triangle
        driven), element areas/centroids, and the system matrix
        A = I - (diag(rho) F)^T.  Raises ValueError when the view-factor
        rows do not each sum to ~1, i.e. the geometry is not closed.
        """
        n = sum(len(r) for r in self.region)
        self.n = n
        F = np.zeros(shape=(n, n))
        area = np.zeros(shape=(n))
        rc = np.zeros(shape=(n))
        zc = np.zeros(shape=(n))
        for ri, ra in enumerate(self.region):
            ioff = sum(len(r) for r in self.region[0:ri])
            for i, _ in enumerate(ra):
                area[ioff + i] = vf.Area(ra[i])
                rc[ioff + i] = vf.rcenter(ra[i])
                zc[ioff + i] = vf.zcenter(ra[i])
                for rj, rb in enumerate(self.region):
                    joff = sum(len(r) for r in self.region[0:rj])
                    for j, _ in enumerate(rb):
                        # fill only the lower triangle; ViewFactor returns
                        # both directions at once
                        if joff + j > ioff + i:
                            continue
                        F[ioff + i, joff + j], F[joff + j, ioff + i] = \
                            vf.ViewFactor(ra[i], rb[j])
        self.F = F
        self.area = area
        self.rc = rc
        self.zc = zc
        # flatten per-region source and sticking lists
        src = [e for region_src in self.source for e in region_src]
        self.src = np.array(src)
        s = [e for region_stick in self.stick for e in region_stick]
        self.s = np.array(s)
        self.rho = 1 - self.s
        self.RHO = np.diag(self.rho)
        I = np.identity(n)
        self.RHOF_T = np.dot(self.RHO, F).T
        self.A = np.asarray(I - self.RHOF_T)
        self.rhs = np.dot(F.T, src)
        # check if geometry is closed (each view-factor row must sum to 1)
        if np.abs(np.sum(np.sum(F, axis=1)) - n) > n * 1e-8:
            raise ValueError('Geometry is not closed')

    def Simulate(self):
        """Solve the assembled system and derive per-element fluxes.

        Returns the Jacobi iteration count (equal to the 1000-sweep cap when
        the solver did not converge).  Raises ValueError when the absorbed
        energy fails to balance the source energy.
        """
        # removed leftover debug print of the iteration count
        self.rec, nmax = jacobi(self.A, self.rhs, 1000)
        self.abs = self.rec * self.s
        if abs(sum(self.abs) - sum(self.src)) > 1e-6 * sum(self.src):
            raise ValueError('absorbed enegery does not match source energy')
        self.absn = self.abs / self.area
        # normalize by the (single) source region's flux density
        srcregion = next(r for r in self.regiondata if r.issource)
        srcnorm = srcregion.source / srcregion.area
        self.absnsrc = self.absn / (srcnorm * self.s)
        self.dirn = self.rhs / self.area
        self.dirnsrc = (self.dirn * self.s) / (srcnorm * self.s)
        return nmax

    def ExtractResults(self, name):
        """Return (start, end, region_index) — the half-open element range
        belonging to region `name` plus its index.  Raises ValueError when
        the name is ambiguous."""
        ri = [i for i, x in enumerate(self.regiondata) if x.name == name]
        if len(ri) > 1:
            raise ValueError('more than one region with than name')
        st = sum(len(r) for r in self.region[0:ri[0]])
        en = st + len(self.region[ri[0]])
        return st, en, ri[0]
|
{"hexsha": "54d4ac2670e3e3c4d0081a84db4435b9b8dbcddc", "size": 4727, "ext": "py", "lang": "Python", "max_stars_repo_path": "radiosity/odradio/simulation/simulation.py", "max_stars_repo_name": "hptcad/radiosity", "max_stars_repo_head_hexsha": "c4a7dba3acd42c4954880d5c7314c2cda14fdd1b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "radiosity/odradio/simulation/simulation.py", "max_issues_repo_name": "hptcad/radiosity", "max_issues_repo_head_hexsha": "c4a7dba3acd42c4954880d5c7314c2cda14fdd1b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "radiosity/odradio/simulation/simulation.py", "max_forks_repo_name": "hptcad/radiosity", "max_forks_repo_head_hexsha": "c4a7dba3acd42c4954880d5c7314c2cda14fdd1b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.503649635, "max_line_length": 139, "alphanum_fraction": 0.5229532473, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1290}
|
import unittest
import numpy as np
from model_training.regression.multi_output.multi_output_regression import MultiOutputRegression
class TestMultiOutputRegression(unittest.TestCase):
    """Checks that MultiOutputRegression.predict passes the provided
    outcome vector through unchanged for the mocked fact vector."""

    # maps classifier column index -> (outcome name, value type)
    mock_classifier_index = {
        0: ('additional_indemnity_money', 'int'),
        1: ('declares_housing_inhabitable', 'bool'),
        2: ('declares_resiliation_is_correct', 'bool'),
        3: ('orders_expulsion', 'bool'),
        4: ('orders_immediate_execution', 'bool'),
        5: ('orders_resiliation', 'bool'),
        6: ('orders_tenant_pay_first_of_month', 'bool'),
        7: ('rejects_landlord_demand', 'bool'),
        8: ('rejects_tenant_demand', 'bool'),
        9: ('tenant_ordered_to_pay_landlord', 'int'),
        10: ('additional_indemnity_date', 'int'),
        11: ('tenant_ordered_to_pay_landlord_legal_fees', 'int')
    }
    mock_facts = [1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1]
    mock_outcomes = np.array([1, 0, 1, 1, 0, 1, 0, 0, 0, 0])

    def test_predict(self):
        model = MultiOutputRegression()
        model.months_unpaid_index = 1
        model.monthly_payment_index = 1
        predicted = model.predict(self.mock_facts, self.mock_outcomes)
        for i, value in enumerate(predicted):
            self.assertEqual(self.mock_outcomes[i], value)
|
{"hexsha": "9f33d24e3b55b47b160b7276980abbd73c8bd9f1", "size": 1319, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ml_service/model_training/regression/multi_output/multi_output_regression_test.py", "max_stars_repo_name": "arekmano/JusticeAI", "max_stars_repo_head_hexsha": "a996a8903f7d05d5b5c275a1a394d0f55664f299", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-11T21:35:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-11T21:35:14.000Z", "max_issues_repo_path": "src/ml_service/model_training/regression/multi_output/multi_output_regression_test.py", "max_issues_repo_name": "mihaiqc/JusticeAI", "max_issues_repo_head_hexsha": "311a080112b17cfb50b445df1a803c6a320c5405", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-09-24T22:54:09.000Z", "max_issues_repo_issues_event_max_datetime": "2017-09-24T22:54:09.000Z", "max_forks_repo_path": "src/ml_service/model_training/regression/multi_output/multi_output_regression_test.py", "max_forks_repo_name": "mihaiqc/JusticeAI", "max_forks_repo_head_hexsha": "311a080112b17cfb50b445df1a803c6a320c5405", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7941176471, "max_line_length": 96, "alphanum_fraction": 0.6588324488, "include": true, "reason": "import numpy", "num_tokens": 366}
|
      SUBROUTINE DDR2
C
C     DYNAMIC DATA RECOVERY--PART 2 --MODE ACCELERATION
C
C     DMAP SEQUENCE
C
C     INPUTS = 9
C
C     USETD,VUD,PD,K2DD,B2DD,MDD,FRL,LLL,DM
C
C     OUTPUTS = 3
C
C     UAV,UEV,PAF
C
C     SCRATCHES = 6
C
C     PARAMETERS 1 BCD, 3INTEGERS
C
C     (REVIEW) FILE NUMBER CONVENTION APPEARS TO BE 10X = INPUT,
C     20X = OUTPUT, 30X = SCRATCH GINO FILES -- CONFIRM AGAINST THE
C     CALLING DMAP SEQUENCE.
C
      INTEGER USETD,PD,B2DD,FRL,DM,UAV,PAF,
     1        SCR2,SCR3,SCR4,SCR5,SCR6,SCR7,
     2        TYPE,REACT,TRAN,USET,VUD,PAD,UEV,PL
      COMMON /BLANK / TYPE(2),NOUE,REACT,FRQSET
      COMMON /BITPOS/ UM,UO,UR,USG,USB,UL,UA,UF,US,UN,UG,UE,UP,UNE,UFE,
     1                UD
      COMMON /PATX / LC,N,NO,N4,USET
      COMMON /ZZZZZZ/ CORE(1)
      DATA USETD , VUD, PD, K2DD,B2DD,MDD,FRL,LLL,DM /
     1      101  , 102,103,  104, 105,106,107,108,109 /
      DATA UAV   , UEV, PAF, TRAN /
     1      201  , 202, 203, 4HTRAN /
      DATA SCR2  , SCR3,SCR4,SCR5,SCR6,SCR7,PAD /
     1      302  , 303 , 304, 305, 306, 301,302 /
C
C
C     KORSZ RETURNS THE SIZE OF AVAILABLE OPEN CORE
      LC   = KORSZ(CORE)
      VUD  = 102
      SCR7 = 301
      USET = USETD
      PL   = SCR6
      ISOL = SCR7
C     WHEN EXTRA POINTS EXIST (NOUE .GE. 0) PAD GOES TO A SCRATCH FILE,
C     OTHERWISE IT CAN BE WRITTEN DIRECTLY TO THE PAF OUTPUT FILE
      IF (NOUE .GE. 0) GO TO 10
      PAD = PAF
   10 CONTINUE
C     (REVIEW) FOR NON-TRANSIENT (FREQUENCY) ANALYSIS THE SOLUTION
C     SCRATCH CAN ALIAS AN OUTPUT/INPUT FILE DIRECTLY
      IF (TYPE(1) .NE. TRAN) SCR7 = UAV
      IF (TYPE(1).NE.TRAN .AND. REACT.LT.0 .AND. NOUE.GE.0) SCR7 = VUD
C
C     MODE ACCELERATION
C
C     FORM PAD
C
C
      CALL DDR1A (PD,K2DD,B2DD,MDD,VUD,PAD,FRL,FRQSET,SCR3,SCR4,SCR5,
     1            SCR6,TYPE(1),SCR7)
C
C     DISP ON SCR7 IN TRANSIENT
C
      IF (NOUE .LT. 0) GO TO 50
C     EXTRA POINTS PRESENT -- SPLIT THE D-SET INTO A-SET AND E-SET
      CALL CALCV (SCR3,UD,UA,UE,CORE(1))
      CALL SSG2A (VUD,SCR4,UEV,SCR3)
C
C     UA IS ON SCR4
C
      VUD = SCR4
C
C     BREAK UP PAD
C
      CALL SSG2A (PAD,PAF,SCR5,SCR3)
   50 IF (REACT .GE. 0) GO TO 90
C
C     UR NULL
C
C     NO SUPPORTED (RIGID BODY) DOF -- SOLVE DIRECTLY WITH LLL
      IF (TYPE(1) .NE. TRAN) SCR7 = ISOL
      IF (TYPE(1).NE.TRAN .AND. NOUE.LT.0) SCR7 = UAV
      CALL SSG3A (0,LLL,PAF,SCR7,SCR3,SCR6,-1,0)
   60 IF (TYPE(1) .NE. TRAN) GO TO 80
C
C     MERGE RECALCULATED SOLUTIONS AND ACCEL AND VELOCITY
C
      ISOL = UAV
      IF (NOUE .LT. 0) GO TO 70
      ISOL = SCR5
   70 CALL DDR1B (VUD,SCR7,ISOL)
C
C     BUILD UP TO DSIZE ADDING IN UEV
C
   80 IF (NOUE .LT. 0) GO TO 30
      CALL SDR1B (SCR4,ISOL,UEV,UAV,UD,UA,UE,USETD,0,0)
   30 RETURN
C
C     FREE BODY PROBLEM
C
C     SUPPORTED DOF PRESENT -- PARTITION OUT THE R-SET, SOLVE THE
C     L-SET SYSTEM, RECOVER R-SET MOTION THROUGH THE RIGID BODY
C     TRANSFORMATION DM, THEN MERGE BACK TO A-SET SIZE
   90 CALL CALCV (SCR3,UA,UL,UR,CORE(1))
C
C     PARTITION PAF AND UA
C
      CALL SSG2A (PAF,PL,SCR5,SCR3)
      IVEC = VUD
      IF (TYPE(1) .EQ. TRAN) IVEC = SCR7
      CALL SSG2A (IVEC,SCR2,SCR5,SCR3)
C
C     UR IS ON SCR5
C
      CALL SSG3A (0,LLL,PL,SCR3,SCR2,SCR6,-1,0)
      CALL SSG2B (DM,SCR5,SCR3,SCR4,0,2,1,SCR6)
      CALL SDR1B (SCR3,SCR4,SCR5,SCR7,UA,UL,UR,USETD,0,0)
      GO TO 60
      END
|
{"hexsha": "2bd97beaeb42aaea793fbbe1885aa5df5f5acf90", "size": 2726, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "mis/ddr2.f", "max_stars_repo_name": "ldallolio/NASTRAN-95", "max_stars_repo_head_hexsha": "6d2c175f5b53ebaec4ba2b5186f7926ef9d0ed47", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-01-09T14:33:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-18T11:51:42.000Z", "max_issues_repo_path": "mis/ddr2.f", "max_issues_repo_name": "gassive/NASTRAN95", "max_issues_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2016-01-17T07:30:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T19:37:44.000Z", "max_forks_repo_path": "mis/ddr2.f", "max_forks_repo_name": "gassive/NASTRAN95", "max_forks_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-04-07T20:51:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T14:16:01.000Z", "avg_line_length": 25.2407407407, "max_line_length": 72, "alphanum_fraction": 0.5385179751, "num_tokens": 1205}
|
# -*- coding: utf8 -*-
# # #
# Downscale PCMDI AR5 data to a pre-processed climatology
# extent, resolution, reference system
#
# ::NOTE:: this version of DeltaDownscale is built for tmin/tmax
# data where our old method causes for the mins and max and means to
# cross each other in non-normal ways.
#
# Author: Michael Lindgren (malindgren@alaska.edu)
# # #
from downscale import DeltaDownscale, utils
import os, rasterio
import numpy as np
import xarray as xr
def delta_mm( fn, mean_fn, variable, mean_variable='tas' ):
    '''
    Compute extreme-minus-mean deltas at the model's native resolution.

    fn: path to the NetCDF holding `variable` (the extreme series)
    mean_fn: path to the NetCDF holding `mean_variable` (the mean series)

    Returns an xarray.Dataset with the difference stored under `variable`.
    '''
    extreme_series = xr.open_dataset( fn )[ variable ]
    mean_series = xr.open_dataset( mean_fn )[ mean_variable ]
    return ( extreme_series - mean_series ).to_dataset( name=variable )
class DeltaDownscaleMinMax( DeltaDownscale ):
    def __init__( self, mean_ds=None, mean_variable=None, *args, **kwargs ):
        '''
        Min/max variant of DeltaDownscale: deltas are computed against the
        mean series in time rather than against a 1961-1990 climatology, so
        all data falls into the 'historical' category.

        mean_ds: downscale.Dataset holding the raw-resolution mean variable
                 (required; raises Exception when None).
        mean_variable: name of the mean variable inside `mean_ds`.
        Remaining kwargs are forwarded to DeltaDownscale.__init__.

        NOTE(review): *args is accepted but never used.  Interpolation of
        NAs is deliberately suppressed during the super() call and applied
        afterwards to the anomalies instead of the input series.
        '''
        # if there is no mean dataset to work with --> party's over
        print('mean:{}'.format(mean_ds))
        if mean_ds is None:
            raise Exception( 'you must include the mean variable in the raw resolution \
                as arg `mean_ds`=downscale.Dataset object or use `DeltaDownscale`' )

        # setup new args
        self.mean_ds = mean_ds.ds[ mean_variable ] # new
        self.mean_variable = mean_variable
        print( kwargs.keys() )
        # force a false for interpolation of NA's with Super...
        if 'interp' in kwargs.keys():
            print( 'kwargs-interpval:{}'.format(kwargs['interp']) )
            # stash the caller's interp choice, then disable it for super()
            interp_val = kwargs.pop( 'interp' )
            kwargs.update( interp=False )
            print( 'interp_val:{}'.format(interp_val) )

        # remove the super call since this is python2 and it suuucks...
        # DeltaDownscale.__init__( self, **kwargs )
        super( DeltaDownscaleMinMax, self ).__init__( **kwargs )
        # print('finished super()!')

        if 'interp' in kwargs:
            # now reset the interpolation value so we can interpolate the anomalies
            # INSTEAD of interpolating the input data series... This may be better practice overall.
            # NOT changing it.
            self.interp = interp_val

        # mask some properties from the super() class. that are unneeded.
        self.anomalies = None
        self.clim_begin = None
        self.clim_end = None

        # calc deltas between the mean and the extreme data set
        print( 'calc anoms minmax' )
        self._calc_anomalies()

        # # TESTING
        # print('type_mean_ds: {} '.format( type( self.mean_ds ) ) )
        print( 'self.interp: {}'.format(self.interp) )
        if self.interp == True:
            print( 'running interpolation across NAs -- base resolution -- !ANOMALIES! dataset' )
            self.interp_na( )
def _calc_climatolgy( self ):
''' MASK THIS FOR MINMAX slice / aggregate to climatology using mean'''
self.climatology = None
def _calc_anomalies( self ):
''' calculate deltas but call them anomalies to fit the `downscale` pkg methods '''
if self.downscaling_operation == 'add':
print( 'calc_anom minmax version' )
# anomalies = (self.historical.ds[ self.historical.variable ] - self.mean_ds.ds[ self.mean_variable ] ) #.to_dataset( name=variable )
self.anomalies = (self.ds - self.mean_ds ) #.to_dataset( name=variable )
elif self.downscaling_operation == 'mult':
# anomalies = (self.historical.ds[ self.historical.variable ] / self.mean_ds.ds[ self.mean_variable ] ) #.to_dataset( name=variable )
self.anomalies = (self.ds / self.mean_ds ) #.to_dataset( name=variable )
else:
NameError( '_calc_anomalies (ar5): value of downscaling_operation must be "add" or "mult" ' )
# self.mean_ds = None # watch this one... trying to save on RAM...
# def _interp_na_mean( self ):
# '''
# np.float32
# method = [str] one of 'cubic', 'near', 'linear'
# return a list of dicts to pass to the xyz_to_grid in parallel
# '''
# from copy import copy
# import pandas as pd
# import numpy as np
# from pathos.mp_map import mp_map
# # remove the darn scientific notation
# np.set_printoptions( suppress=True )
# output_dtype = np.float32
# # if 0-360 leave it alone
# if ( self.mean_ds.lon > 200.0 ).any() == True:
# dat, lons = self.mean_ds.data, self.mean_ds.lon
# self._lonpc = lons
# else:
# # greenwich-centered rotate to 0-360 for interpolation across pacific
# dat, lons = self.utils.rotate( self.mean_ds.values, self.mean_ds.lon, to_pacific=True )
# self._rotated = True # update the rotated attribute
# self._lonpc = lons
# # mesh the lons and lats and unravel them to 1-D
# xi, yi = np.meshgrid( self._lonpc, self.mean_ds.lat.data )
# lo, la = [ i.ravel() for i in (xi,yi) ]
# # setup args for multiprocessing
# df_list = [ pd.DataFrame({ 'x':lo, 'y':la, 'z':d.ravel() }).dropna( axis=0, how='any' ) for d in dat ]
# args = [ {'x':np.array(df['x']), 'y':np.array(df['y']), 'z':np.array(df['z']), \
# 'grid':(xi,yi), 'method':self.historical.method, 'output_dtype':output_dtype } for df in df_list ]
# print( 'processing interpolation to convex hull in parallel using {} cpus.'.format( self.ncpus ) )
# dat_list = mp_map( self.wrap, args, nproc=self.ncpus )
# dat_list = [ np.array(i) for i in dat_list ] # drop the output mask
# dat = np.array( dat_list )
# lons = self._lonpc
# if self._rotated == True: # rotate it back
# dat, lons = self.utils.rotate( dat, lons, to_pacific=False )
# # place back into a new xarray.Dataset object for further processing
# # self.mean_ds = self.mean_ds.update( { self.historical.variable:( ['time','lat','lon'], dat ) } )
# self.mean_ds.data = dat
# print( 'ds interpolated updated into self.mean_ds' )
# return 1
def interp_na( self ):
    '''
    Fill NaN holes in self.anomalies by interpolating each timestep
    across the convex hull of its valid points, in parallel.

    Uses the interpolation method named by self.historical.method
    (one of 'cubic', 'near', 'linear') and self.ncpus worker processes.

    Side effects:
        self._lonpc is set to the (possibly pacific-centered) longitudes,
        self._rotated is set True when the grid was rotated, and
        self.anomalies.data is replaced with the interpolated values.

    Returns 1 on completion.
    '''
    from copy import copy
    import pandas as pd
    import numpy as np
    from pathos.mp_map import mp_map
    # remove the darn scientific notation
    np.set_printoptions( suppress=True )
    output_dtype = np.float32
    # if longitudes are already 0-360 (pacific-centered) leave them alone
    if ( np.array(self.anomalies.lon) > 200.0 ).any() == True:
        dat, lons = np.array(self.anomalies.data), np.array(self.anomalies.lon)
        self._lonpc = lons
    else:
        # greenwich-centered: rotate to 0-360 for interpolation across pacific
        dat, lons = self.utils.rotate( np.array(self.anomalies.values), np.array(self.anomalies.lon), to_pacific=True )
        self._rotated = True # update the rotated attribute
        self._lonpc = lons
    # mesh the lons and lats and unravel them to 1-D
    xi, yi = np.meshgrid( self._lonpc, self.anomalies.lat.data )
    lo, la = [ i.ravel() for i in (xi,yi) ]
    # setup args for multiprocessing: one DataFrame of valid (non-NaN) points per timestep
    df_list = [ pd.DataFrame({ 'x':lo, 'y':la, 'z':d.ravel() }).dropna( axis=0, how='any' ) for d in dat ]
    args = [ {'x':np.array(df['x']), 'y':np.array(df['y']), 'z':np.array(df['z']), \
            'grid':(xi,yi), 'method':self.historical.method, 'output_dtype':output_dtype } for df in df_list ]
    print( 'processing interpolation to convex hull in parallel using {} cpus.'.format( self.ncpus ) )
    dat_list = mp_map( self.wrap, args, nproc=self.ncpus )
    dat_list = [ np.array(i) for i in dat_list ] # drop the output mask
    dat = np.array( dat_list )
    lons = self._lonpc
    # NOTE(review): self._rotated is assumed to be initialized (False) elsewhere,
    # e.g. in __init__ -- confirm, otherwise this raises AttributeError when the
    # grid was already pacific-centered and the else-branch above never ran.
    if self._rotated == True: # rotate it back
        dat, lons = self.utils.rotate( dat, lons, to_pacific=False )
    # place the interpolated values back onto the xarray object for further processing
    # self.anomalies = self.anomalies.update( { self.historical.variable:( ['time','lat','lon'], dat ) } )
    self.anomalies.data = dat
    print( 'anomalies interpolated updated into self.anomalies' )
    return 1
def downscale( self, output_dir, prefix=None ):
    '''
    Downscale the computed anomalies onto the baseline climatology rasters
    and write one GeoTIFF per timestep.

    updated version of downscale function to mask the non-minmax version and how
    it works with baseline climatology vs. the full mean series as with the min/max

    Arguments:
        output_dir = [str] directory where the output GeoTIFFs are written
        prefix = [str] optional filename prefix; when given, outputs are named
                '<prefix>_<MM>_<YYYY>.tif' instead of the default
                variable/metric/units/project/model/scenario naming scheme

    Returns:
        output_dir (the rasters are written as a side effect)
    '''
    import affine, rasterio
    from affine import Affine
    import itertools
    from functools import partial
    from pathos.mp_map import mp_map
    # map the downscaling_operation name to the function combining anomalies with baseline
    operation_switch = { 'add':self.utils.add, 'mult':self.utils.mult }
    def two_digit_month( x ):
        ''' make 1 digit month a standard 2-digit for output filenames '''
        month = str( x )
        if len(month) == 1:
            month = '0'+month
        return month
    # '<MM>_<YYYY>' suffix for every timestep in the anomalies series
    time_suffix = [ '_'.join([two_digit_month( t.month ), str(t.year)]) for t in self.anomalies.time.to_pandas() ]
    # handle missing variable / model names
    if self.varname != None:
        variable = self.varname
    elif self.historical.variable != None:
        variable = self.historical.variable
    else:
        variable = 'variable'
    if self.modelname != None:
        model = self.modelname
    elif self.historical.model != None:
        model = self.historical.model
    else:
        model = 'model'
    output_filenames = [ os.path.join( output_dir, '_'.join([variable, self.historical.metric, self.historical.units, \
                self.historical.project, model, self.historical.scenario, ts]) + '.tif') for ts in time_suffix ]
    # if there is a specific name prefix, use it
    if prefix != None:
        output_filenames = [ os.path.join( output_dir, '_'.join([prefix, ts]) + '.tif' ) for ts in time_suffix ]
    # rotate to pacific-centered if needed and build the source affine transform
    if ( self.anomalies.lon.data > 200.0 ).any() == True:
        dat, lons = ( np.array(self.anomalies), np.array(self.anomalies.lon) )
        self.anomalies_rot = dat
        src_transform = self.historical.transform_from_latlon( self.ds.lat, lons )
        # print( 'anomalies NOT rotated!' )
    else:
        dat, lons = self.utils.shiftgrid( 0., np.array(self.anomalies), np.array(self.anomalies.lon) )
        self.anomalies_rot = dat
        src_transform = self.historical.transform_from_latlon( self.ds.lat, lons )
        # print( 'anomalies rotated!' )
    # # # IMPORTANT: list all files since it without a REPEAT since it is tasmin/max...
    rstlist = self.baseline.filelist
    # normalize anomalies_rot to a bare numpy array regardless of input container
    if isinstance( self.anomalies_rot, xr.Dataset ):
        self.anomalies_rot = self.anomalies_rot[ self.historical.variable ].data
    elif isinstance( self.anomalies_rot, xr.DataArray ):
        self.anomalies_rot = self.anomalies_rot.data
    else:
        self.anomalies_rot = self.anomalies_rot
    # one work unit per (timestep slice, baseline raster, output filename)
    args = zip( self.anomalies_rot, rstlist, output_filenames )
    args = [{'anom':i, 'base':j, 'output_filename':k,\
            'downscaling_operation':self.downscaling_operation, \
            'post_downscale_function':self.post_downscale_function,\
            'mask':self.mask, 'mask_value':self.mask_value } for i,j,k in args ]
    # partial and wrapper
    f = partial( self.utils.interp_ds, src_crs=self.src_crs, src_nodata=None, \
            dst_nodata=None, src_transform=src_transform, resample_type=self.resample_type )
    # NOTE(review): passes anom=self.anom here while the rest of this method uses
    # self.anomalies -- confirm the attribute self.anom exists on this class.
    run = partial( self.utils._run_ds, f=f, operation_switch=operation_switch, anom=self.anom, mask_value=self.mask_value )
    # run it
    out = mp_map( run, args, nproc=self.ncpus )
    return output_dir
# @staticmethod
# def interp_ds( anom, base, src_crs, src_nodata, dst_nodata, src_transform, resample_type='bilinear',*args, **kwargs ):
# '''
# anom = [numpy.ndarray] 2-d array representing a single monthly timestep of the data to be downscaled.
# Must also be representative of anomalies.
# base = [str] filename of the corresponding baseline monthly file to use as template and downscale
# baseline for combining with anomalies.
# src_transform = [affine.affine] 6 element affine transform of the input anomalies. [should be greenwich-centered]
# resample_type = [str] one of ['bilinear', 'count', 'nearest', 'mode', 'cubic', 'index', 'average', 'lanczos', 'cubic_spline']
# '''
# import rasterio
# from rasterio.warp import reproject, RESAMPLING
# from affine import Affine
# resampling = {'average':RESAMPLING.average,
# 'cubic':RESAMPLING.cubic,
# 'lanczos':RESAMPLING.lanczos,
# 'bilinear':RESAMPLING.bilinear,
# 'cubic_spline':RESAMPLING.cubic_spline,
# 'mode':RESAMPLING.mode,
# 'count':RESAMPLING.count,
# 'index':RESAMPLING.index,
# 'nearest':RESAMPLING.nearest }
# # # lets try to flip the data and affine and do this right.
# # a,b,c,d,e,f,g,h,i = src_transform
# # src_transform = Affine( a, b, c, d, -(e), np.abs(f) ) # DANGEROUS
# # anom = np.flipud( anom )
# # # end new stuff for flipping... <-- this should happen before the anoms and the src_transform get to this point.
# base = rasterio.open( base )
# baseline_arr = base.read( 1 )
# baseline_meta = base.meta
# baseline_meta.update( compress='lzw' )
# output_arr = np.empty_like( baseline_arr )
# reproject( anom, output_arr, src_transform=src_transform, src_crs=src_crs, src_nodata=src_nodata,
# dst_transform=baseline_meta['affine'], dst_crs=baseline_meta['crs'],
# dst_nodata=dst_nodata, resampling=resampling[ resample_type ], SOURCE_EXTRA=1000 )
# return output_arr
# def sort_files( files, split_on='_', elem_month=-2, elem_year=-1 ):
# '''
# sort a list of files properly using the month and year parsed
# from the filename. This is useful with SNAP data since the standard
# is to name files like '<prefix>_MM_YYYY.tif'. If sorted using base
# Pythons sort/sorted functions, things will be sorted by the first char
# of the month, which makes thing go 1, 11, ... which sucks for timeseries
# this sorts it properly following SNAP standards as the default settings.
# ARGUMENTS:
# ----------
# files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob.
# split_on = [str] `str` character to split the filename on. default:'_', SNAP standard.
# elem_month = [int] slice element from resultant split filename list. Follows Python slicing syntax.
# default:-2. For SNAP standard.
# elem_year = [int] slice element from resultant split filename list. Follows Python slicing syntax.
# default:-1. For SNAP standard.
# RETURNS:
# --------
# sorted `list` by month and year ascending.
# '''
# import pandas as pd
# months = [ int(fn.split('.')[0].split( split_on )[elem_month]) for fn in files ]
# years = [ int(fn.split('.')[0].split( split_on )[elem_year]) for fn in files ]
# df = pd.DataFrame( {'fn':files, 'month':months, 'year':years} )
# df_sorted = df.sort_values( ['year', 'month' ] )
# return df_sorted.fn.tolist()
# def only_years( files, begin=1901, end=2100, split_on='_', elem_year=-1 ):
# '''
# return new list of filenames where they are truncated to begin:end
# ARGUMENTS:
# ----------
# files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob.
# begin = [int] four digit integer year of the begin time default:1901
# end = [int] four digit integer year of the end time default:2100
# split_on = [str] `str` character to split the filename on. default:'_', SNAP standard.
# elem_year = [int] slice element from resultant split filename list. Follows Python slicing syntax.
# default:-1. For SNAP standard.
# RETURNS:
# --------
# sliced `list` to begin and end year.
# '''
# import pandas as pd
# years = [ int(fn.split('.')[0].split( split_on )[elem_year]) for fn in files ]
# df = pd.DataFrame( { 'fn':files, 'year':years } )
# df_slice = df[ (df.year >= begin ) & (df.year <= end ) ]
# return df_slice.fn.tolist()
|
{"hexsha": "a4b9e3e0c026fa1ff488f49a7ef39648d4e3db7e", "size": 15730, "ext": "py", "lang": "Python", "max_stars_repo_path": "downscale/ds_minmax.py", "max_stars_repo_name": "ua-snap/downscale", "max_stars_repo_head_hexsha": "3fe8ea1774cf82149d19561ce5f19b25e6cba6fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-06-24T21:55:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T16:32:54.000Z", "max_issues_repo_path": "downscale/ds_minmax.py", "max_issues_repo_name": "ua-snap/downscale", "max_issues_repo_head_hexsha": "3fe8ea1774cf82149d19561ce5f19b25e6cba6fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2016-01-04T23:37:47.000Z", "max_issues_repo_issues_event_max_datetime": "2017-04-17T20:57:02.000Z", "max_forks_repo_path": "downscale/ds_minmax.py", "max_forks_repo_name": "ua-snap/downscale", "max_forks_repo_head_hexsha": "3fe8ea1774cf82149d19561ce5f19b25e6cba6fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-09-16T04:48:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-25T03:46:00.000Z", "avg_line_length": 41.835106383, "max_line_length": 136, "alphanum_fraction": 0.684806103, "include": true, "reason": "import numpy", "num_tokens": 4648}
|
# pylint: disable=invalid-name
"""
Basic routines for ray tracing
To do:
* Add lenses and mirrors
* Properly document
Scott Prahl
May 2018
"""
import numpy as np
import matplotlib.pyplot as plt
__all__ = ['Plane',
'Ray',
'Sphere',
'Prism',
'Lens',
'ThinLens']
class Plane:
    """
    Ray-traceable infinite plane.

    The plane is the solution set of u*x + v*y + w*z + D = 0, where
    (u, v, w) is a unit normal to the plane and D is chosen so that a
    given point lies on the plane.
    """

    def __init__(self, xyz=(0, 0, 0), uvw=(0, 0, 1)):
        """
        Args:
            xyz: any point that lies on the plane
            uvw: direction cosines of the unit normal to the plane
        """
        self.xyz = np.array(xyz)
        self.uvw = np.array(uvw)
        self.D = -np.dot(xyz, uvw)

    def __str__(self):
        point = "xyz=[%.3f,%.3f,%.3f]" % tuple(self.xyz)
        normal = "uvw=[%.3f,%.3f,%.3f]" % tuple(self.uvw)
        norm = np.dot(self.uvw, self.uvw)
        return point + ", " + normal + " norm=%.4f" % norm + " D=%f" % self.D

    def __repr__(self):
        point = "[%f,%f,%f]" % tuple(self.xyz)
        normal = "[%f,%f,%f]" % tuple(self.uvw)
        return "Plane(" + point + ", " + normal + ")"

    def draw_zy(self, ymin=0, ymax=1, zmin=0, zmax=1):
        """
        Draw a representation in the zy-plane (x == 0), clipped to the
        rectangle bounded by ymin/ymax and zmin/zmax.

        In that plane the equation reduces to v*y + w*z + D = 0.
        """
        v, w = self.uvw[1], self.uvw[2]
        if w != 0:
            # solve for z at both y extremes, then clip z to its bounds
            ymn, ymx = ymin, ymax
            zmn = max(zmin, -(self.D + v * ymin) / w)
            zmx = min(zmax, -(self.D + v * ymax) / w)
            print(" zy=(%.2f,%.2f), zy=(%.2f,%.2f)" % (zmn, ymn, zmx, ymx))
            plt.plot([zmn, zmx], [ymn, ymx], 'k')
            return
        if v != 0:
            # vertical-in-z case: solve for y at both z extremes instead
            ymn = max(-(self.D + w * zmin) / v, ymin)
            ymx = min(-(self.D + w * zmax) / v, ymax)
            zmn, zmx = zmin, zmax
            print(" zy=(%.2f,%.2f), zy=(%.2f,%.2f)" % (zmn, ymn, zmx, ymx))
            plt.plot([zmn, zmx], [ymn, ymx], 'k')

    def distance(self, ray):
        """
        Distance from the start of the ray to the plane; np.inf when the
        ray travels (numerically) parallel to the plane.
        """
        cos_angle = np.dot(ray.uvw, self.uvw)
        if abs(cos_angle) < 1e-8:
            return np.inf
        return -(np.dot(ray.xyz, self.uvw) + self.D) / cos_angle

    def is_in_plane(self, point):
        """Return True when the point lies (within 1e-6) on the plane."""
        return abs(np.dot(point, self.uvw) + self.D) < 1e-6
class Ray:
    """
    A ray in 3D: a starting point plus a set of direction cosines.
    """

    def __init__(self, xyz=(0, 0, 0), uvw=(0, 0, 1)):
        """
        Args:
            xyz: starting point of the ray
            uvw: direction cosines of the ray
        """
        self.xyz = np.array(xyz)
        self.uvw = np.array(uvw)

    def __str__(self):
        pos = "xyz=[%.3f,%.3f,%.3f]" % tuple(self.xyz)
        dirn = "uvw=[%.3f,%.3f,%.3f]" % tuple(self.uvw)
        return pos + ", " + dirn + " norm=%.4f" % np.dot(self.uvw, self.uvw)

    def __repr__(self):
        pos = "[%f,%f,%f]" % tuple(self.xyz)
        dirn = "[%f,%f,%f]" % tuple(self.uvw)
        return "Ray(" + pos + ", " + dirn + ")"

    def reflect_from_plane(self, plane):
        """
        Mirror the ray's direction about the plane's normal
        (Spencer and Murty equation 46).
        """
        projection = np.dot(self.uvw, plane.uvw)
        self.uvw = self.uvw - 2 * projection * plane.uvw

    def move(self, d, draw_zy=False):
        """
        Advance the ray a distance d along its direction
        (Spencer and Murty equation 5), optionally drawing the segment
        in the zy-plane (vertical is y, horizontal is z).
        """
        endpoint = self.xyz + d * self.uvw
        if draw_zy:
            plt.plot([self.xyz[2], endpoint[2]], [self.xyz[1], endpoint[1]], 'b')
        self.xyz = endpoint
def refract(uvw, normal, ni, nt):
    """
    Refract (or totally internally reflect) a ray at a surface using the
    vector form of Snell's law (Spencer and Murty, equation 36).

    Args:
        uvw: direction cosines of the incident ray (unit vector)
        normal: unit normal to the surface at the intersection point
        ni: refractive index on the incident side of the surface
        nt: refractive index on the transmitted side of the surface

    Returns:
        direction cosines of the refracted ray, or of the reflected ray
        when total internal reflection occurs.
    """
    cosine = np.dot(normal, uvw)
    # Orient the normal along the direction of propagation so cosine >= 0.
    # Negate into a fresh array (the original code used `normal *= -1`,
    # which silently mutated the caller's array).
    if cosine < 0:
        cosine = -cosine
        normal = -normal
    # Snell's law requires mu = n_incident / n_transmitted (Spencer & Murty
    # use mu = n/n'). The previous code computed nt/ni, which inverted the
    # bending direction and made "total internal reflection" occur when
    # entering the *denser* medium instead of leaving it.
    mu = ni / nt
    a = mu * cosine
    b = mu ** 2 - 1
    disc = a ** 2 - b          # equals 1 - mu**2 * sin(theta_i)**2
    if disc < 0:
        # total internal reflection: mirror about the normal
        out = uvw - 2 * cosine * normal
    else:
        g = -a + np.sqrt(disc)  # sqrt(disc) is cos(theta_t)
        out = mu * uvw + g * normal
    return out
class Sphere:
    """
    Ray-traceable sphere.

    The sphere satisfies (x-x0)**2 + (y-y0)**2 + (z-z0)**2 = R**2 with
    center (x0, y0, z0) and radius R.
    """

    def __init__(self, xyz=(0, 0, 0), R=1.0, n=1.0):
        """
        Args:
            xyz: center of the sphere in cartesian coordinates
            R: radius of the sphere
            n: index of refraction of the sphere
        """
        self.xyz = np.array(xyz)
        self.radius = R
        self.n = n

    def __str__(self):
        center = "center=[%.3f,%.3f,%.3f]" % tuple(self.xyz)
        return center + ", radius = %f" % self.radius

    def __repr__(self):
        center = "[%f,%f,%f]" % tuple(self.xyz)
        return "Sphere(" + center + ", %f" % self.radius + ")"

    def draw_zy(self, ymax=np.inf, side='both'):
        """
        Draw a zy-plane (x == 0) cross-section of the sphere, optionally
        only the left or right half, with |y| clipped to ymax.
        """
        RR = np.sqrt(self.radius**2 - self.xyz[0]**2)
        half_height = min(ymax, RR)
        y = np.linspace(-half_height, half_height, 50)
        r2 = RR**2 - (y - self.xyz[1])**2
        np.place(r2, r2 < 0, 0)   # clamp rounding noise below zero
        z = np.sqrt(r2)
        if side in ('both', 'right'):
            plt.plot(z + self.xyz[2], y, 'k')
        if side in ('both', 'left'):
            plt.plot(-z + self.xyz[2], y, 'k')

    def unit_normal_at(self, point):
        """Return the outward unit normal at a point on the sphere."""
        offset = point - self.xyz
        return offset / np.sqrt(np.dot(offset, offset))

    def distance(self, ray):
        """
        Return the nearest positive distance from the start of the ray to
        the sphere, or np.inf when the ray misses it.
        """
        OS = ray.xyz - self.xyz
        b = 2 * np.dot(ray.uvw, OS)
        c = np.dot(OS, OS) - self.radius * self.radius
        disc = b * b - 4 * c
        if disc < 0:
            return np.inf
        root = np.sqrt(disc)
        # try the closer intersection first; ignore hits behind the start
        for d in ((-b - root) / 2, (-b + root) / 2):
            if d > 1e-6:
                return d
        return np.inf

    def refract(self, ray, outside=True):
        """
        Bend the ray at the sphere surface (Spencer and Murty, eq. 36);
        `outside` selects entering (air->glass) vs leaving (glass->air).
        """
        normal = self.unit_normal_at(ray.xyz)
        if outside:
            return refract(ray.uvw, normal, 1, self.n)
        return refract(ray.uvw, normal, self.n, 1)
class Prism:
    """
    Ray-traceable prism defined by its three planar faces.
    """

    def __init__(self, A, B, C, n):
        """
        Args:
            A: plane object for first side
            B: plane object for second side
            C: plane object for third side
            n: index of refraction of the prism
        """
        self.A = A
        self.B = B
        self.C = C
        self.n = n

    def __str__(self):
        return self.A.__str__() + '\n' + self.B.__str__() + '\n' + self.C.__str__()

    def __repr__(self):
        # BUG FIX: this called self.A.__repl__() (and likewise for B and C),
        # a method that does not exist, so repr() raised AttributeError.
        return self.A.__repr__() + self.B.__repr__() + self.C.__repr__()

    def draw_zy(self, ymin=0, ymax=1, zmin=0, zmax=1):
        """
        Draw a representation in the zy-plane (x == 0) by drawing each of
        the three faces clipped to the given rectangle.

        (A previous version also computed one corner of the prism from the
        intersection of faces A and B; the result was never used and could
        divide by zero for parallel faces, so it has been removed.)
        """
        self.A.draw_zy(ymin=ymin, ymax=ymax, zmin=zmin, zmax=zmax)
        self.B.draw_zy(ymin=ymin, ymax=ymax, zmin=zmin, zmax=zmax)
        self.C.draw_zy(ymin=ymin, ymax=ymax, zmin=zmin, zmax=zmax)

    def unit_normal_at(self, point):
        """
        Return the outward normal of the face containing the point, or a
        zero vector when the point is on none of the faces.
        """
        if self.A.is_in_plane(point):
            return self.A.uvw
        if self.B.is_in_plane(point):
            return self.B.uvw
        if self.C.is_in_plane(point):
            return self.C.uvw
        # point is on no face; callers receive a degenerate (zero) normal
        return np.array([0, 0, 0])

    def distance(self, ray):
        """
        Return the nearest positive distance from the ray to any prism face.
        Distances <= 1e-6 (behind or at the start point) are replaced by the
        sentinel 999 before taking the minimum.
        """
        d1 = self.A.distance(ray)
        d2 = self.B.distance(ray)
        d3 = self.C.distance(ray)
        dd = np.array([d1, d2, d3])
        np.place(dd, dd <= 1e-6, 999)
        return min(dd)

    def refract(self, ray, outside):
        """
        Bend the ray at the prism face it currently sits on
        (Spencer and Murty, equation 36).
        """
        normal = self.unit_normal_at(ray.xyz)
        if outside:
            return refract(ray.uvw, normal, 1, self.n)
        return refract(ray.uvw, normal, self.n, 1)
class Lens:
    """
    Ray-traceable lens built from two surfaces.
    """

    def __init__(self, surface1, surface2, refractive_index, thickness):
        """
        Args:
            surface1: first surface
            surface2: second surface
            refractive_index: index of refraction of the lens material
            thickness: thickness of the lens
        """
        self.surface1 = surface1
        self.surface2 = surface2
        self.refractive_index = refractive_index
        self.thickness = thickness

    def __str__(self):
        parts = [str(self.surface1),
                 str(self.surface2),
                 "refractive index = %f" % self.refractive_index,
                 "thickness = %f" % self.thickness]
        return "\n".join(parts)

    def __repr__(self):
        tail = ", %f, %f)" % (self.refractive_index, self.thickness)
        return "Lens(" + repr(self.surface1) + "," + repr(self.surface2) + tail

    def distance(self, ray, which_surface):
        """Distance from the ray to the requested surface (1 or 2)."""
        surface = self.surface1 if which_surface == 1 else self.surface2
        return surface.distance(ray)

    def refract(self, ray, which_surface):
        """
        Bend light at the requested surface (1 or 2).

        NOTE(review): the second argument handed to the surface's refract()
        is a refractive-index ratio, while Sphere.refract expects a boolean
        `outside` flag -- confirm the intended surface protocol.
        """
        if which_surface == 1:
            return self.surface1.refract(ray, 1 / self.refractive_index)
        return self.surface2.refract(ray, self.refractive_index)

    def draw_zy(self):
        """Draw a zy-plane representation: the left half of surface 1 and
        the right half of surface 2."""
        self.surface1.draw_zy(side='left')
        self.surface2.draw_zy(side='right')
class ThinLens:
    """
    An ideal thin lens located at a single z position.
    """

    def __init__(self, focal_length, vertex, diameter=10):
        """
        Args:
            focal_length: focal length of the lens
            vertex: z-location of the lens plane
            diameter: diameter of the lens
        """
        self.f = focal_length
        self.vertex = vertex
        self.diameter = diameter

    def __str__(self):
        lines = ["focal length = %f" % self.f,
                 "vertex = %f" % self.vertex,
                 "diameter = %f" % self.diameter]
        return "\n".join(lines)

    def __repr__(self):
        return "ThinLens(%f, %f, diameter=%f)" % (self.f, self.vertex, self.diameter)

    def distance(self, ray):
        """
        Distance along the ray to the plane of the lens; np.inf when the
        ray travels parallel to that plane (w == 0).
        """
        w = ray.uvw[2]
        if w == 0:
            return np.inf
        return (self.vertex - ray.xyz[2]) / w

    def refract(self, ray):
        """
        Bend light at the lens surface.
        """
        yf = self.f / ray.uvw[2]
        y0 = ray.xyz[1]
        return self.f / np.sqrt((yf - y0) ** 2 + self.f ** 2)

    def draw_zy(self):
        """
        Draw the lens as a dotted red vertical segment in the zy-plane.
        """
        plt.plot([self.vertex, self.vertex],
                 [-self.diameter / 2, self.diameter / 2], ':r')
|
{"hexsha": "7ade8c22514507587ab396e81bfb7c58816b2d94", "size": 12626, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyraytrace/pyraytrace.py", "max_stars_repo_name": "scottprahl/pyraytrace", "max_stars_repo_head_hexsha": "d43d999e301dece3ffce5ce23a183ac03dadef55", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-13T18:57:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-13T18:57:16.000Z", "max_issues_repo_path": "pyraytrace/pyraytrace.py", "max_issues_repo_name": "scottprahl/pyraytrace", "max_issues_repo_head_hexsha": "d43d999e301dece3ffce5ce23a183ac03dadef55", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyraytrace/pyraytrace.py", "max_forks_repo_name": "scottprahl/pyraytrace", "max_forks_repo_head_hexsha": "d43d999e301dece3ffce5ce23a183ac03dadef55", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7494505495, "max_line_length": 89, "alphanum_fraction": 0.5009504198, "include": true, "reason": "import numpy", "num_tokens": 3809}
|
module Runtests

# Run the complete TinyModia test suite headless: the "SilentNoPlot"
# plot package routes every plot call to a no-op backend so no plot
# windows are produced, and the previously selected plot package is
# restored once the tests finish.
using TinyModia
using Test

@time @testset verbose=true "TinyModia (with SilentNoPlot)" begin
    usePlotPackage("SilentNoPlot")   # select the no-op plotting backend
    include("include_all.jl")        # pulls in every individual test file
    usePreviousPlotPackage()         # restore the caller's plot package
end

end
|
{"hexsha": "89cc69bd1f240ea584b373d3648cac8f75f1fc14", "size": 266, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "ModiaSim/TinyModia.jl", "max_stars_repo_head_hexsha": "ad7e71ecace8d3f7ceb0d715893bfe4e3c6500bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2021-02-05T17:32:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-23T08:47:22.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "ModiaSim/TinyModia.jl", "max_issues_repo_head_hexsha": "ad7e71ecace8d3f7ceb0d715893bfe4e3c6500bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-02-10T20:24:51.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-04T15:44:57.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "ModiaSim/TinyModia.jl", "max_forks_repo_head_hexsha": "ad7e71ecace8d3f7ceb0d715893bfe4e3c6500bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-04-17T00:18:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-05T02:47:49.000Z", "avg_line_length": 19.0, "max_line_length": 65, "alphanum_fraction": 0.7518796992, "num_tokens": 72}
|
# Train classifiers (radial-kernel SVM and kNN) that predict SET-card
# shape from two extracted image features (v1, v2), then evaluate the
# kNN model on a held-out test set and plot the training data.
require(caret)

# training data: column 1 is the shape label, v1/v2 are the features
a <- read.csv("/Users/tom/projects-workspace/set-game/data/train-out-shape.csv", header = FALSE, col.names = c("label", "v1", "v2"))
a$label <- as.factor(a$label)
set.seed(1)  # reproducible resampling
# radial-kernel SVM: center/scale the features, tune over 10 candidate
# parameter sets with 5x repeated cross-validation.
# NOTE(review): caret's argument name is `trControl`; the abbreviation `tr`
# is not partial-matched through `...` -- confirm the resampling settings
# are actually applied.
aFit <- train(label ~ ., data = a, method = "svmRadial", preProc = c("center", "scale"),
 tuneLength = 10,
 tr = trainControl(method = "repeatedcv", repeats = 5, classProbs = TRUE))
aFit
# k-nearest-neighbours baseline with the same preprocessing
aFitKnn <- train(label ~ ., data = a, method = "knn", preProc = c("center", "scale"))
aFitKnn
# Try using the model to predict test data labels
testData <- read.csv("/Users/tom/projects-workspace/set-game/data/test-out-shape.csv", col.names = c("label", "v1", "v2"))
testData$label <- as.factor(testData$label)
testDataNoLabel <- testData[-c(1)]  # drop the label column before predicting
predictions <- predict(aFitKnn, newdata = testDataNoLabel)
predictions
# accuracy / kappa of the kNN model on the held-out test set
print(postResample(pred=predictions, obs=as.factor(testData$label)))
# Plot data
# NOTE(review): qplot is from ggplot2, which is only loaded here indirectly
# via caret -- confirm ggplot2 is attached.
qplot(v1, v2, colour = label, data = a)
|
{"hexsha": "42015c3bcb168ba4bd113ad6ba2541cc5ffb3977", "size": 946, "ext": "r", "lang": "R", "max_stars_repo_path": "R/predict-shape.r", "max_stars_repo_name": "tomwhite/set-game", "max_stars_repo_head_hexsha": "535174546788d004bdfd1c17509ccc6f5736a79a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2017-09-05T13:08:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T08:18:32.000Z", "max_issues_repo_path": "R/predict-shape.r", "max_issues_repo_name": "Q10Viking/set-game", "max_issues_repo_head_hexsha": "535174546788d004bdfd1c17509ccc6f5736a79a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/predict-shape.r", "max_forks_repo_name": "Q10Viking/set-game", "max_forks_repo_head_hexsha": "535174546788d004bdfd1c17509ccc6f5736a79a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-12-26T03:36:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-05T09:58:00.000Z", "avg_line_length": 37.84, "max_line_length": 132, "alphanum_fraction": 0.6723044397, "num_tokens": 280}
|
"""Load the Titanic passenger CSV and bulk-insert it into a Postgres table.

Connection settings come from a .env file (DB_HOST, DB_NAME, DB_USER,
DB_PASSWORD, DB_PORT). The script (re)creates the `passengers` table and
inserts every row of data/titanic.csv in one round trip.
"""
import os

from sqlalchemy import create_engine
from dotenv import load_dotenv
import pandas as pd
import psycopg2
from psycopg2.extras import execute_values
import numpy as np

# psycopg2 cannot adapt numpy int64 values out of the box; register an
# adapter so integer columns coming from the DataFrame insert cleanly.
psycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)

# CONNECT TO DATABASE
load_dotenv()  # looks inside the .env file for some env vars

# pass env var values to python vars ("OOPS" makes a missing var obvious)
DB_HOST = os.getenv("DB_HOST", default="OOPS")
DB_NAME = os.getenv("DB_NAME", default="OOPS")
DB_USER = os.getenv("DB_USER", default="OOPS")
DB_PASSWORD = os.getenv("DB_PASSWORD", default="OOPS")
DB_PORT = os.getenv("DB_PORT", default="OOPS")

connection = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST, port=DB_PORT)
print(type(connection))

cursor = connection.cursor()
print(type(cursor))

# MAKE A POSTGRES SQL TABLE
# (the DROP TABLE makes the script re-runnable; comment it out to keep data)
table_creation_query = """
DROP TABLE IF EXISTS passengers;
CREATE TABLE IF NOT EXISTS passengers (
id SERIAL PRIMARY KEY,
survived integer,
pclass integer,
name varchar NOT NULL,
gender varchar NOT NULL,
age float,
sib_spouse_count integer,
parent_child_count integer,
fare float
);
"""
cursor.execute(table_creation_query)

# READ THE CSV AND TRANSFORM THE DATA
CSV_FILEPATH = os.path.join(os.path.dirname(__file__), "data", "titanic.csv")
df = pd.read_csv(CSV_FILEPATH)

# INSERT THE DATA INTO THE TABLE
# NOTE(review): the column list names 9 columns including `id`; confirm the
# CSV yields matching 9-field records (otherwise drop `id` from the list and
# let SERIAL assign it).
insertion_query = "INSERT INTO passengers (id, survived, pclass, name, gender, age, sib_spouse_count, parent_child_count, fare) VALUES %s"
# BUG FIX: the original line was missing its closing parenthesis, which made
# the whole script a SyntaxError.
rows_to_insert = list(df.to_records(index=False))

# execute_values expands the single VALUES %s placeholder for all rows
execute_values(cursor, insertion_query, rows_to_insert)

# SAVE THE TRANSACTIONS
connection.commit()

# release database resources (the original script never closed them)
cursor.close()
connection.close()
|
{"hexsha": "ea88b0fa9458bc20fd7fcab7d3340479ec909c6d", "size": 1737, "ext": "py", "lang": "Python", "max_stars_repo_path": "module2-sql-for-analysis/insert_titanic.py", "max_stars_repo_name": "masonnystrom/DS-Unit-3-Sprint-2-SQL-and-Databases", "max_stars_repo_head_hexsha": "24c7b3b3c4c78112d63817ca77d054b7c8ca4e06", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "module2-sql-for-analysis/insert_titanic.py", "max_issues_repo_name": "masonnystrom/DS-Unit-3-Sprint-2-SQL-and-Databases", "max_issues_repo_head_hexsha": "24c7b3b3c4c78112d63817ca77d054b7c8ca4e06", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "module2-sql-for-analysis/insert_titanic.py", "max_forks_repo_name": "masonnystrom/DS-Unit-3-Sprint-2-SQL-and-Databases", "max_forks_repo_head_hexsha": "24c7b3b3c4c78112d63817ca77d054b7c8ca4e06", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9482758621, "max_line_length": 138, "alphanum_fraction": 0.7720207254, "include": true, "reason": "import numpy", "num_tokens": 426}
|
PROGRAM GDRADR
C************************************************************************
C* GDRADR *
C* *
C* This program creates a RADAR mosaic grid from NEXRAD products. *
C** *
C* Log: *
C* Chiz/Unidata 3/01 Initial coding *
C* James/Unidata 2/09 Added bin mins & mstrct to CTB_DTGET CSC*
C* James/Unidata 1/14 Removed deprecated GD_CLOS call *
C* James/Unidata 6/17 Product flag for dual pol *
C************************************************************************
INCLUDE 'GEMPRM.PRM'
INCLUDE 'IMGDEF.CMN'
C*
CHARACTER*(LLMXLN) device, gdfile, radtim, raddur, radfrq,
+ stnfil, cpyfil, cpytmp, proj, gdarea,
+ kxky, filnam, cpyf(2), gfunc, filpath,
+ outstr, templ, newfil, gemfil, anlyss,
+ cnumgrd, radmode, gdpfun, ndval
C*
CHARACTER curtim*15, gdattim(2)*20, stid*8,
+ stnnam*32, coun*2, stat*2, tbchars*20,
+ parm*13, gname*20, cprj*10, errstr*24,
+ carr(10)*72, ttim*20
C*
CHARACTER imgfls*(4096), nextfile*(4096),
+ tpath*(256), tplate*(80), ctmpl*(10)
C*
INTEGER ivcord, nbits, ighdr( LLGDHD ), kx, ky
C*
LOGICAL gsflag, respnd, done, exist, proces, viewable,
+ opmode
C*
REAL rltln(4), rnvblk(LLNNAV), anlblk(LLNANL),
+ envblk(LLNNAV), rarr(256),
+ qmin, qmax, rdif, rbits, rndval(10), rndv
C
C-----------------------------------------------------------------------
C* Initialize user interface and graphics.
C
CALL IP_INIT ( respnd, iperr )
IF ( iperr .ne. 0 ) THEN
CALL ER_WMSG ( 'GDRADR', -1, ' ', ier )
CALL SS_EXIT
END IF
C
C* Initialize graphics.
C
CALL GG_INIT ( 1, iret )
IF ( iret .eq. 0 ) THEN
done = .false.
ELSE
CALL ER_WMSG ( 'GDRADR', -3, ' ', ier )
done = .true.
END IF
CALL IP_IDNT ( 'GDRADR', ier )
C
C* Find NEXRIII template
C
ctmpl = 'NEXRIII'
CALL ST_NULL ( ctmpl, ctmpl, lens, ier)
tplate = ' '
tpath = ' '
CALL CTB_DTGET ( ctmpl, tpath, tplate, ic, is, if, ir, ii, ion,
+ ihb, mnb, iha, mna, mstrct, idtmch, ier )
C
IF ( ier .ne. 0 ) THEN
tpath = '$RAD/NIDS/%SITE%/%PROD%'
tplate = '%PROD%_YYYYMMDD_HHNN'
CALL ER_WMSG ( 'GDRADR', 2, tpath, ier )
ELSE
CALL ST_RNUL ( tpath, tpath, lens, ier)
CALL ST_RNUL ( tplate, tplate, lens, ier)
END IF
C
DO i=1,LLNNAV
envblk(i) = RMISSD
END DO
C
C
DO WHILE ( .not. done )
proces = .true.
C
C* Get input parameters.
C
CALL GPINP ( proj, gdarea, kxky, gdpfun, gdfile, radtim,
+ raddur, radfrq, cpyfil, stnfil, cnumgrd,
+ radmode, ndval, iperr )
CALL ST_UCLC(radtim, radtim, ier)
IF ( radtim(1:1) .eq. 'c') THEN
C
C* Get current system time
C
itype = 1
CALL CSS_GTIM ( itype, curtim, ier )
CALL ST_RNUL ( curtim, curtim, lens, ier)
ELSE
curtim = radtim(1:15)
END IF
C
C* store ND data value input
C
CALL ST_CLST ( ndval, '!', '-9999.', 10, carr,
+ numndval, ier)
DO i=1,numndval
CALL ST_C2R (carr(i), 1, rndval(i), iout, ier)
IF (ier .ne. 0) rndval(i) = RMISSD
END DO
C
C* Get gfunc from gdpfun list
C
CALL ST_CLST ( gdpfun, '!', ' ', 10, carr, nfunc, ier )
DO npfun=1,nfunc
C
IF (npfun .le. numndval) THEN
rndv = rndval(npfun)
ELSE
rndv = rndval(numndval)
ENDIF
C
CALL ST_LCUC (carr(npfun), gfunc, ier)
C*
CALL ST_C2I (radfrq, 1, iwaitper, inum, ier)
IF (inum .ne. 1) iwaitper = 0
C*
CALL ST_C2I (raddur, 1, iraddur, inum, ier)
IF (inum .ne. 1) iraddur = 30
C*
IF (stnfil .eq. ' ') THEN
stnfil = 'nexrad.tbl'
CALL ER_WMSG ( 'GDRADR', 3, stnfil, ier )
END IF
CALL ST_NUMB ( cnumgrd, imxgrd, ier )
C
C* Set radar modes to be used
C
CALL ST_LCUC ( radmode, radmode, ier )
icair_mode = INDEX ( radmode, 'C')
iprcp_mode = INDEX ( radmode, 'P')
imntn_mode = INDEX ( radmode, 'M')
IF ( icair_mode + iprcp_mode + imntn_mode .eq. 0 ) THEN
icair_mode = 1
iprcp_mode = 1
imntn_mode = 1
END IF
IF ( iperr .eq. 0 ) THEN
C
C* Set device and projection.
C
device = 'gif|/dev/null'
CALL GG_SDEV ( device, ier )
IF ( ier .ne. 0 ) proces = .false.
IF ( proces ) THEN
C
C* Set Grid projection
C
CALL ST_CLST ( cpyfil, '|', ' ', 2, cpyf, num,
+ ier )
cpytmp = cpyf(1)
C
C* CASE 1: Build new navigation block from user input.
C
IF (cpytmp .eq. ' ') THEN
CALL GDCNAV ( proj, kxky, gdarea, cprj, kx, ky,
+ rltln, rnvblk, ier )
IF ( ier .eq. 0 ) THEN
anlyss = ' '
CALL GDCANL ( anlyss, rnvblk, anlblk, ier )
END IF
C
C* CASE 2: Build new navigation and analysis blocks from
C* grid navigation table input.
C
ELSE IF ( cpytmp(1:1) .eq. '#') THEN
CALL GDCTBL (cpytmp, gname, cprj, kx, ky, rltln,
+ rnvblk, anlblk, ier)
C
C* CASE 3: Get the navigation and analysis blocks from
C* the existing file.
C
ELSE
CALL FL_MFIL ( cpytmp, ' ', filnam, ier )
IF ( ier .eq. 0) THEN
CALL GD_OPEN ( filnam, .false., LLNANL, LLNNAV,
+ iflno, anlblk, rnvblk, maxg, ier)
IF ( ier .eq. 0) THEN
C
C* Depricated Call to gdclos
CALL GD_CLOS ( iflno, ier )
CALL GR_RNAV ( rnvblk, cprj, kx, ky, ier )
END IF
END IF
END IF
IF (ier .ne. 0) THEN
CALL ER_WMSG ( 'GDRADR', -4, cpyfil, ier )
proces = .false.
ELSE
CALL FL_MNAM(curtim, gdfile, gemfil, ier)
CALL FL_INQR (gemfil, exist, newfil, ier)
C
C* We have a nav block.... if not the same as our existing
C* nav block, we need to initialize the bounds locations.
C
CALL GR_CNAV (rnvblk, envblk, LLNNAV, gsflag, iret)
IF (.not. gsflag) CALL radar_boundsinit()
IF (.not. exist) THEN
CALL SS_ENVR(gemfil,newfil,ier)
CALL GD_CREF(newfil, LLNNAV, rnvblk, LLNANL,
+ anlblk, 2, imxgrd, igdfln, ier)
IF (ier .ne. 0) THEN
proces = .false.
CALL ER_WMSG ( 'GDRADR', -5, newfil, ier )
ELSE
DO i=1,LLNNAV
envblk(i) = rnvblk(i)
END DO
END IF
ELSE
CALL GD_OPEN ( newfil, .true., LLNANL, LLNNAV,
+ igdfln, anlblk, envblk, maxgrd, ier )
IF (ier .ne. 0) THEN
proces = .false.
CALL ER_WMSG ( 'GDRADR', -6, newfil, ier )
ELSE
CALL GR_CNAV (rnvblk, envblk, LLNNAV,
+ gsflag, iret)
IF (.not. gsflag) THEN
CALL ER_WMSG ( 'GDRADR', -7, ' ', ier )
END IF
END IF
END IF
CALL GR_SNAV(LLNNAV,rnvblk,ier)
END IF
C
C* Initialize grid
C
CALL radar_ginit(kx,ky,ier)
IF ( ier .ne. 0 ) THEN
proces = .false.
END IF
C
C* Start loop over input image files.
C
IF ( proces ) THEN
qmin = RMISSD
qmax = RMISSD
C
CALL FL_TBOP (stnfil, 'stns', ilun, ierf)
IF (ierf .ne. 0) THEN
CALL ER_WMSG ( 'GDRADR', -8, stnfil, ier )
END IF
DO WHILE ( ierf .eq. 0 )
CALL TB_RSTN (ilun, stid, stnnam, istnm,
+ stat, coun, slat, slon, selv, ispri,
+ tbchars, ierf )
IF (ierf .eq. 0) THEN
viewable = .true.
ifile = 1
CALL ST_RPST (tpath, '%SITE%', stid,
+ ipos, outstr, ier)
CALL ST_RPST (outstr, '%PROD%', gfunc,
+ ipos, filpath, ier)
CALL ST_NULL (filpath, filpath, ilen, ier)
CALL ST_RPST (tplate,'%SITE%',stid,ipos,
+ outstr, ier)
CALL ST_RPST (outstr,'%PROD%',gfunc,ipos,
+ templ, ier)
CALL ST_NULL (templ, templ, ilen, ier)
CALL ST_NULL ( curtim, ttim, ilen, ier)
CALL next_radar (filpath, templ, ttim,
+ nextfile, numc, idelt, ier)
IF (ier .eq. 0) THEN
imgfls = nextfile(1:numc)
ELSE
viewable = .false.
END IF
C
C* check if radar is within grid
C
IF (viewable) THEN
C
C* Reset the projection for each image.
C
CALL GG_MAPS ( 'RAD|D', 'dset', imgfls,
+ idrpfl, ier )
C
C* Clear the screen (not needed)
C
CALL GCLEAR ( iret )
C
C* Display satellite image
C
CALL IM_DROP ( iret )
C
IF ( ( iret .ne. 0 ) .or.
+ ( imldat .eq. 0 ) ) THEN
viewable = .false.
ELSE
CALL radar_bounds (istnm, kx, ky,
+ ier)
IF (ier .ne. 0) viewable = .false.
END IF
C
IF (idelt .gt. iraddur) THEN
CALL ER_WMSG ( 'GDRADR', 1, imgfls,
+ ier )
viewable = .false.
END IF
END IF
C
viewable = .true.
IF ( viewable ) THEN
C
C* Determine if radar mode is acceptable
C
opmode = .false.
IF ( ( immode .eq. 2 ) .and.
+ ( iprcp_mode .gt. 0 ) )
+ opmode = .true.
IF ( ( immode .eq. 1 ) .and.
+ ( icair_mode .gt. 0 ) )
+ opmode = .true.
IF ( ( immode .eq. 0 ) .and.
+ ( imntn_mode .gt. 0 ) )
+ opmode = .true.
C
IF ( opmode ) THEN
CALL ER_WMSG ( 'GDRADR', 0, imgfls,
+ ier )
DO i=1,imndlv
IF ( cmblev(i) .eq. 'ND' ) THEN
rarr(i) = rndv
ELSE
CALL ST_C2R (cmblev(i), 1,
+ rarr(i), num, ier)
IF (ier .ne. 0) rarr(i) = rndv
END IF
IF ( qmin .eq. RMISSD )
+ qmin = rarr(i)
IF ( qmax .eq. RMISSD )
+ qmax = rarr(i)
IF ( ( qmin .gt. rarr(i) ) .and.
+ ( rarr(i) .ne. RMISSD ) )
+ qmin = rarr(i)
IF ( ( qmax .lt. rarr(i) ) .and.
+ ( rarr(i) .ne. RMISSD ) )
+ qmax = rarr(i)
END DO
C Need a flag for radar_grid function
C (HHC,DVL, other high-res products)
SELECT CASE (imtype)
CASE (81,177)
CALL radar_grid(0,kx, ky, rarr)
CASE DEFAULT
CALL radar_grid(1,kx,ky,rarr)
END SELECT
ELSE
WRITE (errstr,1000) stid,immode
1000 FORMAT (A,1x,I1)
CALL ER_WMSG ( 'GDRADR', 5, errstr,
+ ier )
END IF
C
C* Flush the graphics buffer.
C
CALL GEPLOT ( iret)
END IF
END IF
END DO
IF (ilun .gt. 0) CALL FL_CLOS(ilun, iret)
C
CALL ER_WMSG ( 'GDRADR', 4, curtim, ier )
DO i=1,LLGDHD
ighdr(i) = 0
END DO
gdattim(1) = curtim(1:15)
gdattim(2) = ' '
parm = gfunc(1:12)
CALL ST_NULL ( gdattim(1), gdattim(1), lens, ier)
CALL ST_NULL ( gdattim(2), gdattim(2), lens, ier)
CALL ST_NULL ( parm, parm, lens, ier)
C
C* Compute number of packing bits
C
IF ( ( qmin .eq. RMISSD ) .or.
+ ( qmax .eq. RMISSD ) ) THEN
nbits = 16
ELSE
rdif = qmax - qmin
rbits = abs ( alog ( rdif ) ) / alog ( 2.0 )
nbits = int(rbits) + 1
END IF
IF ( ( nbits .lt. 2) .or. ( nbits .gt. 32 ) )
+ THEN
ipktyp = MDGNON
ELSE
ipktyp = MDGGRB
END IF
CALL cgdtwdt ( igdfln, kx, ky, ighdr,
+ gdattim(1), gdattim(2), parm, nbits,
+ ipktyp, ier)
C
C* Depricated Call to gd_clos
C
C CALL gd_clos(igdfln, ier)
C
END IF
END IF
END IF
END DO
C
C* Call the dynamic tutor.
C
IF (iwaitper .ne. 0) CALL wait_time(iwaitper, iret)
IF ((iwaitper .eq. 0).or.(iret .ne. 0))
+ CALL IP_DYNM ( done, iret )
END DO
C*
IF ( iperr .ne. 0 )
+ CALL ER_WMSG ( 'GDRADR', iperr, ' ', ier )
CALL GENDP ( 0, iret )
CALL IP_EXIT ( iret )
C*
STOP
END
|
{"hexsha": "91ca644dc77a08eba392a11a5d6ef32d8d0d4f48", "size": 12996, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gempak/source/programs/gd/gdradr/gdradr.f", "max_stars_repo_name": "oxelson/gempak", "max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z", "max_issues_repo_path": "gempak/source/programs/gd/gdradr/gdradr.f", "max_issues_repo_name": "oxelson/gempak", "max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z", "max_forks_repo_path": "gempak/source/programs/gd/gdradr/gdradr.f", "max_forks_repo_name": "oxelson/gempak", "max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z", "avg_line_length": 29.1390134529, "max_line_length": 77, "alphanum_fraction": 0.4708371807, "num_tokens": 4459}
|
# Copyright (C) 2019 Project AGI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNISTSequenceDataset class."""
import os
import re
import logging
import random
import struct
import numpy as np
import tensorflow as tf
from pagi.datasets.mnist_dataset import MNISTDataset
class MNISTSequenceDataset(MNISTDataset):  # pylint: disable=W0223
  """Sequence generator for the MNIST Dataset.

  Produces an endless stream of (image, label) pairs whose labels follow a
  digit sequence. The sequence is taken from ``options['sequence']`` (a file
  path, a digit string, or the literal 'random') and is restarted whenever it
  is exhausted. Each batch lane starts at a different offset into the
  sequence.
  """

  def set_batch_size(self, batch_size):
    """Sets the batch size used when building the tf.data pipeline."""
    self._batch_size = batch_size

  def get_train(self, preprocess=False, options=None):  # pylint: disable=W0221
    """tf.data.Dataset object for MNIST training data."""
    return self._dataset('train', 'train-images-idx3-ubyte', 'train-labels-idx1-ubyte', preprocess, options)

  def get_test(self, preprocess=False, options=None):  # pylint: disable=W0221
    """tf.data.Dataset object for MNIST test data."""
    return self._dataset('test', 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte', preprocess, options)

  def generate_random_sequence(self, num, k):
    """Returns a list of k labels drawn uniformly from range(num)."""
    return [random.randrange(num) for _ in range(k)]

  def generate_sequence_from_text(self, text):
    """Parses a string of digits (spaces ignored) into a list of int labels."""
    text = text.replace(' ', '')
    return [int(d) for d in str(text)]

  def generate_sequence_from_file(self, filename):
    """Reads a file and parses every digit character into a label sequence."""
    with open(filename, 'r') as f:
      # Filter non-numeric characters
      filtered_content = re.sub('[^0-9]', '', f.read())
      return self.generate_sequence_from_text(filtered_content)

  def _get_images_and_labels(self, images_file, labels_file):
    """Downloads and parses the MNIST image/label files into numpy arrays."""
    # Download the dataset
    images_file = self._download(self._directory, images_file)
    labels_file = self._download(self._directory, labels_file)

    # Verify headers
    self._check_image_file_header(images_file)
    self._check_labels_file_header(labels_file)

    # Read and process the images: scale to [0, 1], NHWC layout, float32.
    images = self._read_file(images_file)
    images = images / 255.0
    images = np.reshape(images, [-1, self.IMAGE_DIM, self.IMAGE_DIM, 1])
    images = images.astype(np.float32)

    # Read and process the labels
    labels = self._read_file(labels_file)
    labels = labels.astype(np.int32)

    return images, labels

  def _pick_sample(self, labels, label, example_type='random', options=None):
    """Picks the dataset index of one example of `label`.

    Args:
      labels: 1-D array of all dataset labels.
      label: the class to sample from.
      example_type: 'same' always returns the first match; 'specific' uses
        the per-label index from options['specific_examples']; anything else
        returns a uniformly random match.
      options: dataset options dict; only read when example_type='specific'.

    Returns:
      (idx, indices): the chosen index and all indices matching `label`.
    """
    indices = np.where(labels == label)[0]

    if example_type == 'same':
      # Pick same index every time
      idx = indices[0]
    elif example_type == 'specific':
      # Pick the specified index for this label.
      # Bug fix: this branch previously used invalid `except e:` syntax,
      # referenced an undefined `options` name, and could leave `idx`
      # unbound after a failure. Now falls back to a random index.
      try:
        idx = indices[(options or {})['specific_examples'][label]]
      except (KeyError, IndexError, TypeError) as e:
        logging.error('Failed to find an example associated with the label: %s', e)
        idx = np.random.choice(indices)
    else:
      # Fallback to random index
      idx = np.random.choice(indices)

    return idx, indices

  def _get_sequence(self, sequence=None, options=None):
    """Returns a concrete label sequence.

    'random' (or None) yields a fresh random sequence; anything else is
    assumed to already be a list of labels and is returned unchanged.
    """
    # Bug fix: the original condition `sequence == 'random' or None` never
    # matched None, and the branch referenced an undefined `options` name.
    if sequence == 'random' or sequence is None:
      # Fall back to num_classes when no sequence_length is configured.
      length = (options or {}).get('sequence_length', self.num_classes)
      return self.generate_random_sequence(self.num_classes, length)
    return sequence

  def _init_sequences(self, batch_size, input_sequence=None, options=None):
    """Initialises the first N (=batch_size) sequences with some offset."""
    sequences = []
    for i in range(batch_size):
      sequence = self._get_sequence(input_sequence, options)
      # Stagger each batch lane by a different offset into the sequence.
      offset = i % len(sequence)
      sequence = sequence[offset:]
      sequences.append(sequence)
    return sequences

  def _dataset(self, split, images_file, labels_file, preprocess, options):  # pylint: disable=W0613, W0221
    """Downloads MNIST and wraps it in a sequence-following tf.data.Dataset."""
    options = options or {}

    # Batch size
    batch_size = self._batch_size
    logging.info('Batch size: %s', str(batch_size))

    # Resolve the requested sequence: file path, digit string, or 'random'.
    input_sequence = None
    if 'sequence' in options:
      # Capture sequence from a file
      if os.path.isfile(options['sequence']):
        input_sequence = self.generate_sequence_from_file(options['sequence'])
      # Capture sequence from a given string
      elif isinstance(options['sequence'], str) and options['sequence'] != 'random':
        input_sequence = self.generate_sequence_from_text(options['sequence'])
      # Generate a random sequence
      elif options['sequence'] == 'random' and 'sequence_length' in options:
        input_sequence = 'random'
    logging.info('Sequence used: %s', str(input_sequence))

    # Get the dataset
    images, labels = self._get_images_and_labels(images_file, labels_file)

    # Initialise the sequence list with N (=batch_size) sequences
    sequences = self._init_sequences(batch_size, input_sequence, options)
    sequence_offsets = np.zeros(self._batch_size, dtype=np.int32)

    example_type = options.get('example_type', 'random')

    def sequence_generator():
      """Generates image and label pairs based on a given sequence of labels."""
      logging.debug('Batch size [generator]: %s', str(batch_size))

      # Loop indefinitely
      while True:
        for b in range(self._batch_size):
          i = sequence_offsets[b]

          # Try to get a sample from the current sequence; when it is
          # exhausted, generate a fresh one and restart from its beginning.
          try:
            sample_idx = sequences[b][i]
          except IndexError:
            sequences[b] = self._get_sequence(input_sequence, options)
            i = 0
            sample_idx = sequences[b][i]

          sequence_offsets[b] = i + 1
          idx, _ = self._pick_sample(labels, sample_idx, example_type, options)
          yield (images[idx], labels[idx])

    return tf.data.Dataset.from_generator(
        sequence_generator, output_types=(tf.float32, tf.int32),
        output_shapes=(tf.TensorShape([self.IMAGE_DIM, self.IMAGE_DIM, 1]),
                       tf.TensorShape([])))

  def _read_file(self, filename):
    """Parses an IDX-format file into a numpy array of np.uint8."""
    with open(filename, 'rb') as f:
      # Header: 2 zero bytes, dtype code, number of dimensions, then dims.
      _, _, dims = struct.unpack('>HBB', f.read(4))
      shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims))
      # np.fromstring is deprecated for binary input; frombuffer is the
      # supported equivalent.
      return np.frombuffer(f.read(), dtype=np.uint8).reshape(shape)
|
{"hexsha": "af2e6ce2e68a04092de46535ef3fda02dd6f8ac3", "size": 6709, "ext": "py", "lang": "Python", "max_stars_repo_path": "rsm/datasets/mnist_sequence_dataset.py", "max_stars_repo_name": "Cerenaut/rsm", "max_stars_repo_head_hexsha": "33bf8a3a620b46b5180280f2ca5f0b28c168b806", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rsm/datasets/mnist_sequence_dataset.py", "max_issues_repo_name": "Cerenaut/rsm", "max_issues_repo_head_hexsha": "33bf8a3a620b46b5180280f2ca5f0b28c168b806", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rsm/datasets/mnist_sequence_dataset.py", "max_forks_repo_name": "Cerenaut/rsm", "max_forks_repo_head_hexsha": "33bf8a3a620b46b5180280f2ca5f0b28c168b806", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8770053476, "max_line_length": 109, "alphanum_fraction": 0.6676106722, "include": true, "reason": "import numpy", "num_tokens": 1511}
|
[STATEMENT]
lemma run_catch_option [simp]:
"run_option (catch_option m1 m2) = bind (run_option m1) (\<lambda>x. if x = None then run_option m2 else return x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. run_option (local.catch_option m1 m2) = bind (run_option m1) (\<lambda>x. if x = None then run_option m2 else return x)
[PROOF STEP]
by(simp add: catch_option_def)
|
{"llama_tokens": 144, "file": "Monomorphic_Monad_Monomorphic_Monad", "length": 1}
|
from tensorflow.python.layers.normalization import batch_normalization
from tensorflow.python.ops.gen_nn_ops import softplus
from model.consts import Const
from model.enc import ENCRNN
import tensorflow as tf
import numpy as np
from util.helper import tf_cov
from util.losses import mmd_loss, maximum_mean_discrepancy
import tensorflow_probability as tfp
class MMDAE(ENCRNN):
    """Encoder RNN whose latent code is regularised with an MMD penalty
    plus a KL term that pulls the batch latent distribution toward a
    standard normal.

    NOTE(review): builds a TF1-style static graph in __init__; attributes
    self.z, self.z_cov, self.loss, self.dc_loss and self.z_pred are graph
    tensors, not eager values.
    """

    def __init__(self, n_cells, a_size, s_size, latent_size, n_T, static_loops, mmd_loss_coef):
        # Base class builds the RNN; presumably it defines self.rnn_in,
        # self.rnn_out and self.seq_lengths used below — defined outside
        # this file, confirm against ENCRNN.
        super().__init__(n_cells, a_size, s_size, latent_size, 1, n_T, static_loops)

        with tf.variable_scope('enc'):
            # Gather, for each batch element, the RNN output at the index
            # given by its sequence length (one output vector per sequence).
            batch_range = tf.range(tf.shape(self.rnn_in)[0])
            indices = tf.stack([batch_range, self.seq_lengths], axis=1)
            out = tf.gather_nd(self.rnn_out[:, :, :], indices)

            # normalizing the output by seq lengths
            # out = out / tf.cast(self.seq_lengths[:, np.newaxis], Const.FLOAT)
            # out = tf.reshape(self.rnn_out, [tf.shape(self.rnn_out)[0], (n_T + 1) * n_cells * 2])

            # Small MLP head mapping the RNN summary to the latent code.
            dense1 = tf.layers.dense(inputs=out, units=n_cells, activation=tf.nn.relu)
            dense2 = tf.layers.dense(inputs=dense1, units=n_cells, activation=tf.nn.relu)
            dense5 = tf.layers.dense(inputs=dense2, units=10, activation=tf.nn.softplus)

            #DIM: 1 * nBatches * nLatent
            self.z = tf.layers.dense(inputs=dense5, units=latent_size, activation=None)[np.newaxis]

            # added diagonal noise for better numerical stability
            self.z_cov = tf_cov(self.z[0]) + tf.eye(num_rows=tf.shape(self.z)[2], dtype=Const.FLOAT) * 1e-6

            # 3000 draws from N(0, I) used as the MMD reference sample.
            true_samples = tf.random_normal(tf.stack([3000, self.z.shape[2]]), dtype=Const.FLOAT)

            # Loss = mmd_loss_coef * MMD(z, N(0, I))
            #      + KL( N(mean(z), cov(z)) || N(0, I) ).
            self.loss = mmd_loss_coef * mmd_loss(true_samples, self.z[0], 1) + tfp.distributions.kl_divergence(
                tfp.distributions.MultivariateNormalFullCovariance(loc=tf.reduce_mean(self.z[0], axis=0),
                                                                   covariance_matrix=self.z_cov
                                                                   ),
                tfp.distributions.MultivariateNormalDiag(loc=tf.zeros((self.z.shape[2]), dtype=Const.FLOAT))
            )

            # No discriminator in this model; kept as a constant so callers
            # that expect a dc_loss attribute still work.
            self.dc_loss = tf.constant(0)

            # Latent prediction exposed without the leading singleton axis.
            self.z_pred = self.z[0]

    @staticmethod
    def compute_kernel(x, y):
        """Pairwise RBF kernel matrix between rows of x and rows of y,
        with bandwidth scaled by the feature dimension."""
        x_size = tf.shape(x)[0]
        y_size = tf.shape(y)[0]
        dim = tf.shape(x)[1]
        tiled_x = tf.tile(tf.reshape(x, tf.stack([x_size, 1, dim])), tf.stack([1, y_size, 1]))
        tiled_y = tf.tile(tf.reshape(y, tf.stack([1, y_size, dim])), tf.stack([x_size, 1, 1]))
        return tf.exp(-tf.reduce_mean(tf.square(tiled_x - tiled_y), axis=2) / tf.cast(dim, Const.FLOAT))

    @staticmethod
    def compute_mmd(x, y):
        """Unbiased-style MMD estimate: E[k(x,x)] + E[k(y,y)] - 2 E[k(x,y)]."""
        x_kernel = MMDAE.compute_kernel(x, x)
        y_kernel = MMDAE.compute_kernel(y, y)
        xy_kernel = MMDAE.compute_kernel(x, y)
        return tf.reduce_mean(x_kernel) + tf.reduce_mean(y_kernel) - 2 * tf.reduce_mean(xy_kernel)
|
{"hexsha": "69a7de85da9c845fa546f16f916e34e136901a9b", "size": 3022, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/model/mmdae_enc.py", "max_stars_repo_name": "adezfouli/rnn_hypercoder", "max_stars_repo_head_hexsha": "a5dcbe7618400cabb08ff50ad69ff8f6241dff8d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-07-20T13:39:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-14T15:09:04.000Z", "max_issues_repo_path": "src/model/mmdae_enc.py", "max_issues_repo_name": "adezfouli/rnn_hypercoder", "max_issues_repo_head_hexsha": "a5dcbe7618400cabb08ff50ad69ff8f6241dff8d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-06-08T22:48:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T03:23:36.000Z", "max_forks_repo_path": "src/model/mmdae_enc.py", "max_forks_repo_name": "adezfouli/rnn_hypercoder", "max_forks_repo_head_hexsha": "a5dcbe7618400cabb08ff50ad69ff8f6241dff8d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-03T09:10:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-13T04:47:13.000Z", "avg_line_length": 45.7878787879, "max_line_length": 111, "alphanum_fraction": 0.6267372601, "include": true, "reason": "import numpy", "num_tokens": 756}
|
export MaestroDataset, reader, @dataset_str
using Pkg.TOML
using DataDeps
using Random
using Transformers.Datasets
import Transformers.Datasets: trainfile, testfile, devfile, Mode
# Table of dataset metadata (description, url, checksum, ...) loaded from the
# bundled datasets.toml, keyed by dataset name.
const dataset_configs = open(TOML.parse, joinpath(@__DIR__, "datasets.toml"))

# For clarity: lets callers write dataset"Name" as a synonym for datadep"Name".
macro dataset_str(name::String)
    :(@datadep_str($name))
end
# Marker type identifying the MAESTRO language-modelling dataset.
struct MaestroDataset <: Dataset end

# Paths of the preprocessed JLD2 splits (fetched on demand via DataDeps).
trainfile(::MaestroDataset) = dataset"Maestro_LM/train.jld2"
devfile(::MaestroDataset) = dataset"Maestro_LM/validation.jld2"
testfile(::MaestroDataset) = dataset"Maestro_LM/test.jld2"
"""
    reader(M, ds::MaestroDataset; batchsize=32, shuffle=true)

Returns a `Channel` that yields batches (each a `Vector{Vector{Int}}`) from
the JLD2 split selected by mode `M`. Examples are read lazily by an async
producer task; trailing examples that do not fill a whole batch are dropped.
"""
function reader(::Type{M}, ds::MaestroDataset; batchsize=32, shuffle=true) where {M <: Mode}
    @assert batchsize >= 1
    channel = Channel{Vector{Vector{Int}}}()

    path = datafile(M, ds)
    n = load(path, "num_examples")
    # Examples are stored under string keys "1"..."n"; optionally shuffle.
    order = shuffle ? randperm(n) : collect(1:n)

    producer = @async for chunk in Iterators.partition(order, batchsize)
        if length(chunk) == batchsize
            batch = collect(load(path, string.(chunk)...))
            put!(channel, batchsize == 1 ? [batch] : batch)
        end
    end

    # Close the channel when the producer finishes or fails.
    bind(channel, producer)
    return channel
end
"""
    register_datasets(configs)

Registers one `DataDep` per entry of `configs` (name => metadata dict).
Google Drive URLs are fetched with the Transformers gdrive helper; all
other URLs use the plain HTTP fetcher. Archives are unpacked after download.
"""
function register_datasets(configs)
    for (name, cfg) in pairs(configs)
        # Google Drive links need a special fetcher to follow the
        # confirmation redirect.
        fetcher = startswith(cfg["url"], "https://docs.google.com") ?
            Transformers.Datasets.download_gdrive :
            DataDeps.fetch_http

        DataDeps.register(DataDep(name, cfg["description"], cfg["url"], cfg["checksum"];
                                  fetch_method=fetcher,
                                  post_fetch_method=DataDeps.unpack))
    end
end
"""
    list_datasets()

Prints the name of every dataset declared in `datasets.toml`, one per line.
"""
function list_datasets()
    foreach(println, keys(dataset_configs))
    return
end
|
{"hexsha": "33192e57d6dbfd4676a5741f931c19f14c0dd299", "size": 1883, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/datasets.jl", "max_stars_repo_name": "VasanthManiVasi/MusicModels.jl", "max_stars_repo_head_hexsha": "4724241286ea9047866c9216ac9d88fe15021446", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-07T07:27:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-07T07:27:15.000Z", "max_issues_repo_path": "src/datasets.jl", "max_issues_repo_name": "VasanthManiVasi/MusicTransformer.jl", "max_issues_repo_head_hexsha": "9c883663a8641970e2fcac186e9e807595145e0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/datasets.jl", "max_forks_repo_name": "VasanthManiVasi/MusicTransformer.jl", "max_forks_repo_head_hexsha": "9c883663a8641970e2fcac186e9e807595145e0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8888888889, "max_line_length": 92, "alphanum_fraction": 0.6887944769, "num_tokens": 461}
|
C$Procedure ZZEKRD03 ( EK, read class 3 column entry elements )

      SUBROUTINE ZZEKRD03 ( HANDLE, SEGDSC, COLDSC,
     .                      RECPTR, CVLEN,  CVAL,   ISNULL )

      IMPLICIT NONE

C$ Abstract
C
C     Read a column entry from a specified record in a class 3 column.
C     Class 3 columns contain scalar character values.
C
C$ Disclaimer
C
C     THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE
C     CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S.
C     GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE
C     ADMINISTRATION (NASA). THE SOFTWARE IS TECHNOLOGY AND SOFTWARE
C     PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED "AS-IS"
C     TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY
C     WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A
C     PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC
C     SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE
C     SOFTWARE AND RELATED MATERIALS, HOWEVER USED.
C
C     IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA
C     BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT
C     LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND,
C     INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS,
C     REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE
C     REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY.
C
C     RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF
C     THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY
C     CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE
C     ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE.
C
C$ Required_Reading
C
C     EK
C
C$ Keywords
C
C     EK
C     PRIVATE
C
C$ Declarations

      INCLUDE 'ekbool.inc'
      INCLUDE 'ekcoldsc.inc'
      INCLUDE 'ekcnamsz.inc'
      INCLUDE 'ekdatpag.inc'
      INCLUDE 'ekrecptr.inc'
      INCLUDE 'eksegdsc.inc'
      INCLUDE 'ektype.inc'

      INTEGER               HANDLE
      INTEGER               SEGDSC ( SDSCSZ )
      INTEGER               COLDSC ( CDSCSZ )
      INTEGER               RECPTR
      INTEGER               CVLEN
      CHARACTER*(*)         CVAL
      LOGICAL               ISNULL

C$ Brief_I/O
C
C     Variable  I/O  Description
C     --------  ---  --------------------------------------------------
C     HANDLE     I   Handle attached to EK file.
C     SEGDSC     I   Segment descriptor.
C     COLDSC     I   Column descriptor.
C     RECPTR     I   Record pointer.
C     CVLEN      O   Length of returned character value.
C     CVAL       O   Character value in column entry.
C     ISNULL     O   Flag indicating whether column entry is null.
C
C$ Detailed_Input
C
C     HANDLE         is an EK file handle.
C
C     SEGDSC         is the descriptor of the segment from which data is
C                    to be read.
C
C     COLDSC         is the descriptor of the column from which data is
C                    to be read.
C
C     RECPTR         is a pointer to the record containing the column
C                    entry to be written.
C
C$ Detailed_Output
C
C     CVLEN          is the length of the returned string value.  This
C                    is the index of the last non-blank character of
C                    the string.  This definition applies to both fixed-
C                    and variable-length strings.
C
C                    CVLEN is set to 1 if the column entry is null.
C
C     CVAL           is the value read from the specified column entry.
C                    If CVAL has insufficient length to hold the
C                    returned string value, the output value is
C                    truncated on the right.  Entries that are shorter
C                    than the string length of CVAL are padded with
C                    trailing blanks.
C
C     ISNULL         is a logical flag indicating whether the entry is
C                    null.
C
C$ Parameters
C
C     None.
C
C$ Exceptions
C
C     1)  If HANDLE is invalid, the error will be diagnosed by routines
C         called by this routine.
C
C     2)  If the specified column entry has not been initialized, the
C         error SPICE(UNINITIALIZED) is signaled.
C
C     3)  If the ordinal position of the column specified by COLDSC
C         is out of range, the error SPICE(INVALIDINDEX) is signaled.
C
C     4)  If the output string CVAL is too short to accommodate the
C         returned string value, the output value is truncated on the
C         right.  No error is signaled.
C
C     5)  If an I/O error occurs while reading the indicated file,
C         the error will be diagnosed by routines called by this
C         routine.
C
C$ Files
C
C     See the EK Required Reading for a discussion of the EK file
C     format.
C
C$ Particulars
C
C     This routine is a utility for reading data from class 3 columns.
C
C$ Examples
C
C     See EKRCEC.
C
C$ Restrictions
C
C     None.
C
C$ Literature_References
C
C     None.
C
C$ Author_and_Institution
C
C     N.J. Bachman   (JPL)
C
C$ Version
C
C-    SPICELIB Version 1.4.0, 07-FEB-2015 (NJB)
C
C        Now uses ERRHAN to insert DAS file name into
C        long error messages.
C
C        Bug fix: changed max column index in long error
C        message from NREC to NCOLS.
C
C-    SPICELIB Version 1.3.0, 31-MAY-2010 (NJB)
C
C        Bug fix: call to DASRDI was overwriting local memory. This
C        problem did not affect operation of the routine except on
C        the Mac/Intel/OSX/ifort/32-bit platform, on which it caused
C        a segmentation fault when this routine was compiled with
C        default optimization.
C
C-    SPICELIB Version 1.2.0, 23-JUL-1999 (NJB)
C
C        Error check for string truncation on output was removed.
C        This error check interfered with the use of this routine
C        (via a call to ZZEKRSC) within ZZEKJSRT, which relies on
C        being able to read into a buffer initial substrings of scalar
C        data.
C
C-    SPICELIB Version 1.1.0, 28-JUL-1997 (NJB)
C
C        Error check for string truncation on output was added.
C        SHORT error message SPICE(UNINITIALIZEDVALUE) was shortened
C        to SPICE(UNINITIALIZED).  Error messages were enhanced so
C        as to use column names rather than indices.  Miscellaneous
C        header fixes were made.
C
C-    SPICELIB Version 1.0.0, 18-OCT-1995 (NJB)
C
C-&

C$ Revisions
C
C-    SPICELIB Version 1.2.0, 23-JUL-1999 (NJB)
C
C        Error check for string truncation on output was removed.
C        This error check interfered with the use of this routine
C        (via a call to ZZEKRSC) within ZZEKJSRT, which relies on
C        being able to read into a buffer initial substrings of scalar
C        data.
C
C-    SPICELIB Version 1.1.0, 25-JUL-1997 (NJB)
C
C        Error check for string truncation on output was added.
C        SHORT error message SPICE(UNINITIALIZEDVALUE) was shortened
C        to SPICE(UNINITIALIZED), since the previous string exceeded
C        the maximum allowed length for the short error message.
C
C        Error messages were enhanced so as to use column names rather
C        than indices.
C
C-&

C
C     Non-SPICELIB functions
C
      INTEGER               ZZEKRP2N

C
C     Local variables
C
      CHARACTER*(CNAMSZ)    COLUMN

      INTEGER               AVAIL
      INTEGER               B
      INTEGER               BPOS
      INTEGER               COLIDX
      INTEGER               DATPTR
      INTEGER               E
      INTEGER               EPOS
      INTEGER               L
      INTEGER               N
      INTEGER               NCOLS
      INTEGER               P
      INTEGER               PBASE
      INTEGER               RELPTR
      INTEGER               PTRLOC
      INTEGER               RECNO

C
C     Use discovery check-in.
C
C     Make sure the column exists.  The column's ordinal index must
C     lie within the segment's column count.
C
      NCOLS  = SEGDSC ( NCIDX  )
      COLIDX = COLDSC ( ORDIDX )

      IF ( ( COLIDX .LT. 1 ) .OR. ( COLIDX .GT. NCOLS ) ) THEN

         RECNO = ZZEKRP2N ( HANDLE, SEGDSC(SNOIDX), RECPTR )

         CALL CHKIN  ( 'ZZEKRD03' )
         CALL SETMSG ( 'Column index = #; valid range is 1:#.' //
     .                 'SEGNO = #; RECNO = #; EK = #' )
         CALL ERRINT ( '#', COLIDX )
         CALL ERRINT ( '#', NCOLS )
         CALL ERRINT ( '#', SEGDSC(SNOIDX) )
         CALL ERRINT ( '#', RECNO )
         CALL ERRHAN ( '#', HANDLE )
         CALL SIGERR ( 'SPICE(INVALIDINDEX)' )
         CALL CHKOUT ( 'ZZEKRD03' )
         RETURN

      END IF

C
C     Compute the data pointer location, and read both the pointer
C     and the stored string size.  The record's data pointers start
C     at offset DPTBAS from the record pointer; the COLIDX-th pointer
C     belongs to this column.
C
      PTRLOC = RECPTR + DPTBAS + COLIDX

      CALL DASRDI ( HANDLE, PTRLOC, PTRLOC, DATPTR )

      IF ( DATPTR .GT. 0 ) THEN
C
C        Read the value.  This is slightly more complicated than
C        the numeric cases, because the value may be spread across
C        multiple pages.  Also, we must not write past the end of the
C        output string.
C
C        We'll need the number of the page at which the first character
C        of the string is stored.  This page contains at least one
C        character of the data value.
C
C        The encoded integer at DATPTR is the character count of the
C        stored string; read it into CVLEN.
C
         CALL ZZEKGEI ( HANDLE, DATPTR, CVLEN )

C
C        Set the data pointer to the start of the string data, skipping
C        over the encoded string length.
C
         DATPTR = DATPTR + ENCSIZ

C
C        N is the number of characters actually returned:  the stored
C        length, truncated on the right to fit CVAL if necessary.
C
         N      = MIN ( CVLEN, LEN(CVAL) )

C
C        Read the available data from the page under consideration.
C
         CALL ZZEKPGPG ( CHR, DATPTR, P, PBASE )

         RELPTR = DATPTR - PBASE
         AVAIL  = MIN ( N, CPSIZE - RELPTR + 1 )
         B      = DATPTR
         E      = DATPTR + AVAIL - 1
         BPOS   = 1
         EPOS   = AVAIL
         L      = EPOS - BPOS + 1

         CALL DASRDC ( HANDLE, B, E, BPOS, EPOS, CVAL )

         N = N - L

C
C        Follow the chain of character pages until all N remaining
C        characters have been copied into CVAL.
C
         DO WHILE ( N .GT. 0 )
C
C           Read the forward page pointer from the current page; find
C           the base address of the referenced page.
C
            CALL ZZEKGEI  ( HANDLE, PBASE+CFPIDX, P )
            CALL ZZEKPGBS ( CHR, P, PBASE )

            AVAIL = MIN ( N, CPSIZE )
            B     = PBASE + 1
            E     = PBASE + AVAIL
            BPOS  = EPOS + 1
            EPOS  = EPOS + AVAIL

            CALL DASRDC ( HANDLE, B, E, BPOS, EPOS, CVAL )

            N    = N - AVAIL
            BPOS = EPOS + 1

         END DO

C
C        Blank-pad CVAL if required.
C
         IF ( LEN(CVAL) .GT. EPOS ) THEN
            CVAL( EPOS+1 : ) = ' '
         END IF

         ISNULL = .FALSE.

      ELSE IF ( DATPTR .EQ. NULL ) THEN
C
C        The value is null.
C
         ISNULL = .TRUE.
         CVLEN  = 1

      ELSE IF (      ( DATPTR .EQ. UNINIT )
     .          .OR. ( DATPTR .EQ. NOBACK ) ) THEN
C
C        The data value is absent.  This is an error.
C
         RECNO = ZZEKRP2N ( HANDLE, SEGDSC(SNOIDX), RECPTR )

         CALL ZZEKCNAM ( HANDLE, COLDSC, COLUMN )

         CALL CHKIN  ( 'ZZEKRD03' )
         CALL SETMSG ( 'Attempted to read uninitialized column ' //
     .                 'entry. SEGNO = #; COLUMN = #; RECNO = #; ' //
     .                 'EK = #' )
         CALL ERRINT ( '#', SEGDSC(SNOIDX) )
         CALL ERRCH  ( '#', COLUMN )
         CALL ERRINT ( '#', RECNO )
         CALL ERRHAN ( '#', HANDLE )
         CALL SIGERR ( 'SPICE(UNINITIALIZED)' )
         CALL CHKOUT ( 'ZZEKRD03' )
         RETURN

      ELSE
C
C        The data pointer is corrupted.
C
         RECNO = ZZEKRP2N ( HANDLE, SEGDSC(SNOIDX), RECPTR )

         CALL ZZEKCNAM ( HANDLE, COLDSC, COLUMN )

         CALL CHKIN  ( 'ZZEKRD03' )
         CALL SETMSG ( 'Data pointer is corrupted. SEGNO = #; ' //
     .                 'COLUMN = #; RECNO = #; EK = #' )
         CALL ERRINT ( '#', SEGDSC(SNOIDX) )
         CALL ERRCH  ( '#', COLUMN )
         CALL ERRINT ( '#', RECNO )
         CALL ERRHAN ( '#', HANDLE )
         CALL SIGERR ( 'SPICE(BUG)' )
         CALL CHKOUT ( 'ZZEKRD03' )
         RETURN

      END IF

      RETURN
      END
|
{"hexsha": "f83882f1d30d3f5a5ff907ec99eb28d2fb7c0ce6", "size": 12892, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "source/nasa_f/zzekrd03.f", "max_stars_repo_name": "agforero/FTFramework", "max_stars_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-08-19T21:43:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T02:57:25.000Z", "max_issues_repo_path": "source/nasa_f/zzekrd03.f", "max_issues_repo_name": "agforero/fortran-testing-framework", "max_issues_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-07T21:17:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-09T02:18:07.000Z", "max_forks_repo_path": "source/nasa_f/zzekrd03.f", "max_forks_repo_name": "agforero/fortran-testing-framework", "max_forks_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T08:41:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:41:53.000Z", "avg_line_length": 31.6756756757, "max_line_length": 72, "alphanum_fraction": 0.5463853553, "num_tokens": 3471}
|
(* Title: HOL/Algebra/Embedded_Algebras.thy
Author: Paulo Emílio de Vilhena
*)
theory Embedded_Algebras
imports Subrings Generated_Groups
begin
section \<open>Definitions\<close>
(* An embedded algebra: a ring R seen as a vector space over a subfield K of R. *)
locale embedded_algebra =
K?: subfield K R + R?: ring R for K :: "'a set" and R :: "('a, 'b) ring_scheme" (structure)
(* line_extension K a E: the set of all (k \<otimes> a) \<oplus> e for k \<in> K, e \<in> E,
   written as the set sum of the right coset K #> a with E. *)
definition (in ring) line_extension :: "'a set \<Rightarrow> 'a \<Rightarrow> 'a set \<Rightarrow> 'a set"
where "line_extension K a E = (K #> a) <+>\<^bsub>R\<^esub> E"
(* Span K Us: the K-linear span of the list Us, built by iterated line extension
   starting from the trivial subspace { \<zero> }. *)
fun (in ring) Span :: "'a set \<Rightarrow> 'a list \<Rightarrow> 'a set"
where "Span K Us = foldr (line_extension K) Us { \<zero> }"
(* combine Ks Us: the linear combination k1\<otimes>u1 \<oplus> ... truncated at the
   shorter of the two lists; any leftover suffix contributes \<zero>. *)
fun (in ring) combine :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a"
where
"combine (k # Ks) (u # Us) = (k \<otimes> u) \<oplus> (combine Ks Us)"
| "combine Ks Us = \<zero>"
(* Linear independence of a list Us over K: each element lies outside the
   span of the elements to its right. *)
inductive (in ring) independent :: "'a set \<Rightarrow> 'a list \<Rightarrow> bool"
where
li_Nil [simp, intro]: "independent K []"
| li_Cons: "\<lbrakk> u \<in> carrier R; u \<notin> Span K Us; independent K Us \<rbrakk> \<Longrightarrow> independent K (u # Us)"
(* dimension n K E: E is reachable from { \<zero> } by n line extensions with
   fresh (not-yet-spanned) vectors, i.e. E has dimension n over K. *)
inductive (in ring) dimension :: "nat \<Rightarrow> 'a set \<Rightarrow> 'a set \<Rightarrow> bool"
where
zero_dim [simp, intro]: "dimension 0 K { \<zero> }"
| Suc_dim: "\<lbrakk> v \<in> carrier R; v \<notin> E; dimension n K E \<rbrakk> \<Longrightarrow> dimension (Suc n) K (line_extension K v E)"
subsubsection \<open>Syntactic Definitions\<close>
(* dependent is simply the negation of independent. *)
abbreviation (in ring) dependent :: "'a set \<Rightarrow> 'a list \<Rightarrow> bool"
where "dependent K U \<equiv> \<not> independent K U"
(* Infix application, used later for notation such as "dimension n over K". *)
definition over :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> 'b" (infixl "over" 65)
where "f over a = f a"
context ring
begin
subsection \<open>Basic Properties - First Part\<close>
(* The line_extension of the subring (R with carrier restricted to K) agrees
   with the ambient ring's line_extension. *)
lemma line_extension_consistent:
assumes "subring K R" shows "ring.line_extension (R \<lparr> carrier := K \<rparr>) = line_extension"
unfolding ring.line_extension_def[OF subring_is_ring[OF assms]] line_extension_def
by (simp add: set_add_def set_mult_def)
(* Likewise, Span computed in the subring agrees with Span in R. *)
lemma Span_consistent:
assumes "subring K R" shows "ring.Span (R \<lparr> carrier := K \<rparr>) = Span"
unfolding ring.Span.simps[OF subring_is_ring[OF assms]] Span.simps
line_extension_consistent[OF assms] by simp
(* A linear combination of carrier elements stays in the carrier. *)
lemma combine_in_carrier [simp, intro]:
"\<lbrakk> set Ks \<subseteq> carrier R; set Us \<subseteq> carrier R \<rbrakk> \<Longrightarrow> combine Ks Us \<in> carrier R"
by (induct Ks Us rule: combine.induct) (auto)
(* Left-multiplying a combination by k scales every coefficient. *)
lemma combine_r_distr:
"\<lbrakk> set Ks \<subseteq> carrier R; set Us \<subseteq> carrier R \<rbrakk> \<Longrightarrow>
k \<in> carrier R \<Longrightarrow> k \<otimes> (combine Ks Us) = combine (map ((\<otimes>) k) Ks) Us"
by (induct Ks Us rule: combine.induct) (auto simp add: m_assoc r_distr)
(* Right-multiplying a combination by u scales every vector. *)
lemma combine_l_distr:
"\<lbrakk> set Ks \<subseteq> carrier R; set Us \<subseteq> carrier R \<rbrakk> \<Longrightarrow>
u \<in> carrier R \<Longrightarrow> (combine Ks Us) \<otimes> u = combine Ks (map (\<lambda>u'. u' \<otimes> u) Us)"
by (induct Ks Us rule: combine.induct) (auto simp add: m_assoc l_distr)
(* combine as a fold over the zipped coefficient/vector pairs. *)
lemma combine_eq_foldr:
"combine Ks Us = foldr (\<lambda>(k, u). \<lambda>l. (k \<otimes> u) \<oplus> l) (zip Ks Us) \<zero>"
by (induct Ks Us rule: combine.induct) (auto)
(* All-zero coefficients give the zero combination. *)
lemma combine_replicate:
"set Us \<subseteq> carrier R \<Longrightarrow> combine (replicate (length Us) \<zero>) Us = \<zero>"
by (induct Us) (auto)
(* Coefficients beyond length Us are irrelevant (combine truncates). *)
lemma combine_take:
"combine (take (length Us) Ks) Us = combine Ks Us"
by (induct Us arbitrary: Ks)
(auto, metis combine.simps(1) list.exhaust take.simps(1) take_Suc_Cons)
(* Appending a single zero coefficient does not change the combination. *)
lemma combine_append_zero:
"set Us \<subseteq> carrier R \<Longrightarrow> combine (Ks @ [ \<zero> ]) Us = combine Ks Us"
proof (induct Ks arbitrary: Us)
case Nil thus ?case by (induct Us) (auto)
next
case Cons thus ?case by (cases Us) (auto)
qed
(* n leading zero coefficients simply skip the first n vectors. *)
lemma combine_prepend_replicate:
"\<lbrakk> set Ks \<subseteq> carrier R; set Us \<subseteq> carrier R \<rbrakk> \<Longrightarrow>
combine ((replicate n \<zero>) @ Ks) Us = combine Ks (drop n Us)"
proof (induct n arbitrary: Us, simp)
case (Suc n) thus ?case
by (cases Us) (auto, meson combine_in_carrier ring_simprules(8) set_drop_subset subset_trans)
qed
(* Padding the coefficient list with trailing zeros changes nothing. *)
lemma combine_append_replicate:
"set Us \<subseteq> carrier R \<Longrightarrow> combine (Ks @ (replicate n \<zero>)) Us = combine Ks Us"
by (induct n) (auto, metis append.assoc combine_append_zero replicate_append_same)
(* Combinations over concatenated lists split as a sum, provided the first
   coefficient list exactly matches the first vector list in length. *)
lemma combine_append:
assumes "length Ks = length Us"
and "set Ks \<subseteq> carrier R" "set Us \<subseteq> carrier R"
and "set Ks' \<subseteq> carrier R" "set Vs \<subseteq> carrier R"
shows "(combine Ks Us) \<oplus> (combine Ks' Vs) = combine (Ks @ Ks') (Us @ Vs)"
using assms
proof (induct Ks arbitrary: Us)
case Nil thus ?case by auto
next
case (Cons k Ks)
then obtain u Us' where Us: "Us = u # Us'"
by (metis length_Suc_conv)
hence u: "u \<in> carrier R" and Us': "set Us' \<subseteq> carrier R"
using Cons(4) by auto
then show ?case
using combine_in_carrier[OF _ Us', of Ks] Cons
combine_in_carrier[OF Cons(5-6)] unfolding Us
by (auto, simp add: add.m_assoc)
qed
(* The sum of two combinations over the same vectors is the combination with
   pointwise-added coefficients. *)
lemma combine_add:
assumes "length Ks = length Us" and "length Ks' = length Us"
and "set Ks \<subseteq> carrier R" "set Ks' \<subseteq> carrier R" "set Us \<subseteq> carrier R"
shows "(combine Ks Us) \<oplus> (combine Ks' Us) = combine (map2 (\<oplus>) Ks Ks') Us"
using assms
proof (induct Us arbitrary: Ks Ks')
case Nil thus ?case by simp
next
case (Cons u Us)
then obtain c c' Cs Cs' where Ks: "Ks = c # Cs" and Ks': "Ks' = c' # Cs'"
by (metis length_Suc_conv)
hence in_carrier:
"c \<in> carrier R" "set Cs \<subseteq> carrier R"
"c' \<in> carrier R" "set Cs' \<subseteq> carrier R"
"u \<in> carrier R" "set Us \<subseteq> carrier R"
using Cons(4-6) by auto
hence lc_in_carrier: "combine Cs Us \<in> carrier R" "combine Cs' Us \<in> carrier R"
using combine_in_carrier by auto
have "combine Ks (u # Us) \<oplus> combine Ks' (u # Us) =
((c \<otimes> u) \<oplus> combine Cs Us) \<oplus> ((c' \<otimes> u) \<oplus> combine Cs' Us)"
unfolding Ks Ks' by auto
also have " ... = ((c \<oplus> c') \<otimes> u \<oplus> (combine Cs Us \<oplus> combine Cs' Us))"
using lc_in_carrier in_carrier(1,3,5) by (simp add: l_distr ring_simprules(7,22))
also have " ... = combine (map2 (\<oplus>) Ks Ks') (u # Us)"
using Cons unfolding Ks Ks' by auto
finally show ?case .
qed
(* Any combination can be renormalized to a coefficient list of exactly
   length Us (pad with zeros or truncate) without changing its value. *)
lemma combine_normalize:
assumes "set Ks \<subseteq> carrier R" "set Us \<subseteq> carrier R" "combine Ks Us = a"
obtains Ks'
where "set (take (length Us) Ks) \<subseteq> set Ks'" "set Ks' \<subseteq> set (take (length Us) Ks) \<union> { \<zero> }"
and "length Ks' = length Us" "combine Ks' Us = a"
proof -
define Ks'
where "Ks' = (if length Ks \<le> length Us
then Ks @ (replicate (length Us - length Ks) \<zero>) else take (length Us) Ks)"
hence "set (take (length Us) Ks) \<subseteq> set Ks'" "set Ks' \<subseteq> set (take (length Us) Ks) \<union> { \<zero> }"
"length Ks' = length Us" "a = combine Ks' Us"
using combine_append_replicate[OF assms(2)] combine_take assms(3) by auto
thus thesis
using that by blast
qed
(* Membership in a line extension: u = k \<otimes> a \<oplus> v for some k \<in> K, v \<in> E. *)
lemma line_extension_mem_iff: "u \<in> line_extension K a E \<longleftrightarrow> (\<exists>k \<in> K. \<exists>v \<in> E. u = k \<otimes> a \<oplus> v)"
unfolding line_extension_def set_add_def'[of R "K #> a" E] unfolding r_coset_def by blast
(* Line extensions of carrier subsets stay inside the carrier. *)
lemma line_extension_in_carrier:
assumes "K \<subseteq> carrier R" "a \<in> carrier R" "E \<subseteq> carrier R"
shows "line_extension K a E \<subseteq> carrier R"
using set_add_closed[OF r_coset_subset_G[OF assms(1-2)] assms(3)]
by (simp add: line_extension_def)
(* Hence the span of carrier vectors is a subset of the carrier. *)
lemma Span_in_carrier:
assumes "K \<subseteq> carrier R" "set Us \<subseteq> carrier R"
shows "Span K Us \<subseteq> carrier R"
using assms by (induct Us) (auto simp add: line_extension_in_carrier)
subsection \<open>Some Basic Properties of Linear Independence\<close>
(* An independent list consists of carrier elements. *)
lemma independent_in_carrier: "independent K Us \<Longrightarrow> set Us \<subseteq> carrier R"
by (induct Us rule: independent.induct) (simp_all)
(* Inversion of the li_Cons rule: head outside the tail's span, tail
   independent, head in the carrier. *)
lemma independent_backwards:
"independent K (u # Us) \<Longrightarrow> u \<notin> Span K Us"
"independent K (u # Us) \<Longrightarrow> independent K Us"
"independent K (u # Us) \<Longrightarrow> u \<in> carrier R"
by (cases rule: independent.cases, auto)+
(* An independent list of length n spans an n-dimensional space. *)
lemma dimension_independent [intro]: "independent K Us \<Longrightarrow> dimension (length Us) K (Span K Us)"
proof (induct Us)
case Nil thus ?case by simp
next
case Cons thus ?case
using Suc_dim independent_backwards[OF Cons(2)] by auto
qed
text \<open>Now, we fix K, a subfield of the ring. Many lemmas would also be true for weaker
structures, but our interest is to work with subfields, so generalization could
be the subject of a future work.\<close>
context
fixes K :: "'a set" assumes K: "subfield K R"
begin
subsection \<open>Basic Properties - Second Part\<close>
(* Within this context K is a fixed subfield; export the generic subring
   facts about K as simp rules. *)
lemmas subring_props [simp] =
subringE[OF subfieldE(1)[OF K]]
(* Extending an additive subgroup E along a carrier element a yields again an
   additive subgroup: closure under \<oplus>, additive inverse, and \<zero>-membership. *)
lemma line_extension_is_subgroup:
assumes "subgroup E (add_monoid R)" "a \<in> carrier R"
shows "subgroup (line_extension K a E) (add_monoid R)"
proof (rule add.subgroupI)
show "line_extension K a E \<subseteq> carrier R"
by (simp add: assms add.subgroupE(1) line_extension_def r_coset_subset_G set_add_closed)
next
(* Non-emptiness: \<zero> = \<zero> \<otimes> a \<oplus> \<zero> is a line-extension element. *)
have "\<zero> = \<zero> \<otimes> a \<oplus> \<zero>"
using assms(2) by simp
hence "\<zero> \<in> line_extension K a E"
using line_extension_mem_iff subgroup.one_closed[OF assms(1)] by auto
thus "line_extension K a E \<noteq> {}" by auto
next
(* Closure under addition: add coefficients and E-components separately. *)
fix u1 u2
assume "u1 \<in> line_extension K a E" and "u2 \<in> line_extension K a E"
then obtain k1 k2 v1 v2
where u1: "k1 \<in> K" "v1 \<in> E" "u1 = (k1 \<otimes> a) \<oplus> v1"
and u2: "k2 \<in> K" "v2 \<in> E" "u2 = (k2 \<otimes> a) \<oplus> v2"
and in_carr: "k1 \<in> carrier R" "v1 \<in> carrier R" "k2 \<in> carrier R" "v2 \<in> carrier R"
using line_extension_mem_iff by (meson add.subgroupE(1)[OF assms(1)] subring_props(1) subsetCE)
hence "u1 \<oplus> u2 = ((k1 \<oplus> k2) \<otimes> a) \<oplus> (v1 \<oplus> v2)"
using assms(2) by algebra
moreover have "k1 \<oplus> k2 \<in> K" and "v1 \<oplus> v2 \<in> E"
using add.subgroupE(4)[OF assms(1)] u1 u2 by auto
ultimately show "u1 \<oplus> u2 \<in> line_extension K a E"
using line_extension_mem_iff by auto
(* Closure under additive inverse: negate coefficient and E-component. *)
have "\<ominus> u1 = ((\<ominus> k1) \<otimes> a) \<oplus> (\<ominus> v1)"
using in_carr(1-2) u1(3) assms(2) by algebra
moreover have "\<ominus> k1 \<in> K" and "\<ominus> v1 \<in> E"
using add.subgroupE(3)[OF assms(1)] u1 by auto
ultimately show "(\<ominus> u1) \<in> line_extension K a E"
using line_extension_mem_iff by auto
qed
(* Span K Us is an additive subgroup of R. *)
corollary Span_is_add_subgroup:
"set Us \<subseteq> carrier R \<Longrightarrow> subgroup (Span K Us) (add_monoid R)"
using line_extension_is_subgroup normal_imp_subgroup[OF add.one_is_normal] by (induct Us) (auto)
(* Closure of a line extension under scalar multiplication by K, assuming E
   itself is closed under K-scalars. *)
lemma line_extension_smult_closed:
assumes "\<And>k v. \<lbrakk> k \<in> K; v \<in> E \<rbrakk> \<Longrightarrow> k \<otimes> v \<in> E" and "E \<subseteq> carrier R" "a \<in> carrier R"
shows "\<And>k u. \<lbrakk> k \<in> K; u \<in> line_extension K a E \<rbrakk> \<Longrightarrow> k \<otimes> u \<in> line_extension K a E"
proof -
fix k u assume A: "k \<in> K" "u \<in> line_extension K a E"
then obtain k' v'
where u: "k' \<in> K" "v' \<in> E" "u = k' \<otimes> a \<oplus> v'"
and in_carr: "k \<in> carrier R" "k' \<in> carrier R" "v' \<in> carrier R"
using line_extension_mem_iff assms(2) by (meson subring_props(1) subsetCE)
hence "k \<otimes> u = (k \<otimes> k') \<otimes> a \<oplus> (k \<otimes> v')"
using assms(3) by algebra
thus "k \<otimes> u \<in> line_extension K a E"
using assms(1)[OF A(1) u(2)] line_extension_mem_iff u(1) A(1) by auto
qed
(* Bundled subgroup facts about Span: carrier-boundedness, \<zero>-membership,
   closure under \<oplus> and \<ominus>. *)
lemma Span_subgroup_props [simp]:
assumes "set Us \<subseteq> carrier R"
shows "Span K Us \<subseteq> carrier R"
and "\<zero> \<in> Span K Us"
and "\<And>v1 v2. \<lbrakk> v1 \<in> Span K Us; v2 \<in> Span K Us \<rbrakk> \<Longrightarrow> (v1 \<oplus> v2) \<in> Span K Us"
and "\<And>v. v \<in> Span K Us \<Longrightarrow> (\<ominus> v) \<in> Span K Us"
using add.subgroupE subgroup.one_closed[of _ "add_monoid R"]
Span_is_add_subgroup[OF assms(1)] by auto
(* Span is closed under multiplication by scalars from K. *)
lemma Span_smult_closed [simp]:
assumes "set Us \<subseteq> carrier R"
shows "\<And>k v. \<lbrakk> k \<in> K; v \<in> Span K Us \<rbrakk> \<Longrightarrow> k \<otimes> v \<in> Span K Us"
using assms
proof (induct Us)
case Nil thus ?case
using r_null subring_props(1) by (auto, blast)
next
case Cons thus ?case
using Span_subgroup_props(1) line_extension_smult_closed by auto
qed
(* Cancellation of a nonzero scalar: k \<otimes> a \<in> Span implies a \<in> Span.
   Relies on K being a subfield, so inv k exists in K. *)
lemma Span_m_inv_simprule [simp]:
assumes "set Us \<subseteq> carrier R"
shows "\<lbrakk> k \<in> K - { \<zero> }; a \<in> carrier R \<rbrakk> \<Longrightarrow> k \<otimes> a \<in> Span K Us \<Longrightarrow> a \<in> Span K Us"
proof -
assume k: "k \<in> K - { \<zero> }" and a: "a \<in> carrier R" and ka: "k \<otimes> a \<in> Span K Us"
have inv_k: "inv k \<in> K" "inv k \<otimes> k = \<one>"
using subfield_m_inv[OF K k] by simp+
hence "inv k \<otimes> (k \<otimes> a) \<in> Span K Us"
using Span_smult_closed[OF assms _ ka] by simp
thus ?thesis
using inv_k subring_props(1)a k
by (metis (no_types, lifting) DiffE l_one m_assoc subset_iff)
qed
subsection \<open>Span as Linear Combinations\<close>
text \<open>We show that Span is the set of linear combinations\<close>
(* One line-extension step corresponds to consing a coefficient onto the
   combination: extend the set of combinations of Us by u. *)
lemma line_extension_of_combine_set:
assumes "u \<in> carrier R"
shows "line_extension K u { combine Ks Us | Ks. set Ks \<subseteq> K } =
{ combine Ks (u # Us) | Ks. set Ks \<subseteq> K }"
(is "?line_extension = ?combinations")
proof
show "?line_extension \<subseteq> ?combinations"
proof
fix v assume "v \<in> ?line_extension"
then obtain k Ks
where "k \<in> K" "set Ks \<subseteq> K" and "v = combine (k # Ks) (u # Us)"
using line_extension_mem_iff by auto
thus "v \<in> ?combinations"
by (metis (mono_tags, lifting) insert_subset list.simps(15) mem_Collect_eq)
qed
next
show "?combinations \<subseteq> ?line_extension"
proof
fix v assume "v \<in> ?combinations"
then obtain Ks where v: "set Ks \<subseteq> K" "v = combine Ks (u # Us)"
by auto
thus "v \<in> ?line_extension"
proof (cases Ks)
case Cons thus ?thesis
using v line_extension_mem_iff by auto
next
(* Empty coefficient list: the combination is \<zero>, realized as \<zero>\<otimes>u \<oplus> \<zero>. *)
case Nil
hence "v = \<zero>"
using v by simp
moreover have "combine [] Us = \<zero>" by simp
hence "\<zero> \<in> { combine Ks Us | Ks. set Ks \<subseteq> K }"
by (metis (mono_tags, lifting) local.Nil mem_Collect_eq v(1))
hence "(\<zero> \<otimes> u) \<oplus> \<zero> \<in> ?line_extension"
using line_extension_mem_iff subring_props(2) by blast
hence "\<zero> \<in> ?line_extension"
using assms by auto
ultimately show ?thesis by auto
qed
qed
qed
(* Span K Us is exactly the set of K-linear combinations of Us. *)
lemma Span_eq_combine_set:
assumes "set Us \<subseteq> carrier R" shows "Span K Us = { combine Ks Us | Ks. set Ks \<subseteq> K }"
using assms line_extension_of_combine_set
by (induct Us) (auto, metis empty_set empty_subsetI)
(* Same step lemma, restricted to coefficient lists of matching length. *)
lemma line_extension_of_combine_set_length_version:
assumes "u \<in> carrier R"
shows "line_extension K u { combine Ks Us | Ks. length Ks = length Us \<and> set Ks \<subseteq> K } =
{ combine Ks (u # Us) | Ks. length Ks = length (u # Us) \<and> set Ks \<subseteq> K }"
(is "?line_extension = ?combinations")
proof
show "?line_extension \<subseteq> ?combinations"
proof
fix v assume "v \<in> ?line_extension"
then obtain k Ks
where "v = combine (k # Ks) (u # Us)" "length (k # Ks) = length (u # Us)" "set (k # Ks) \<subseteq> K"
using line_extension_mem_iff by auto
thus "v \<in> ?combinations" by blast
qed
next
show "?combinations \<subseteq> ?line_extension"
proof
fix c assume "c \<in> ?combinations"
then obtain Ks where c: "c = combine Ks (u # Us)" "length Ks = length (u # Us)" "set Ks \<subseteq> K"
by blast
then obtain k Ks' where k: "Ks = k # Ks'"
by (metis length_Suc_conv)
thus "c \<in> ?line_extension"
using c line_extension_mem_iff unfolding k by auto
qed
qed
(* Span with coefficient lists of exactly matching length. *)
lemma Span_eq_combine_set_length_version:
assumes "set Us \<subseteq> carrier R"
shows "Span K Us = { combine Ks Us | Ks. length Ks = length Us \<and> set Ks \<subseteq> K }"
using assms line_extension_of_combine_set_length_version by (induct Us) (auto)
subsubsection \<open>Corollaries\<close>
(* Membership in the span as existence of a matching-length combination. *)
corollary Span_mem_iff_length_version:
assumes "set Us \<subseteq> carrier R"
shows "a \<in> Span K Us \<longleftrightarrow> (\<exists>Ks. set Ks \<subseteq> K \<and> length Ks = length Us \<and> a = combine Ks Us)"
using Span_eq_combine_set_length_version[OF assms] by blast
(* A span element yields a vanishing combination of (a # Us) whose leading
   coefficient (\<ominus> \<one>) is nonzero. *)
corollary Span_mem_imp_non_trivial_combine:
assumes "set Us \<subseteq> carrier R" and "a \<in> Span K Us"
obtains k Ks
where "k \<in> K - { \<zero> }" "set Ks \<subseteq> K" "length Ks = length Us" "combine (k # Ks) (a # Us) = \<zero>"
proof -
obtain Ks where Ks: "set Ks \<subseteq> K" "length Ks = length Us" "a = combine Ks Us"
using Span_mem_iff_length_version[OF assms(1)] assms(2) by auto
hence "((\<ominus> \<one>) \<otimes> a) \<oplus> a = combine ((\<ominus> \<one>) # Ks) (a # Us)"
by auto
moreover have "((\<ominus> \<one>) \<otimes> a) \<oplus> a = \<zero>"
using assms(2) Span_subgroup_props(1)[OF assms(1)] l_minus l_neg by auto
moreover have "\<ominus> \<one> \<noteq> \<zero>"
using subfieldE(6)[OF K] l_neg by force
ultimately show ?thesis
using that subring_props(3,5) Ks(1-2) by (force simp del: combine.simps)
qed
(* a is in the span iff some combination of (a # Us) with nonzero leading
   coefficient vanishes.  The converse direction uses scalar cancellation. *)
corollary Span_mem_iff:
assumes "set Us \<subseteq> carrier R" and "a \<in> carrier R"
shows "a \<in> Span K Us \<longleftrightarrow> (\<exists>k \<in> K - { \<zero> }. \<exists>Ks. set Ks \<subseteq> K \<and> combine (k # Ks) (a # Us) = \<zero>)"
(is "?in_Span \<longleftrightarrow> ?exists_combine")
proof
assume "?in_Span"
then obtain Ks where Ks: "set Ks \<subseteq> K" "a = combine Ks Us"
using Span_eq_combine_set[OF assms(1)] by auto
hence "((\<ominus> \<one>) \<otimes> a) \<oplus> a = combine ((\<ominus> \<one>) # Ks) (a # Us)"
by auto
moreover have "((\<ominus> \<one>) \<otimes> a) \<oplus> a = \<zero>"
using assms(2) l_minus l_neg by auto
moreover have "\<ominus> \<one> \<noteq> \<zero>"
using subfieldE(6)[OF K] l_neg by force
ultimately show "?exists_combine"
using subring_props(3,5) Ks(1) by (force simp del: combine.simps)
next
assume "?exists_combine"
then obtain k Ks
where k: "k \<in> K" "k \<noteq> \<zero>" and Ks: "set Ks \<subseteq> K" and a: "(k \<otimes> a) \<oplus> combine Ks Us = \<zero>"
by auto
hence "combine Ks Us \<in> Span K Us"
using Span_eq_combine_set[OF assms(1)] by auto
hence "k \<otimes> a \<in> Span K Us"
using Span_subgroup_props[OF assms(1)] k Ks a
by (metis (no_types, lifting) assms(2) contra_subsetD m_closed minus_equality subring_props(1))
thus "?in_Span"
using Span_m_inv_simprule[OF assms(1) _ assms(2), of k] k by auto
qed
subsection \<open>Span as the minimal subgroup that contains \<^term>\<open>K <#> (set Us)\<close>\<close>
text \<open>Now we show the link between Span and Group.generate\<close>
(* Consing a vector onto the list enlarges (weakly) the span. *)
lemma mono_Span:
assumes "set Us \<subseteq> carrier R" and "u \<in> carrier R"
shows "Span K Us \<subseteq> Span K (u # Us)"
proof
fix v assume v: "v \<in> Span K Us"
(* v = \<zero> \<otimes> u \<oplus> v exhibits v as a line-extension element. *)
hence "(\<zero> \<otimes> u) \<oplus> v \<in> Span K (u # Us)"
using line_extension_mem_iff by auto
thus "v \<in> Span K (u # Us)"
using Span_subgroup_props(1)[OF assms(1)] assms(2) v
by (auto simp del: Span.simps)
qed
(* Minimality: any additive subgroup containing K <#> set Us contains the span. *)
lemma Span_min:
assumes "set Us \<subseteq> carrier R" and "subgroup E (add_monoid R)"
shows "K <#> (set Us) \<subseteq> E \<Longrightarrow> Span K Us \<subseteq> E"
proof -
assume "K <#> (set Us) \<subseteq> E" show "Span K Us \<subseteq> E"
proof
fix v assume "v \<in> Span K Us"
then obtain Ks where v: "set Ks \<subseteq> K" "v = combine Ks Us"
using Span_eq_combine_set[OF assms(1)] by auto
(* Each summand k \<otimes> u of the combination lies in E; E is closed under \<oplus>. *)
from \<open>set Ks \<subseteq> K\<close> \<open>set Us \<subseteq> carrier R\<close> and \<open>K <#> (set Us) \<subseteq> E\<close>
show "v \<in> E" unfolding v(2)
proof (induct Ks Us rule: combine.induct)
case (1 k Ks u Us)
hence "k \<in> K" and "u \<in> set (u # Us)" by auto
hence "k \<otimes> u \<in> E"
using 1(4) unfolding set_mult_def by auto
moreover have "K <#> set Us \<subseteq> E"
using 1(4) unfolding set_mult_def by auto
hence "combine Ks Us \<in> E"
using 1 by auto
ultimately show ?case
using add.subgroupE(4)[OF assms(2)] by auto
next
case "2_1" thus ?case
using subgroup.one_closed[OF assms(2)] by auto
next
case "2_2" thus ?case
using subgroup.one_closed[OF assms(2)] by auto
qed
qed
qed
(* Span K Us is exactly the additive subgroup generated by K <#> set Us. *)
lemma Span_eq_generate:
assumes "set Us \<subseteq> carrier R" shows "Span K Us = generate (add_monoid R) (K <#> (set Us))"
proof (rule add.generateI)
show "subgroup (Span K Us) (add_monoid R)"
using Span_is_add_subgroup[OF assms] .
next
show "\<And>E. \<lbrakk> subgroup E (add_monoid R); K <#> set Us \<subseteq> E \<rbrakk> \<Longrightarrow> Span K Us \<subseteq> E"
using Span_min assms by blast
next
(* The generating set itself is contained in the span. *)
show "K <#> set Us \<subseteq> Span K Us"
using assms
proof (induct Us)
case Nil thus ?case
unfolding set_mult_def by auto
next
case (Cons u Us)
have "K <#> set (u # Us) = (K <#> { u }) \<union> (K <#> set Us)"
unfolding set_mult_def by auto
moreover have "\<And>k. k \<in> K \<Longrightarrow> k \<otimes> u \<in> Span K (u # Us)"
proof -
fix k assume k: "k \<in> K"
hence "combine [ k ] (u # Us) \<in> Span K (u # Us)"
using Span_eq_combine_set[OF Cons(2)] by (auto simp del: combine.simps)
moreover have "k \<in> carrier R" and "u \<in> carrier R"
using Cons(2) k subring_props(1) by (blast, auto)
ultimately show "k \<otimes> u \<in> Span K (u # Us)"
by (auto simp del: Span.simps)
qed
hence "K <#> { u } \<subseteq> Span K (u # Us)"
unfolding set_mult_def by auto
moreover have "K <#> set Us \<subseteq> Span K (u # Us)"
using mono_Span[of Us u] Cons by (auto simp del: Span.simps)
ultimately show ?case
using Cons by (auto simp del: Span.simps)
qed
qed
subsubsection \<open>Corollaries\<close>
(* The span only depends on the set of vectors, not the list order. *)
corollary Span_same_set:
assumes "set Us \<subseteq> carrier R"
shows "set Us = set Vs \<Longrightarrow> Span K Us = Span K Vs"
using Span_eq_generate assms by auto
(* K <#> set Us is contained in the span (generate includes its generators). *)
corollary Span_incl: "set Us \<subseteq> carrier R \<Longrightarrow> K <#> (set Us) \<subseteq> Span K Us"
using Span_eq_generate generate.incl[of _ _ "add_monoid R"] by auto
(* Each vector of Us lies in Span K Us, via { \<one> } <#> set Us \<subseteq> K <#> set Us. *)
corollary Span_base_incl: "set Us \<subseteq> carrier R \<Longrightarrow> set Us \<subseteq> Span K Us"
proof -
assume A: "set Us \<subseteq> carrier R"
hence "{ \<one> } <#> set Us = set Us"
unfolding set_mult_def by force
moreover have "{ \<one> } <#> set Us \<subseteq> K <#> set Us"
using subring_props(3) unfolding set_mult_def by blast
ultimately show ?thesis
using Span_incl[OF A] by auto
qed
(* Span is monotone with respect to list inclusion (as sets). *)
corollary mono_Span_sublist:
assumes "set Us \<subseteq> set Vs" "set Vs \<subseteq> carrier R"
shows "Span K Us \<subseteq> Span K Vs"
using add.mono_generate[OF mono_set_mult[OF _ assms(1), of K K R]]
Span_eq_generate[OF assms(2)] Span_eq_generate[of Us] assms by auto
(* Appending vectors on either side never shrinks the span. *)
corollary mono_Span_append:
assumes "set Us \<subseteq> carrier R" "set Vs \<subseteq> carrier R"
shows "Span K Us \<subseteq> Span K (Us @ Vs)"
and "Span K Us \<subseteq> Span K (Vs @ Us)"
using mono_Span_sublist[of Us "Us @ Vs"] assms
Span_same_set[of "Us @ Vs" "Vs @ Us"] by auto
(* If every vector of Us already lies in Span K Vs, so does all of Span K Us. *)
corollary mono_Span_subset:
assumes "set Us \<subseteq> Span K Vs" "set Vs \<subseteq> carrier R"
shows "Span K Us \<subseteq> Span K Vs"
proof (rule Span_min[OF _ Span_is_add_subgroup[OF assms(2)]])
show "set Us \<subseteq> carrier R"
using Span_subgroup_props(1)[OF assms(2)] assms by auto
show "K <#> set Us \<subseteq> Span K Vs"
using Span_smult_closed[OF assms(2)] assms(1) unfolding set_mult_def by blast
qed
(* A strict span inclusion is witnessed by some vector of Vs outside Span K Us. *)
lemma Span_strict_incl:
assumes "set Us \<subseteq> carrier R" "set Vs \<subseteq> carrier R"
shows "Span K Us \<subset> Span K Vs \<Longrightarrow> (\<exists>v \<in> set Vs. v \<notin> Span K Us)"
proof -
assume "Span K Us \<subset> Span K Vs" show "\<exists>v \<in> set Vs. v \<notin> Span K Us"
proof (rule ccontr)
assume "\<not> (\<exists>v \<in> set Vs. v \<notin> Span K Us)"
hence "Span K Vs \<subseteq> Span K Us"
using mono_Span_subset[OF _ assms(1), of Vs] by auto
from \<open>Span K Us \<subset> Span K Vs\<close> and \<open>Span K Vs \<subseteq> Span K Us\<close>
show False by simp
qed
qed
(* The span of a concatenation is the set sum of the individual spans. *)
lemma Span_append_eq_set_add:
assumes "set Us \<subseteq> carrier R" and "set Vs \<subseteq> carrier R"
shows "Span K (Us @ Vs) = (Span K Us <+>\<^bsub>R\<^esub> Span K Vs)"
using assms
proof (induct Us)
case Nil thus ?case
using Span_subgroup_props(1)[OF Nil(2)] unfolding set_add_def' by force
next
case (Cons u Us)
hence in_carrier:
"u \<in> carrier R" "set Us \<subseteq> carrier R" "set Vs \<subseteq> carrier R"
by auto
(* Key step: line-extending a set sum by u equals the set sum with the
   extended left span; the two inclusions are proved by regrouping sums. *)
have "line_extension K u (Span K Us <+>\<^bsub>R\<^esub> Span K Vs) = (Span K (u # Us) <+>\<^bsub>R\<^esub> Span K Vs)"
proof
show "line_extension K u (Span K Us <+>\<^bsub>R\<^esub> Span K Vs) \<subseteq> (Span K (u # Us) <+>\<^bsub>R\<^esub> Span K Vs)"
proof
fix v assume "v \<in> line_extension K u (Span K Us <+>\<^bsub>R\<^esub> Span K Vs)"
then obtain k u' v'
where v: "k \<in> K" "u' \<in> Span K Us" "v' \<in> Span K Vs" "v = k \<otimes> u \<oplus> (u' \<oplus> v')"
using line_extension_mem_iff[of v _ u "Span K Us <+>\<^bsub>R\<^esub> Span K Vs"]
unfolding set_add_def' by blast
hence "v = (k \<otimes> u \<oplus> u') \<oplus> v'"
using in_carrier(2-3)[THEN Span_subgroup_props(1)] in_carrier(1) subring_props(1)
by (metis (no_types, lifting) rev_subsetD ring_simprules(7) semiring_simprules(3))
moreover have "k \<otimes> u \<oplus> u' \<in> Span K (u # Us)"
using line_extension_mem_iff v(1-2) by auto
ultimately show "v \<in> Span K (u # Us) <+>\<^bsub>R\<^esub> Span K Vs"
unfolding set_add_def' using v(3) by auto
qed
next
show "Span K (u # Us) <+>\<^bsub>R\<^esub> Span K Vs \<subseteq> line_extension K u (Span K Us <+>\<^bsub>R\<^esub> Span K Vs)"
proof
fix v assume "v \<in> Span K (u # Us) <+>\<^bsub>R\<^esub> Span K Vs"
then obtain k u' v'
where v: "k \<in> K" "u' \<in> Span K Us" "v' \<in> Span K Vs" "v = (k \<otimes> u \<oplus> u') \<oplus> v'"
using line_extension_mem_iff[of _ _ u "Span K Us"] unfolding set_add_def' by auto
hence "v = (k \<otimes> u) \<oplus> (u' \<oplus> v')"
using in_carrier(2-3)[THEN Span_subgroup_props(1)] in_carrier(1) subring_props(1)
by (metis (no_types, lifting) rev_subsetD ring_simprules(5,7))
thus "v \<in> line_extension K u (Span K Us <+>\<^bsub>R\<^esub> Span K Vs)"
using line_extension_mem_iff[of "(k \<otimes> u) \<oplus> (u' \<oplus> v')" K u "Span K Us <+>\<^bsub>R\<^esub> Span K Vs"]
unfolding set_add_def' using v by auto
qed
qed
thus ?case
using Cons by auto
qed
subsection \<open>Characterisation of Linearly Independent "Sets"\<close>
declare independent_backwards [intro]
declare independent_in_carrier [intro]
(* An independent list has no repeated elements: a repeat would lie in the
   span of the remaining suffix. *)
lemma independent_distinct: "independent K Us \<Longrightarrow> distinct Us"
proof (induct Us rule: list.induct)
case Nil thus ?case by simp
next
case Cons thus ?case
using independent_backwards[OF Cons(2)]
independent_in_carrier[OF Cons(2)]
Span_base_incl
by auto
qed
(* Adding an independent head strictly enlarges the span (u witnesses it). *)
lemma independent_strict_incl:
assumes "independent K (u # Us)" shows "Span K Us \<subset> Span K (u # Us)"
proof -
have "u \<in> Span K (u # Us)"
using Span_base_incl[OF independent_in_carrier[OF assms]] by auto
moreover have "Span K Us \<subseteq> Span K (u # Us)"
using mono_Span independent_in_carrier[OF assms] by auto
ultimately show ?thesis
using independent_backwards(1)[OF assms] by auto
qed
(* Exchange-style step: if Span (u # Us) fits inside Span Vs, some v from Vs
   can replace u while keeping (v # Us) independent. *)
corollary independent_replacement:
assumes "independent K (u # Us)" and "independent K Vs"
shows "Span K (u # Us) \<subseteq> Span K Vs \<Longrightarrow> (\<exists>v \<in> set Vs. independent K (v # Us))"
proof -
assume "Span K (u # Us) \<subseteq> Span K Vs"
hence "Span K Us \<subset> Span K Vs"
using independent_strict_incl[OF assms(1)] by auto
then obtain v where v: "v \<in> set Vs" "v \<notin> Span K Us"
using Span_strict_incl[of Us Vs] assms[THEN independent_in_carrier] by auto
thus ?thesis
using li_Cons[of v K Us] assms independent_in_carrier[OF assms(2)] by auto
qed
(* Splitting an independent concatenation: both halves are independent and
   their spans meet only in \<zero>. *)
lemma independent_split:
assumes "independent K (Us @ Vs)"
shows "independent K Vs"
and "independent K Us"
and "Span K Us \<inter> Span K Vs = { \<zero> }"
proof -
from assms show "independent K Vs"
by (induct Us) (auto)
next
from assms show "independent K Us"
proof (induct Us)
case Nil thus ?case by simp
next
case (Cons u Us')
hence u: "u \<in> carrier R" and "set Us' \<subseteq> carrier R" "set Vs \<subseteq> carrier R"
using independent_in_carrier[of K "(u # Us') @ Vs"] by auto
hence "Span K Us' \<subseteq> Span K (Us' @ Vs)"
using mono_Span_append(1) by simp
thus ?case
using independent_backwards[of K u "Us' @ Vs"] Cons li_Cons[OF u] by auto
qed
next
from assms show "Span K Us \<inter> Span K Vs = { \<zero> }"
proof (induct Us rule: list.induct)
case Nil thus ?case
using Span_subgroup_props(2)[OF independent_in_carrier[of K Vs]] by simp
next
case (Cons u Us)
hence IH: "Span K Us \<inter> Span K Vs = {\<zero>}" by auto
have in_carrier:
"u \<in> carrier R" "set Us \<subseteq> carrier R" "set Vs \<subseteq> carrier R" "set (u # Us) \<subseteq> carrier R"
using Cons(2)[THEN independent_in_carrier] by auto
hence "{ \<zero> } \<subseteq> Span K (u # Us) \<inter> Span K Vs"
using in_carrier(3-4)[THEN Span_subgroup_props(2)] by auto
moreover have "Span K (u # Us) \<inter> Span K Vs \<subseteq> { \<zero> }"
proof (rule ccontr)
(* A nonzero common element would force u into Span (Us @ Vs),
   contradicting independence of (u # Us) @ Vs. *)
assume "\<not> Span K (u # Us) \<inter> Span K Vs \<subseteq> {\<zero>}"
hence "\<exists>a. a \<noteq> \<zero> \<and> a \<in> Span K (u # Us) \<and> a \<in> Span K Vs" by auto
then obtain k u' v'
where u': "u' \<in> Span K Us" "u' \<in> carrier R"
and v': "v' \<in> Span K Vs" "v' \<in> carrier R" "v' \<noteq> \<zero>"
and k: "k \<in> K" "(k \<otimes> u \<oplus> u') = v'"
using line_extension_mem_iff[of _ _ u "Span K Us"] in_carrier(2-3)[THEN Span_subgroup_props(1)]
subring_props(1) by force
hence "v' = \<zero>" if "k = \<zero>"
using in_carrier(1) that IH by auto
hence diff_zero: "k \<noteq> \<zero>" using v'(3) by auto
have "k \<in> carrier R"
using subring_props(1) k(1) by blast
hence "k \<otimes> u = (\<ominus> u') \<oplus> v'"
using in_carrier(1) k(2) u'(2) v'(2) add.m_comm r_neg1 by auto
hence "k \<otimes> u \<in> Span K (Us @ Vs)"
using Span_subgroup_props(4)[OF in_carrier(2) u'(1)] v'(1)
Span_append_eq_set_add[OF in_carrier(2-3)] unfolding set_add_def' by blast
hence "u \<in> Span K (Us @ Vs)"
using Cons(2) Span_m_inv_simprule[OF _ _ in_carrier(1), of "Us @ Vs" k]
diff_zero k(1) in_carrier(2-3) by auto
moreover have "u \<notin> Span K (Us @ Vs)"
using independent_backwards(1)[of K u "Us @ Vs"] Cons(2) by auto
ultimately show False by simp
qed
ultimately show ?case by auto
qed
qed
(* The concatenation of two independent lists is independent, provided their
   spans intersect only trivially (in { \<zero> }).  Proof by induction on Us:
   the key step shows the head u cannot lie in Span K (Us @ Vs), because any
   decomposition u = u' \<oplus> v' would force u \<oplus> (\<ominus> u') into the trivial
   intersection, giving u = u' \<in> Span K Us and contradicting independence. *)
lemma independent_append:
assumes "independent K Us" and "independent K Vs" and "Span K Us \<inter> Span K Vs = { \<zero> }"
shows "independent K (Us @ Vs)"
using assms
proof (induct Us rule: list.induct)
case Nil thus ?case by simp
next
case (Cons u Us)
hence in_carrier:
"u \<in> carrier R" "set Us \<subseteq> carrier R" "set Vs \<subseteq> carrier R" "set (u # Us) \<subseteq> carrier R"
using Cons(2-3)[THEN independent_in_carrier] by auto
(* shrink the trivial-intersection hypothesis from (u # Us) to Us so the
   induction hypothesis applies *)
hence "Span K Us \<subseteq> Span K (u # Us)"
using mono_Span by auto
hence "Span K Us \<inter> Span K Vs = { \<zero> }"
using Cons(4) Span_subgroup_props(2)[OF in_carrier(2)] by auto
hence "independent K (Us @ Vs)"
using Cons by auto
moreover have "u \<notin> Span K (Us @ Vs)"
proof (rule ccontr)
assume "\<not> u \<notin> Span K (Us @ Vs)"
(* split a purported membership into a Span K Us part and a Span K Vs part *)
then obtain u' v'
where u': "u' \<in> Span K Us" "u' \<in> carrier R"
and v': "v' \<in> Span K Vs" "v' \<in> carrier R" and u:"u = u' \<oplus> v'"
using Span_append_eq_set_add[OF in_carrier(2-3)] in_carrier(2-3)[THEN Span_subgroup_props(1)]
unfolding set_add_def' by blast
hence "u \<oplus> (\<ominus> u') = v'"
using in_carrier(1) by algebra
moreover have "u \<in> Span K (u # Us)" and "u' \<in> Span K (u # Us)"
using Span_base_incl[OF in_carrier(4)] mono_Span[OF in_carrier(2,1)] u'(1)
by (auto simp del: Span.simps)
hence "u \<oplus> (\<ominus> u') \<in> Span K (u # Us)"
using Span_subgroup_props(3-4)[OF in_carrier(4)] by (auto simp del: Span.simps)
(* u \<oplus> (\<ominus> u') lies in both spans, hence in the trivial intersection *)
ultimately have "u \<oplus> (\<ominus> u') = \<zero>"
using Cons(4) v'(1) by auto
hence "u = u'"
using Cons(4) v'(1) in_carrier(1) u'(2) \<open>u \<oplus> \<ominus> u' = v'\<close> u by auto
thus False
using u'(1) independent_backwards(1)[OF Cons(2)] by simp
qed
ultimately show ?case
using in_carrier(1) li_Cons by simp
qed
(* Over an independent list Us, any combination of coefficients Ks \<subseteq> K that
   evaluates to \<zero> must be trivial: the first (length Us) coefficients are
   all \<zero>.  (Only a prefix of Ks is constrained, since combine ignores any
   excess coefficients.)  Induction on Us. *)
lemma independent_imp_trivial_combine:
assumes "independent K Us"
shows "\<And>Ks. \<lbrakk> set Ks \<subseteq> K; combine Ks Us = \<zero> \<rbrakk> \<Longrightarrow> set (take (length Us) Ks) \<subseteq> { \<zero> }"
using assms
proof (induct Us rule: list.induct)
case Nil thus ?case by simp
next
case (Cons u Us) thus ?case
proof (cases "Ks = []")
assume "Ks = []" thus ?thesis by auto
next
assume "Ks \<noteq> []"
then obtain k Ks' where k: "k \<in> K" and Ks': "set Ks' \<subseteq> K" and Ks: "Ks = k # Ks'"
using Cons(2) by (metis insert_subset list.exhaust_sel list.simps(15))
hence Us: "set Us \<subseteq> carrier R" and u: "u \<in> carrier R"
using independent_in_carrier[OF Cons(4)] by auto
(* a non-zero head coefficient would let us solve for u inside Span K Us,
   contradicting independence — hence k = \<zero> *)
have "u \<in> Span K Us" if "k \<noteq> \<zero>"
using that Span_mem_iff[OF Us u] Cons(3-4) Ks' k unfolding Ks by blast
hence k_zero: "k = \<zero>"
using independent_backwards[OF Cons(4)] by blast
hence "combine Ks' Us = \<zero>"
using combine_in_carrier[OF _ Us, of Ks'] Ks' u Cons(3) subring_props(1) unfolding Ks by auto
hence "set (take (length Us) Ks') \<subseteq> { \<zero> }"
using Cons(1)[OF Ks' _ independent_backwards(2)[OF Cons(4)]] by simp
thus ?thesis
using k_zero unfolding Ks by auto
qed
qed
(* Contrapositive of independent_imp_trivial_combine: a non-trivial
   combination summing to \<zero> witnesses dependence of Us. *)
lemma non_trivial_combine_imp_dependent:
assumes "set Ks \<subseteq> K" and "combine Ks Us = \<zero>" and "\<not> set (take (length Us) Ks) \<subseteq> { \<zero> }"
shows "dependent K Us"
using independent_imp_trivial_combine[OF _ assms(1-2)] assms(3) by blast
(* Converse direction: if every K-combination over Us that sums to \<zero> is
   trivial, then Us is independent.  Induction on Us; the step derives the
   trivial-combination property for Us by prepending a \<zero> coefficient, and
   shows u \<notin> Span K Us since a witnessing combination would have a non-zero
   head coefficient. *)
lemma trivial_combine_imp_independent:
assumes "set Us \<subseteq> carrier R"
and "\<And>Ks. \<lbrakk> set Ks \<subseteq> K; combine Ks Us = \<zero> \<rbrakk> \<Longrightarrow> set (take (length Us) Ks) \<subseteq> { \<zero> }"
shows "independent K Us"
using assms
proof (induct Us)
case Nil thus ?case by simp
next
case (Cons u Us)
hence Us: "set Us \<subseteq> carrier R" and u: "u \<in> carrier R" by auto
(* transfer the hypothesis from (u # Us) down to Us via the padding \<zero> # Ks *)
have "\<And>Ks. \<lbrakk> set Ks \<subseteq> K; combine Ks Us = \<zero> \<rbrakk> \<Longrightarrow> set (take (length Us) Ks) \<subseteq> { \<zero> }"
proof -
fix Ks assume Ks: "set Ks \<subseteq> K" and lin_c: "combine Ks Us = \<zero>"
hence "combine (\<zero> # Ks) (u # Us) = \<zero>"
using u subring_props(1) combine_in_carrier[OF _ Us] by auto
hence "set (take (length (u # Us)) (\<zero> # Ks)) \<subseteq> { \<zero> }"
using Cons(3)[of "\<zero> # Ks"] subring_props(2) Ks by auto
thus "set (take (length Us) Ks) \<subseteq> { \<zero> }" by auto
qed
hence "independent K Us"
using Cons(1)[OF Us] by simp
moreover have "u \<notin> Span K Us"
proof (rule ccontr)
assume "\<not> u \<notin> Span K Us"
then obtain k Ks where k: "k \<in> K" "k \<noteq> \<zero>" and Ks: "set Ks \<subseteq> K" and u: "combine (k # Ks) (u # Us) = \<zero>"
using Span_mem_iff[OF Us u] by auto
have "set (take (length (u # Us)) (k # Ks)) \<subseteq> { \<zero> }"
using Cons(3)[OF _ u] k(1) Ks by auto
hence "k = \<zero>" by auto
from \<open>k = \<zero>\<close> and \<open>k \<noteq> \<zero>\<close> show False by simp
qed
ultimately show ?case
using li_Cons[OF u] by simp
qed
(* A dependent list admits a non-trivial vanishing combination whose
   coefficient list has exactly the length of Us.  The raw witness from
   trivial_combine_imp_independent may be too long or too short, so it is
   normalized (via combine_normalize) before being handed to the caller. *)
corollary dependent_imp_non_trivial_combine:
assumes "set Us \<subseteq> carrier R" and "dependent K Us"
obtains Ks where "length Ks = length Us" "combine Ks Us = \<zero>" "set Ks \<subseteq> K" "set Ks \<noteq> { \<zero> }"
proof -
obtain Ks
where Ks: "set Ks \<subseteq> carrier R" "set Ks \<subseteq> K" "combine Ks Us = \<zero>" "\<not> set (take (length Us) Ks) \<subseteq> { \<zero> }"
using trivial_combine_imp_independent[OF assms(1)] assms(2) subring_props(1) by blast
(* pad/trim Ks to length Us, only introducing extra \<zero> coefficients *)
obtain Ks'
where Ks': "set (take (length Us) Ks) \<subseteq> set Ks'" "set Ks' \<subseteq> set (take (length Us) Ks) \<union> { \<zero> }"
"length Ks' = length Us" "combine Ks' Us = \<zero>"
using combine_normalize[OF Ks(1) assms(1) Ks(3)] by metis
have "set (take (length Us) Ks) \<subseteq> set Ks"
by (simp add: set_take_subset)
hence "set Ks' \<subseteq> K"
using Ks(2) Ks'(2) subring_props(2) Un_commute by blast
moreover have "set Ks' \<noteq> { \<zero> }"
using Ks'(1) Ks(4) by auto
ultimately show thesis
using that Ks' by blast
qed
(* Every element of the span of an independent list has a UNIQUE coefficient
   representation of matching length.  Uniqueness: subtract the two purported
   representations (multiplying one by \<ominus> \<one>), obtain a vanishing combination,
   conclude all pairwise sums k \<oplus> (\<ominus> \<one>) \<otimes> k' are \<zero>, and recover Ks = Ks'
   element-wise by induction. *)
corollary unique_decomposition:
assumes "independent K Us"
shows "a \<in> Span K Us \<Longrightarrow> \<exists>!Ks. set Ks \<subseteq> K \<and> length Ks = length Us \<and> a = combine Ks Us"
proof -
note in_carrier = independent_in_carrier[OF assms]
assume "a \<in> Span K Us"
then obtain Ks where Ks: "set Ks \<subseteq> K" "length Ks = length Us" "a = combine Ks Us"
using Span_mem_iff_length_version[OF in_carrier] by blast
moreover
have "\<And>Ks'. \<lbrakk> set Ks' \<subseteq> K; length Ks' = length Us; a = combine Ks' Us \<rbrakk> \<Longrightarrow> Ks = Ks'"
proof -
fix Ks' assume Ks': "set Ks' \<subseteq> K" "length Ks' = length Us" "a = combine Ks' Us"
hence set_Ks: "set Ks \<subseteq> carrier R" and set_Ks': "set Ks' \<subseteq> carrier R"
using subring_props(1) Ks(1) by blast+
have same_length: "length Ks = length Ks'"
using Ks Ks' by simp
(* the difference of the two representations is a vanishing combination *)
have "(combine Ks Us) \<oplus> ((\<ominus> \<one>) \<otimes> (combine Ks' Us)) = \<zero>"
using combine_in_carrier[OF set_Ks in_carrier]
combine_in_carrier[OF set_Ks' in_carrier] Ks(3) Ks'(3) by algebra
hence "(combine Ks Us) \<oplus> (combine (map ((\<otimes>) (\<ominus> \<one>)) Ks') Us) = \<zero>"
using combine_r_distr[OF set_Ks' in_carrier, of "\<ominus> \<one>"] subring_props by auto
moreover have set_map: "set (map ((\<otimes>) (\<ominus> \<one>)) Ks') \<subseteq> K"
using Ks'(1) subring_props by (induct Ks') (auto)
hence "set (map ((\<otimes>) (\<ominus> \<one>)) Ks') \<subseteq> carrier R"
using subring_props(1) by blast
ultimately have "combine (map2 (\<oplus>) Ks (map ((\<otimes>) (\<ominus> \<one>)) Ks')) Us = \<zero>"
using combine_add[OF Ks(2) _ set_Ks _ in_carrier, of "map ((\<otimes>) (\<ominus> \<one>)) Ks'"] Ks'(2) by auto
moreover have "set (map2 (\<oplus>) Ks (map ((\<otimes>) (\<ominus> \<one>)) Ks')) \<subseteq> K"
using Ks(1) set_map subring_props(7)
by (induct Ks) (auto, metis contra_subsetD in_set_zipE local.set_map set_ConsD subring_props(7))
(* independence forces every coefficient difference to be \<zero> *)
ultimately have "set (take (length Us) (map2 (\<oplus>) Ks (map ((\<otimes>) (\<ominus> \<one>)) Ks'))) \<subseteq> { \<zero> }"
using independent_imp_trivial_combine[OF assms] by auto
hence "set (map2 (\<oplus>) Ks (map ((\<otimes>) (\<ominus> \<one>)) Ks')) \<subseteq> { \<zero> }"
using Ks(2) Ks'(2) by auto
thus "Ks = Ks'"
using set_Ks set_Ks' same_length
proof (induct Ks arbitrary: Ks')
case Nil thus?case by simp
next
case (Cons k Ks)
then obtain k' Ks'' where k': "Ks' = k' # Ks''"
by (metis Suc_length_conv)
have "Ks = Ks''"
using Cons unfolding k' by auto
moreover have "k = k'"
using Cons(2-4) l_minus minus_equality unfolding k' by (auto, fastforce)
ultimately show ?case
unfolding k' by simp
qed
qed
ultimately show ?thesis by blast
qed
subsection \<open>Replacement Theorem\<close>
(* Auxiliary step for rotation invariance: moving the head u to the end of
   the first segment preserves independence, since Span is insensitive to the
   order of the list (Span_same_set). *)
lemma independent_rotate1_aux:
"independent K (u # Us @ Vs) \<Longrightarrow> independent K ((Us @ [u]) @ Vs)"
proof -
assume "independent K (u # Us @ Vs)"
hence li: "independent K [u]" "independent K Us" "independent K Vs"
and inter: "Span K [u] \<inter> Span K Us = { \<zero> }"
"Span K (u # Us) \<inter> Span K Vs = { \<zero> }"
using independent_split[of "u # Us" Vs] independent_split[of "[u]" Us] by auto
hence "independent K (Us @ [u])"
using independent_append[OF li(2,1)] by auto
moreover have "Span K (Us @ [u]) \<inter> Span K Vs = { \<zero> }"
using Span_same_set[of "u # Us" "Us @ [u]"] li(1-2)[THEN independent_in_carrier] inter(2) by auto
ultimately show "independent K ((Us @ [u]) @ Vs)"
using independent_append[OF _ li(3), of "Us @ [u]"] by simp
qed
(* Independence of the first segment survives a single rotation. *)
corollary independent_rotate1:
"independent K (Us @ Vs) \<Longrightarrow> independent K ((rotate1 Us) @ Vs)"
using independent_rotate1_aux by (cases Us) (auto)
(*
corollary independent_rotate:
"independent K (Us @ Vs) \<Longrightarrow> independent K ((rotate n Us) @ Vs)"
using independent_rotate1 by (induct n) auto
lemma rotate_append: "rotate (length l) (l @ q) = q @ l"
by (induct l arbitrary: q) (auto simp add: rotate1_rotate_swap)
*)
(* Independence depends only on the underlying set (given equal lengths):
   any reordering of an independent list is independent.  The proof locates
   the head u inside Vs, applies the induction hypothesis to the remainder,
   and reinserts u via a single rotation. *)
corollary independent_same_set:
assumes "set Us = set Vs" and "length Us = length Vs"
shows "independent K Us \<Longrightarrow> independent K Vs"
proof -
assume "independent K Us" thus ?thesis
using assms
proof (induct Us arbitrary: Vs rule: list.induct)
case Nil thus ?case by simp
next
case (Cons u Us)
then obtain Vs' Vs'' where Vs: "Vs = Vs' @ (u # Vs'')"
by (metis list.set_intros(1) split_list)
have in_carrier: "u \<in> carrier R" "set Us \<subseteq> carrier R"
using independent_in_carrier[OF Cons(2)] by auto
(* distinctness transfers from Us to Vs because the sets and lengths agree *)
have "distinct Vs"
using Cons(3-4) independent_distinct[OF Cons(2)]
by (metis card_distinct distinct_card)
hence "u \<notin> set (Vs' @ Vs'')" and "u \<notin> set Us"
using independent_distinct[OF Cons(2)] unfolding Vs by auto
hence set_eq: "set Us = set (Vs' @ Vs'')" and "length (Vs' @ Vs'') = length Us"
using Cons(3-4) unfolding Vs by auto
hence "independent K (Vs' @ Vs'')"
using Cons(1)[OF independent_backwards(2)[OF Cons(2)]] unfolding Vs by simp
hence "independent K (u # (Vs' @ Vs''))"
using li_Cons Span_same_set[OF _ set_eq] independent_backwards(1)[OF Cons(2)] in_carrier by auto
hence "independent K (Vs' @ (u # Vs''))"
using independent_rotate1[of "u # Vs'" Vs''] by auto
thus ?case unfolding Vs .
qed
qed
(* Replacement (Steinitz exchange) theorem: if Us' @ Us is independent and
   its span is contained in Span K Vs (with Vs independent), then the Us'
   part can be replaced by equally many vectors drawn from Vs while keeping
   independence.  Induction on the length of Us', peeling one element of Us'
   at a time and exchanging it via independent_replacement. *)
lemma replacement_theorem:
assumes "independent K (Us' @ Us)" and "independent K Vs"
and "Span K (Us' @ Us) \<subseteq> Span K Vs"
shows "\<exists>Vs'. set Vs' \<subseteq> set Vs \<and> length Vs' = length Us' \<and> independent K (Vs' @ Us)"
using assms
proof (induct "length Us'" arbitrary: Us' Us)
case 0 thus ?case by auto
next
case (Suc n)
then obtain u Us'' where Us'': "Us' = Us'' @ [u]"
by (metis list.size(3) nat.simps(3) rev_exhaust)
(* induction hypothesis: replace Us'' after shifting u onto Us *)
then obtain Vs' where Vs': "set Vs' \<subseteq> set Vs" "length Vs' = n" "independent K (Vs' @ (u # Us))"
using Suc(1)[of Us'' "u # Us"] Suc(2-5) by auto
hence li: "independent K ((u # Vs') @ Us)"
using independent_same_set[OF _ _ Vs'(3), of "(u # Vs') @ Us"] by auto
moreover have in_carrier:
"u \<in> carrier R" "set Us \<subseteq> carrier R" "set Us' \<subseteq> carrier R" "set Vs \<subseteq> carrier R"
using Suc(3-4)[THEN independent_in_carrier] Us'' by auto
moreover have "Span K ((u # Vs') @ Us) \<subseteq> Span K Vs"
proof -
have "set Us \<subseteq> Span K Vs" "u \<in> Span K Vs"
using Suc(5) Span_base_incl[of "Us' @ Us"] Us'' in_carrier(2-3) by auto
moreover have "set Vs' \<subseteq> Span K Vs"
using Span_base_incl[OF in_carrier(4)] Vs'(1) by auto
ultimately have "set ((u # Vs') @ Us) \<subseteq> Span K Vs" by auto
thus ?thesis
using mono_Span_subset[OF _ in_carrier(4)] by (simp del: Span.simps)
qed
(* exchange the remaining u for some v \<in> Vs *)
ultimately obtain v where "v \<in> set Vs" "independent K ((v # Vs') @ Us)"
using independent_replacement[OF _ Suc(4), of u "Vs' @ Us"] by auto
thus ?case
using Vs'(1-2) Suc(2)
by (metis (mono_tags, lifting) insert_subset length_Cons list.simps(15))
qed
(* An independent list inside the span of another independent list is no
   longer than it — the classical exchange-lemma bound, obtained by applying
   the replacement theorem with Us = [] and counting distinct elements. *)
corollary independent_length_le:
assumes "independent K Us" and "independent K Vs"
shows "set Us \<subseteq> Span K Vs \<Longrightarrow> length Us \<le> length Vs"
proof -
assume "set Us \<subseteq> Span K Vs"
hence "Span K Us \<subseteq> Span K Vs"
using mono_Span_subset[OF _ independent_in_carrier[OF assms(2)]] by simp
then obtain Vs' where Vs': "set Vs' \<subseteq> set Vs" "length Vs' = length Us" "independent K Vs'"
using replacement_theorem[OF _ assms(2), of Us "[]"] assms(1) by auto
hence "card (set Vs') \<le> card (set Vs)"
by (simp add: card_mono)
thus "length Us \<le> length Vs"
using independent_distinct assms(2) Vs'(2-3) by (simp add: distinct_card)
qed
subsection \<open>Dimension\<close>
(* Every n-dimensional space E has an explicit basis: an independent list of
   length n spanning E.  Straightforward induction over the inductive
   dimension predicate, prepending the new generator v at each Suc step. *)
lemma exists_base:
assumes "dimension n K E"
shows "\<exists>Vs. set Vs \<subseteq> carrier R \<and> independent K Vs \<and> length Vs = n \<and> Span K Vs = E"
(is "\<exists>Vs. ?base K Vs E n")
using assms
proof (induct E rule: dimension.induct)
case zero_dim thus ?case by auto
next
case (Suc_dim v E n K)
then obtain Vs where Vs: "set Vs \<subseteq> carrier R" "independent K Vs" "length Vs = n" "Span K Vs = E"
by auto
hence "?base K (v # Vs) (line_extension K v E) (Suc n)"
using Suc_dim li_Cons by auto
thus ?case by blast
qed
(* A zero-dimensional space is the trivial space { \<zero> }: its basis is the
   empty list, whose span is { \<zero> }. *)
lemma dimension_zero: "dimension 0 K E \<Longrightarrow> E = { \<zero> }"
proof -
assume "dimension 0 K E"
then obtain Vs where "length Vs = 0" "Span K Vs = E"
using exists_base by blast
thus ?thesis
by auto
qed
(* K itself is a one-dimensional K-space, with [ \<one> ] as a basis. *)
lemma dimension_one [iff]: "dimension 1 K K"
proof -
have "K = Span K [ \<one> ]"
using line_extension_mem_iff[of _ K \<one> "{ \<zero> }"] subfieldE(3)[OF K] by (auto simp add: rev_subsetD)
thus ?thesis
using dimension.Suc_dim[OF one_closed _ dimension.zero_dim, of K] subfieldE(6)[OF K] by auto
qed
(* Introduction rule: an independent spanning list of E witnesses that E has
   dimension equal to the list's length. *)
lemma dimensionI:
assumes "independent K Us" "Span K Us = E"
shows "dimension (length Us) K E"
using dimension_independent[OF assms(1)] assms(2) by simp
(* A finite-dimensional space inherits the closure properties of a span:
   it lies in the carrier, contains \<zero>, and is closed under addition,
   additive inverse, K-scalar multiplication, and cancellation of non-zero
   scalars.  All obtained by exhibiting a basis via exists_base. *)
lemma space_subgroup_props:
assumes "dimension n K E"
shows "E \<subseteq> carrier R"
and "\<zero> \<in> E"
and "\<And>v1 v2. \<lbrakk> v1 \<in> E; v2 \<in> E \<rbrakk> \<Longrightarrow> (v1 \<oplus> v2) \<in> E"
and "\<And>v. v \<in> E \<Longrightarrow> (\<ominus> v) \<in> E"
and "\<And>k v. \<lbrakk> k \<in> K; v \<in> E \<rbrakk> \<Longrightarrow> k \<otimes> v \<in> E"
and "\<lbrakk> k \<in> K - { \<zero> }; a \<in> carrier R \<rbrakk> \<Longrightarrow> k \<otimes> a \<in> E \<Longrightarrow> a \<in> E"
using exists_base[OF assms] Span_subgroup_props Span_smult_closed Span_m_inv_simprule by auto
(* Any independent list contained in an n-dimensional space has length at
   most n: compare it against a basis using independent_length_le. *)
lemma independent_length_le_dimension:
assumes "dimension n K E" and "independent K Us" "set Us \<subseteq> E"
shows "length Us \<le> n"
proof -
obtain Vs where Vs: "set Vs \<subseteq> carrier R" "independent K Vs" "length Vs = n" "Span K Vs = E"
using exists_base[OF assms(1)] by auto
thus ?thesis
using independent_length_le assms(2-3) by auto
qed
(* Dimension is well-defined: two dimensions of the same space agree.
   The symmetric auxiliary argument bounds each dimension by the other by
   viewing one basis as an independent list inside the other's span. *)
lemma dimension_is_inj:
assumes "dimension n K E" and "dimension m K E"
shows "n = m"
proof -
{ fix n m assume n: "dimension n K E" and m: "dimension m K E"
then obtain Vs
where Vs: "set Vs \<subseteq> carrier R" "independent K Vs" "length Vs = n" "Span K Vs = E"
using exists_base by meson
hence "n \<le> m"
using independent_length_le_dimension[OF m Vs(2)] Span_base_incl[OF Vs(1)] by auto
} note aux_lemma = this
show ?thesis
using aux_lemma[OF assms] aux_lemma[OF assms(2,1)] by simp
qed
(* For an independent list inside an n-dimensional space E: the list has
   length n iff it spans E.  Forward direction: a strictly smaller span
   could be extended by some v \<in> E, producing an independent list of length
   Suc n inside E — contradicting the dimension bound. *)
corollary independent_length_eq_dimension:
assumes "dimension n K E" and "independent K Us" "set Us \<subseteq> E"
shows "length Us = n \<longleftrightarrow> Span K Us = E"
proof
assume len: "length Us = n" show "Span K Us = E"
proof (rule ccontr)
assume "Span K Us \<noteq> E"
hence "Span K Us \<subset> E"
using mono_Span_subset[of Us] exists_base[OF assms(1)] assms(3) by blast
then obtain v where v: "v \<in> E" "v \<notin> Span K Us"
using Span_strict_incl exists_base[OF assms(1)] space_subgroup_props(1)[OF assms(1)] assms by blast
hence "independent K (v # Us)"
using li_Cons[OF _ _ assms(2)] space_subgroup_props(1)[OF assms(1)] by auto
hence "length (v # Us) \<le> n"
using independent_length_le_dimension[OF assms(1)] v(1) assms(2-3) by fastforce
moreover have "length (v # Us) = Suc n"
using len by simp
ultimately show False by simp
qed
next
assume "Span K Us = E"
hence "dimension (length Us) K E"
using dimensionI assms by auto
thus "length Us = n"
using dimension_is_inj[OF assms(1)] by auto
qed
(* Basis completion: any independent list Us inside an n-dimensional space E
   can be extended (by prepending vectors Vs) to a basis of E.  The auxiliary
   argument does downward induction (inc_induct) from n on the current
   length, adding one vector outside the current span at each step. *)
lemma complete_base:
assumes "dimension n K E" and "independent K Us" "set Us \<subseteq> E"
shows "\<exists>Vs. length (Vs @ Us) = n \<and> independent K (Vs @ Us) \<and> Span K (Vs @ Us) = E"
proof -
{ fix Us k assume "k \<le> n" "independent K Us" "set Us \<subseteq> E" "length Us = k"
hence "\<exists>Vs. length (Vs @ Us) = n \<and> independent K (Vs @ Us) \<and> Span K (Vs @ Us) = E"
proof (induct arbitrary: Us rule: inc_induct)
case base thus ?case
using independent_length_eq_dimension[OF assms(1) base(1-2)] by auto
next
case (step m)
have "Span K Us \<subseteq> E"
using mono_Span_subset step(4-6) exists_base[OF assms(1)] by blast
(* length < n, so the span is still strictly smaller than E *)
hence "Span K Us \<subset> E"
using independent_length_eq_dimension[OF assms(1) step(4-5)] step(2,6) assms(1) by blast
then obtain v where v: "v \<in> E" "v \<notin> Span K Us"
using Span_strict_incl exists_base[OF assms(1)] by blast
hence "independent K (v # Us)"
using space_subgroup_props(1)[OF assms(1)] li_Cons[OF _ v(2) step(4)] by auto
then obtain Vs
where "length (Vs @ (v # Us)) = n" "independent K (Vs @ (v # Us))" "Span K (Vs @ (v # Us)) = E"
using step(3)[of "v # Us"] step(1-2,4-6) v by auto
thus ?case
by (metis append.assoc append_Cons append_Nil)
qed } note aux_lemma = this
have "length Us \<le> n"
using independent_length_le_dimension[OF assms] .
thus ?thesis
using aux_lemma[OF _ assms(2-3)] by auto
qed
(* Basis extraction: from any list Us one can obtain an independent list Vs
   with the same span, by discarding each element already in the span of the
   remaining ones (induction on Us). *)
lemma filter_base:
assumes "set Us \<subseteq> carrier R"
obtains Vs where "set Vs \<subseteq> carrier R" and "independent K Vs" and "Span K Vs = Span K Us"
proof -
from \<open>set Us \<subseteq> carrier R\<close> have "\<exists>Vs. independent K Vs \<and> Span K Vs = Span K Us"
proof (induction Us)
case Nil thus ?case by auto
next
case (Cons u Us)
then obtain Vs where Vs: "independent K Vs" "Span K Vs = Span K Us"
by auto
show ?case
proof (cases "u \<in> Span K Us")
case True
(* redundant head: dropping u does not change the span *)
hence "Span K (u # Us) = Span K Us"
using Span_base_incl mono_Span_subset
by (metis Cons.prems insert_subset list.simps(15) subset_antisym)
thus ?thesis
using Vs by blast
next
case False
hence "Span K (u # Vs) = Span K (u # Us)" and "independent K (u # Vs)"
using li_Cons[of u K Vs] Cons(2) Vs by auto
thus ?thesis
by blast
qed
qed
thus ?thesis
using independent_in_carrier that by auto
qed
(* Inversion of the dimension predicate at a successor: a Suc n-dimensional
   space decomposes as a line extension of an n-dimensional space by a
   vector outside it. *)
lemma dimension_backwards:
"dimension (Suc n) K E \<Longrightarrow> \<exists>v \<in> carrier R. \<exists>E'. dimension n K E' \<and> v \<notin> E' \<and> E = line_extension K v E'"
by (cases rule: dimension.cases) (auto)
(* Dimension is additive over an (internal) direct sum: if E and F intersect
   trivially, then dim(E <+> F) = dim E + dim F.  Concatenate bases of E and
   F; trivial intersection makes the concatenation independent. *)
lemma dimension_direct_sum_space:
assumes "dimension n K E" and "dimension m K F" and "E \<inter> F = { \<zero> }"
shows "dimension (n + m) K (E <+>\<^bsub>R\<^esub> F)"
proof -
obtain Us Vs
where Vs: "set Vs \<subseteq> carrier R" "independent K Vs" "length Vs = n" "Span K Vs = E"
and Us: "set Us \<subseteq> carrier R" "independent K Us" "length Us = m" "Span K Us = F"
using assms(1-2)[THEN exists_base] by auto
hence "Span K (Vs @ Us) = E <+>\<^bsub>R\<^esub> F"
using Span_append_eq_set_add by auto
moreover have "independent K (Vs @ Us)"
using assms(3) independent_append[OF Vs(2) Us(2)] unfolding Vs(4) Us(4) by simp
ultimately show "dimension (n + m) K (E <+>\<^bsub>R\<^esub> F)"
using dimensionI[of "Vs @ Us"] Vs(3) Us(3) by auto
qed
(* Dimension formula for a sum of subspaces (inclusion-exclusion):
   dim(E + F) = dim E + dim F - dim(E \<inter> F).  Start from a basis Bs of the
   intersection, complete it to bases Us @ Bs of E and Vs @ Bs of F; then
   Us @ (Vs @ Bs) is an independent spanning list of E <+> F of the right
   length. *)
lemma dimension_sum_space:
assumes "dimension n K E" and "dimension m K F" and "dimension k K (E \<inter> F)"
shows "dimension (n + m - k) K (E <+>\<^bsub>R\<^esub> F)"
proof -
obtain Bs
where Bs: "set Bs \<subseteq> carrier R" "length Bs = k" "independent K Bs" "Span K Bs = E \<inter> F"
using exists_base[OF assms(3)] by blast
then obtain Us Vs
where Us: "length (Us @ Bs) = n" "independent K (Us @ Bs)" "Span K (Us @ Bs) = E"
and Vs: "length (Vs @ Bs) = m" "independent K (Vs @ Bs)" "Span K (Vs @ Bs) = F"
using Span_base_incl[OF Bs(1)] assms(1-2)[THEN complete_base] by (metis le_infE)
hence in_carrier: "set Us \<subseteq> carrier R" "set (Vs @ Bs) \<subseteq> carrier R"
using independent_in_carrier[OF Us(2)] independent_in_carrier[OF Vs(2)] by auto
(* Span K Us meets F only inside E \<inter> F = Span K Bs, hence only in { \<zero> } *)
hence "Span K Us \<inter> (Span K (Vs @ Bs)) \<subseteq> Span K Bs"
using Bs(4) Us(3) Vs(3) mono_Span_append(1)[OF _ Bs(1), of Us] by auto
hence "Span K Us \<inter> (Span K (Vs @ Bs)) \<subseteq> { \<zero> }"
using independent_split(3)[OF Us(2)] by blast
hence "Span K Us \<inter> (Span K (Vs @ Bs)) = { \<zero> }"
using in_carrier[THEN Span_subgroup_props(2)] by auto
hence dim: "dimension (n + m - k) K (Span K (Us @ (Vs @ Bs)))"
using independent_append[OF independent_split(2)[OF Us(2)] Vs(2)] Us(1) Vs(1) Bs(2)
dimension_independent[of K "Us @ (Vs @ Bs)"] by auto
(* it remains to identify that span with E <+> F, by mutual inclusion *)
have "(Span K Us) <+>\<^bsub>R\<^esub> F \<subseteq> E <+>\<^bsub>R\<^esub> F"
using mono_Span_append(1)[OF in_carrier(1) Bs(1)] Us(3) unfolding set_add_def' by auto
moreover have "E <+>\<^bsub>R\<^esub> F \<subseteq> (Span K Us) <+>\<^bsub>R\<^esub> F"
proof
fix v assume "v \<in> E <+>\<^bsub>R\<^esub> F"
then obtain u' v' where v: "u' \<in> E" "v' \<in> F" "v = u' \<oplus> v'"
unfolding set_add_def' by auto
then obtain u1' u2' where u1': "u1' \<in> Span K Us" and u2': "u2' \<in> Span K Bs" and u': "u' = u1' \<oplus> u2'"
using Span_append_eq_set_add[OF in_carrier(1) Bs(1)] Us(3) unfolding set_add_def' by blast
have "v = u1' \<oplus> (u2' \<oplus> v')"
using Span_subgroup_props(1)[OF Bs(1)] Span_subgroup_props(1)[OF in_carrier(1)]
space_subgroup_props(1)[OF assms(2)] u' v u1' u2' a_assoc[of u1' u2' v'] by auto
moreover have "u2' \<oplus> v' \<in> F"
using space_subgroup_props(3)[OF assms(2) _ v(2)] u2' Bs(4) by auto
ultimately show "v \<in> (Span K Us) <+>\<^bsub>R\<^esub> F"
using u1' unfolding set_add_def' by auto
qed
ultimately have "Span K (Us @ (Vs @ Bs)) = E <+>\<^bsub>R\<^esub> F"
using Span_append_eq_set_add[OF in_carrier] Vs(3) by auto
thus ?thesis using dim by simp
qed
end (* of fixed K context. *)
end (* of ring context. *)
(* Base case of the tower law: if F has dimension n over K and E has
   dimension 1 over F (so E = Span F [u] for some u), then E has dimension n
   over K, with basis obtained by scaling a K-basis Us of F by u. *)
lemma (in ring) telescopic_base_aux:
assumes "subfield K R" "subfield F R"
and "dimension n K F" and "dimension 1 F E"
shows "dimension n K E"
proof -
obtain Us u
where Us: "set Us \<subseteq> carrier R" "length Us = n" "independent K Us" "Span K Us = F"
and u: "u \<in> carrier R" "independent F [u]" "Span F [u] = E"
using exists_base[OF assms(2,4)] exists_base[OF assms(1,3)] independent_backwards(3) assms(2)
by (metis One_nat_def length_0_conv length_Suc_conv)
have in_carrier: "set (map (\<lambda>u'. u' \<otimes> u) Us) \<subseteq> carrier R"
using Us(1) u(1) by (induct Us) (auto)
(* independence of the scaled basis: a vanishing K-combination over
   (map (\<otimes> u) Us) factors as (combine Ks Us) \<otimes> u, and independence of [u]
   over F forces the factor in F to be \<zero> *)
have li: "independent K (map (\<lambda>u'. u' \<otimes> u) Us)"
proof (rule trivial_combine_imp_independent[OF assms(1) in_carrier])
fix Ks assume Ks: "set Ks \<subseteq> K" and "combine Ks (map (\<lambda>u'. u' \<otimes> u) Us) = \<zero>"
hence "(combine Ks Us) \<otimes> u = \<zero>"
using combine_l_distr[OF _ Us(1) u(1)] subring_props(1)[OF assms(1)] by auto
hence "combine [ combine Ks Us ] [ u ] = \<zero>"
by simp
moreover have "combine Ks Us \<in> F"
using Us(4) Ks(1) Span_eq_combine_set[OF assms(1) Us(1)] by auto
ultimately have "combine Ks Us = \<zero>"
using independent_imp_trivial_combine[OF assms(2) u(2), of "[ combine Ks Us ]"] by auto
hence "set (take (length Us) Ks) \<subseteq> { \<zero> }"
using independent_imp_trivial_combine[OF assms(1) Us(3) Ks(1)] by simp
thus "set (take (length (map (\<lambda>u'. u' \<otimes> u) Us)) Ks) \<subseteq> { \<zero> }" by simp
qed
(* spanning: E = F \<otimes> u and F = combinations over Us, in both directions *)
have "E \<subseteq> Span K (map (\<lambda>u'. u' \<otimes> u) Us)"
proof
fix v assume "v \<in> E"
then obtain f where f: "f \<in> F" "v = f \<otimes> u \<oplus> \<zero>"
using u(1,3) line_extension_mem_iff by auto
then obtain Ks where Ks: "set Ks \<subseteq> K" "f = combine Ks Us"
using Span_eq_combine_set[OF assms(1) Us(1)] Us(4) by auto
have "v = f \<otimes> u"
using subring_props(1)[OF assms(2)] f u(1) by auto
hence "v = combine Ks (map (\<lambda>u'. u' \<otimes> u) Us)"
using combine_l_distr[OF _ Us(1) u(1), of Ks] Ks(1-2)
subring_props(1)[OF assms(1)] by blast
thus "v \<in> Span K (map (\<lambda>u'. u' \<otimes> u) Us)"
unfolding Span_eq_combine_set[OF assms(1) in_carrier] using Ks(1) by blast
qed
moreover have "Span K (map (\<lambda>u'. u' \<otimes> u) Us) \<subseteq> E"
proof
fix v assume "v \<in> Span K (map (\<lambda>u'. u' \<otimes> u) Us)"
then obtain Ks where Ks: "set Ks \<subseteq> K" "v = combine Ks (map (\<lambda>u'. u' \<otimes> u) Us)"
unfolding Span_eq_combine_set[OF assms(1) in_carrier] by blast
hence "v = (combine Ks Us) \<otimes> u"
using combine_l_distr[OF _ Us(1) u(1), of Ks] subring_props(1)[OF assms(1)] by auto
moreover have "combine Ks Us \<in> F"
using Us(4) Span_eq_combine_set[OF assms(1) Us(1)] Ks(1) by blast
ultimately have "v = (combine Ks Us) \<otimes> u \<oplus> \<zero>" and "combine Ks Us \<in> F"
using subring_props(1)[OF assms(2)] u(1) by auto
thus "v \<in> E"
using u(3) line_extension_mem_iff by auto
qed
ultimately have "Span K (map (\<lambda>u'. u' \<otimes> u) Us) = E" by auto
thus ?thesis
using dimensionI[OF assms(1) li] Us(2) by simp
qed
(* Tower law (multiplicativity of dimension): dim_K F = n and dim_F E = m
   imply dim_K E = n * m.  Induction on m, splitting an F-basis of E into
   its head line Span F [v] (dimension n over K, by the auxiliary lemma) and
   the span of the tail (dimension n * m by induction), whose direct sum
   recovers E. *)
lemma (in ring) telescopic_base:
assumes "subfield K R" "subfield F R"
and "dimension n K F" and "dimension m F E"
shows "dimension (n * m) K E"
using assms(4)
proof (induct m arbitrary: E)
case 0 thus ?case
using dimension_zero[OF assms(2)] zero_dim by auto
next
case (Suc m)
obtain Vs
where Vs: "set Vs \<subseteq> carrier R" "length Vs = Suc m" "independent F Vs" "Span F Vs = E"
using exists_base[OF assms(2) Suc(2)] by blast
then obtain v Vs' where v: "Vs = v # Vs'"
by (meson length_Suc_conv)
hence li: "independent F [ v ]" "independent F Vs'" and inter: "Span F [ v ] \<inter> Span F Vs' = { \<zero> }"
using Vs(3) independent_split[OF assms(2), of "[ v ]" Vs'] by auto
have "dimension n K (Span F [ v ])"
using dimension_independent[OF li(1)] telescopic_base_aux[OF assms(1-3)] by simp
moreover have "dimension (n * m) K (Span F Vs')"
using Suc(1) dimension_independent[OF li(2)] Vs(2) unfolding v by auto
ultimately have "dimension (n * Suc m) K (Span F [ v ] <+>\<^bsub>R\<^esub> Span F Vs')"
using dimension_direct_sum_space[OF assms(1) _ _ inter] by auto
thus "dimension (n * Suc m) K E"
using Span_append_eq_set_add[OF assms(2) li[THEN independent_in_carrier]] Vs(4) v by auto
qed
context ring_hom_ring
begin
(* A ring homomorphism commutes with linear combinations: combining the
   mapped coefficients with the mapped vectors equals the image of the
   combination in R. *)
lemma combine_hom:
"\<lbrakk> set Ks \<subseteq> carrier R; set Us \<subseteq> carrier R \<rbrakk> \<Longrightarrow> combine (map h Ks) (map h Us) = h (R.combine Ks Us)"
by (induct Ks Us rule: R.combine.induct) (auto)
(* h commutes with line extension: the image of a line extension in R is the
   line extension (in S) of the images.  Follows from the coset/set-add
   homomorphism lemmas after unfolding both line_extension definitions. *)
lemma line_extension_hom:
assumes "K \<subseteq> carrier R" "a \<in> carrier R" "E \<subseteq> carrier R"
shows "line_extension (h ` K) (h a) (h ` E) = h ` R.line_extension K a E"
using set_add_hom[OF homh R.r_coset_subset_G[OF assms(1-2)] assms(3)]
coset_hom(2)[OF ring_hom_in_hom(1)[OF homh] assms(1-2)]
unfolding R.line_extension_def S.line_extension_def
by simp
(* h commutes with Span: Span over the mapped coefficients and vectors in S
   equals the image of the Span in R (induction over Us, using
   line_extension_hom at each step). *)
lemma Span_hom:
assumes "K \<subseteq> carrier R" "set Us \<subseteq> carrier R"
shows "Span (h ` K) (map h Us) = h ` R.Span K Us"
using assms line_extension_hom R.Span_in_carrier by (induct Us) (auto)
(* h is injective on an additive subgroup H iff the additive kernel of h
   restricted to H is trivial — the additive specialization of the group-hom
   fact, stated via a_kernel. *)
lemma inj_on_subgroup_iff_trivial_ker:
assumes "subgroup H (add_monoid R)"
shows "inj_on h H \<longleftrightarrow> a_kernel (R \<lparr> carrier := H \<rparr>) S h = { \<zero> }"
using group_hom.inj_on_subgroup_iff_trivial_ker[OF a_group_hom assms]
unfolding a_kernel_def[of "R \<lparr> carrier := H \<rparr>" S h] by simp
(* Specialization to spans: injectivity of h on Span K Us is equivalent to
   triviality of the kernel restricted to that span. *)
corollary inj_on_Span_iff_trivial_ker:
assumes "subfield K R" "set Us \<subseteq> carrier R"
shows "inj_on h (R.Span K Us) \<longleftrightarrow> a_kernel (R \<lparr> carrier := R.Span K Us \<rparr>) S h = { \<zero> }"
using inj_on_subgroup_iff_trivial_ker[OF R.Span_is_add_subgroup[OF assms]] .
context
fixes K :: "'a set" assumes K: "subfield K R" and one_zero: "\<one>\<^bsub>S\<^esub> \<noteq> \<zero>\<^bsub>S\<^esub>"
begin
(* A homomorphism injective on Span K Us maps an independent list to an
   independent one (over h ` K).  By contradiction: a non-trivial vanishing
   combination of the images pulls back (via combine_hom) to a combination in
   R whose image is \<zero>\<^bsub>S\<^esub>; injectivity forces the R-combination to be \<zero>,
   so all coefficients are \<zero> — making the S-combination trivial after all. *)
lemma inj_hom_preserves_independent:
assumes "inj_on h (R.Span K Us)"
and "R.independent K Us" shows "independent (h ` K) (map h Us)"
proof (rule ccontr)
have in_carrier: "set Us \<subseteq> carrier R" "set (map h Us) \<subseteq> carrier S"
using R.independent_in_carrier[OF assms(2)] by auto
assume ld: "dependent (h ` K) (map h Us)"
obtain Ks :: "'c list"
where Ks: "length Ks = length Us" "combine Ks (map h Us) = \<zero>\<^bsub>S\<^esub>" "set Ks \<subseteq> h ` K" "set Ks \<noteq> { \<zero>\<^bsub>S\<^esub> }"
using dependent_imp_non_trivial_combine[OF img_is_subfield(2)[OF K one_zero] in_carrier(2) ld]
by (metis length_map)
(* lift the S-coefficients back to K along h *)
obtain Ks' where Ks': "set Ks' \<subseteq> K" "Ks = map h Ks'"
using Ks(3) by (induct Ks) (auto, metis insert_subset list.simps(15,9))
hence "h (R.combine Ks' Us) = \<zero>\<^bsub>S\<^esub>"
using combine_hom[OF _ in_carrier(1)] Ks(2) subfieldE(3)[OF K] by (metis subset_trans)
moreover have "R.combine Ks' Us \<in> R.Span K Us"
using R.Span_eq_combine_set[OF K in_carrier(1)] Ks'(1) by auto
ultimately have "R.combine Ks' Us = \<zero>"
using assms hom_zero R.Span_subgroup_props(2)[OF K in_carrier(1)] by (auto simp add: inj_on_def)
hence "set Ks' \<subseteq> { \<zero> }"
using R.independent_imp_trivial_combine[OF K assms(2)] Ks' Ks(1)
by (metis length_map order_refl take_all)
hence "set Ks \<subseteq> { \<zero>\<^bsub>S\<^esub> }"
unfolding Ks' using hom_zero by (induct Ks') (auto)
hence "Ks = []"
using Ks(4) by (metis set_empty2 subset_singletonD)
hence "independent (h ` K) (map h Us)"
using independent.li_Nil Ks(1) by simp
from \<open>dependent (h ` K) (map h Us)\<close> and this show False by simp
qed
(* A homomorphism injective on E preserves dimension: the image h ` E is
   n-dimensional over h ` K, witnessed by the image of a basis of E. *)
corollary inj_hom_dimension:
assumes "inj_on h E"
and "R.dimension n K E" shows "dimension n (h ` K) (h ` E)"
proof -
obtain Us
where Us: "set Us \<subseteq> carrier R" "R.independent K Us" "length Us = n" "R.Span K Us = E"
using R.exists_base[OF K assms(2)] by blast
hence "dimension n (h ` K) (Span (h ` K) (map h Us))"
using dimension_independent[OF inj_hom_preserves_independent[OF _ Us(2)]] assms(1) by auto
thus ?thesis
using Span_hom[OF subfieldE(3)[OF K] Us(1)] Us(4) by simp
qed
(* Rank–nullity theorem: dim E = n and dim(ker h restricted to E) = m give
   dim(h ` E) = n - m.  Complete a basis Us of the kernel to a basis Vs @ Us
   of E; h is injective on Span K Vs (its restricted kernel is trivial), so
   h ` Span K Vs is (n - m)-dimensional; finally h ` E collapses to
   h ` Span K Vs because the kernel part maps to { \<zero>\<^bsub>S\<^esub> }. *)
corollary rank_nullity_theorem:
assumes "R.dimension n K E" and "R.dimension m K (a_kernel (R \<lparr> carrier := E \<rparr>) S h)"
shows "dimension (n - m) (h ` K) (h ` E)"
proof -
obtain Us
where Us: "set Us \<subseteq> carrier R" "R.independent K Us" "length Us = m"
"R.Span K Us = a_kernel (R \<lparr> carrier := E \<rparr>) S h"
using R.exists_base[OF K assms(2)] by blast
obtain Vs
where Vs: "R.independent K (Vs @ Us)" "length (Vs @ Us) = n" "R.Span K (Vs @ Us) = E"
using R.complete_base[OF K assms(1) Us(2)] R.Span_base_incl[OF K Us(1)] Us(4)
unfolding a_kernel_def' by auto
have set_Vs: "set Vs \<subseteq> carrier R"
using R.independent_in_carrier[OF Vs(1)] by auto
(* Span K Vs meets the kernel only in { \<zero> }, so h is injective on it *)
have "R.Span K Vs \<inter> a_kernel (R \<lparr> carrier := E \<rparr>) S h = { \<zero> }"
using R.independent_split[OF K Vs(1)] Us(4) by simp
moreover have "R.Span K Vs \<subseteq> E"
using R.mono_Span_append(1)[OF K set_Vs Us(1)] Vs(3) by auto
ultimately have "a_kernel (R \<lparr> carrier := R.Span K Vs \<rparr>) S h \<subseteq> { \<zero> }"
unfolding a_kernel_def' by (simp del: R.Span.simps, blast)
hence "a_kernel (R \<lparr> carrier := R.Span K Vs \<rparr>) S h = { \<zero> }"
using R.Span_subgroup_props(2)[OF K set_Vs]
unfolding a_kernel_def' by (auto simp del: R.Span.simps)
hence "inj_on h (R.Span K Vs)"
using inj_on_Span_iff_trivial_ker[OF K set_Vs] by simp
moreover have "R.dimension (n - m) K (R.Span K Vs)"
using R.dimension_independent[OF R.independent_split(2)[OF K Vs(1)]] Vs(2) Us(3) by auto
ultimately have "dimension (n - m) (h ` K) (h ` (R.Span K Vs))"
using assms(1) inj_hom_dimension by simp
(* identify h ` E with h ` Span K Vs: the kernel contributes only \<zero>\<^bsub>S\<^esub> *)
have "h ` E = h ` (R.Span K Vs <+>\<^bsub>R\<^esub> R.Span K Us)"
using R.Span_append_eq_set_add[OF K set_Vs Us(1)] Vs(3) by simp
hence "h ` E = h ` (R.Span K Vs) <+>\<^bsub>S\<^esub> h ` (R.Span K Us)"
using R.Span_subgroup_props(1)[OF K] set_Vs Us(1) set_add_hom[OF homh] by auto
moreover have "h ` (R.Span K Us) = { \<zero>\<^bsub>S\<^esub> }"
using R.space_subgroup_props(2)[OF K assms(1)] unfolding Us(4) a_kernel_def' by force
ultimately have "h ` E = h ` (R.Span K Vs) <+>\<^bsub>S\<^esub> { \<zero>\<^bsub>S\<^esub> }"
by simp
hence "h ` E = h ` (R.Span K Vs)"
using R.Span_subgroup_props(1-2)[OF K set_Vs] unfolding set_add_def' by force
from \<open>dimension (n - m) (h ` K) (h ` (R.Span K Vs))\<close> and this show ?thesis by simp
qed
end (* of fixed K context. *)
end (* of ring_hom_ring context. *)
(* A homomorphism into a non-trivial ring reflects independence: if the
   images (map h Us) are independent over h ` K, then Us is independent over
   K.  By contradiction: a non-trivial vanishing combination in R maps to a
   non-trivial vanishing combination in S (non-triviality survives because h
   is injective on K by img_is_subfield), contradicting the assumption. *)
lemma (in ring_hom_ring)
assumes "subfield K R" and "set Us \<subseteq> carrier R" and "\<one>\<^bsub>S\<^esub> \<noteq> \<zero>\<^bsub>S\<^esub>"
and "independent (h ` K) (map h Us)" shows "R.independent K Us"
proof (rule ccontr)
assume "R.dependent K Us"
then obtain Ks
where "length Ks = length Us" and "R.combine Ks Us = \<zero>" and "set Ks \<subseteq> K" and "set Ks \<noteq> { \<zero> }"
using R.dependent_imp_non_trivial_combine[OF assms(1-2)] by metis
hence "combine (map h Ks) (map h Us) = \<zero>\<^bsub>S\<^esub>"
using combine_hom[OF _ assms(2), of Ks] subfieldE(3)[OF assms(1)] by simp
moreover from \<open>set Ks \<subseteq> K\<close> have "set (map h Ks) \<subseteq> h ` K"
by (induction Ks) (auto)
(* the mapped coefficients are not all h \<zero>, because h is injective on K *)
moreover have "\<not> set (map h Ks) \<subseteq> { h \<zero> }"
proof (rule ccontr)
assume "\<not> \<not> set (map h Ks) \<subseteq> { h \<zero> }" then have "set (map h Ks) \<subseteq> { h \<zero> }"
by simp
moreover from \<open>R.dependent K Us\<close> and \<open>length Ks = length Us\<close> have "Ks \<noteq> []"
by auto
ultimately have "set (map h Ks) = { h \<zero> }"
using subset_singletonD by fastforce
with \<open>set Ks \<subseteq> K\<close> have "set Ks = { \<zero> }"
using inj_onD[OF _ _ _ subringE(2)[OF subfieldE(1)[OF assms(1)]], of h]
img_is_subfield(1)[OF assms(1,3)] subset_singletonD
by (induction Ks) (auto simp add: subset_singletonD, fastforce)
with \<open>set Ks \<noteq> { \<zero> }\<close> show False
by simp
qed
with \<open>length Ks = length Us\<close> have "\<not> set (take (length (map h Us)) (map h Ks)) \<subseteq> { h \<zero> }"
by auto
ultimately have "dependent (h ` K) (map h Us)"
using non_trivial_combine_imp_dependent[OF img_is_subfield(2)[OF assms(1,3)], of "map h Ks"] by simp
with \<open>independent (h ` K) (map h Us)\<close> show False
by simp
qed
subsection \<open>Finite Dimension\<close>
(* E is finite dimensional over K iff some natural number n is a dimension of E over K. *)
definition (in ring) finite_dimension :: "'a set \<Rightarrow> 'a set \<Rightarrow> bool"
  where "finite_dimension K E \<longleftrightarrow> (\<exists>n. dimension n K E)"
abbreviation (in ring) infinite_dimension :: "'a set \<Rightarrow> 'a set \<Rightarrow> bool"
  where "infinite_dimension K E \<equiv> \<not> finite_dimension K E"
(* dim K E: the dimension of E over K, well-defined when K is a subfield
   (uniqueness is proved in unique_dimension below). *)
definition (in ring) dim :: "'a set \<Rightarrow> 'a set \<Rightarrow> nat"
  where "dim K E = (THE n. dimension n K E)"
(* A subalgebra: an additive subgroup of R closed under scalar multiplication by K. *)
locale subalgebra = subgroup V "add_monoid R" for K and V and R (structure) +
  assumes smult_closed: "\<lbrakk> k \<in> K; v \<in> V \<rbrakk> \<Longrightarrow> k \<otimes> v \<in> V"
subsubsection \<open>Basic Properties\<close>
(* For a subfield K, a finite-dimensional space has a unique dimension. *)
lemma (in ring) unique_dimension:
  assumes "subfield K R" and "finite_dimension K E" shows "\<exists>!n. dimension n K E"
  using assms(2) dimension_is_inj[OF assms(1)] unfolding finite_dimension_def by auto
lemma (in ring) finite_dimensionI:
  assumes "dimension n K E" shows "finite_dimension K E"
  using assms unfolding finite_dimension_def by auto
(* dim picks out an actual dimension witness. *)
lemma (in ring) finite_dimensionE:
  assumes "subfield K R" and "finite_dimension K E" shows "dimension ((dim over K) E) K E"
  using theI'[OF unique_dimension[OF assms]] unfolding over_def dim_def by simp
(* dim is well-defined: any dimension witness equals (dim over K) E. *)
lemma (in ring) dimI:
  assumes "subfield K R" and "dimension n K E" shows "(dim over K) E = n"
  using finite_dimensionE[OF assms(1) finite_dimensionI] dimension_is_inj[OF assms(1)] assms(2)
  unfolding over_def dim_def by auto
lemma (in ring) finite_dimensionE' [elim]:
  assumes "finite_dimension K E" and "\<And>n. dimension n K E \<Longrightarrow> P" shows P
  using assms unfolding finite_dimension_def by auto
(* The span of a finite list of vectors is finite dimensional. *)
lemma (in ring) Span_finite_dimension:
  assumes "subfield K R" and "set Us \<subseteq> carrier R"
  shows "finite_dimension K (Span K Us)"
  using filter_base[OF assms] finite_dimensionI[OF dimension_independent[of K]] by metis
lemma (in ring) carrier_is_subalgebra:
  assumes "K \<subseteq> carrier R" shows "subalgebra K (carrier R) R"
  using assms subalgebra.intro[OF add.group_incl_imp_subgroup[of "carrier R"], of K] add.group_axioms
  unfolding subalgebra_axioms_def by auto
lemma (in ring) subalgebra_in_carrier:
  assumes "subalgebra K V R" shows "V \<subseteq> carrier R"
  using subgroup.subset[OF subalgebra.axioms(1)[OF assms]] by simp
(* Subalgebras are closed under binary intersection. *)
lemma (in ring) subalgebra_inter:
  assumes "subalgebra K V R" and "subalgebra K V' R" shows "subalgebra K (V \<inter> V') R"
  using add.subgroups_Inter_pair assms unfolding subalgebra_def subalgebra_axioms_def by auto
(* The homomorphic image of a subalgebra is a subalgebra of the target ring. *)
lemma (in ring_hom_ring) img_is_subalgebra:
  assumes "K \<subseteq> carrier R" and "subalgebra K V R" shows "subalgebra (h ` K) (h ` V) S"
proof (intro subalgebra.intro)
  have "group_hom (add_monoid R) (add_monoid S) h"
    using ring_hom_in_hom(2)[OF homh] R.add.group_axioms add.group_axioms
    unfolding group_hom_def group_hom_axioms_def by auto
  thus "subgroup (h ` V) (add_monoid S)"
    using group_hom.subgroup_img_is_subgroup[OF _ subalgebra.axioms(1)[OF assms(2)]] by force
next
  show "subalgebra_axioms (h ` K) (h ` V) S"
    using R.subalgebra_in_carrier[OF assms(2)] subalgebra.axioms(2)[OF assms(2)] assms(1)
    unfolding subalgebra_axioms_def
    by (auto, metis hom_mult image_eqI subset_iff)
qed
(* Every ideal is a subalgebra (left multiplication closure gives smult closure). *)
lemma (in ring) ideal_is_subalgebra:
  assumes "K \<subseteq> carrier R" "ideal I R" shows "subalgebra K I R"
  using ideal.axioms(1)[OF assms(2)] ideal.I_l_closed[OF assms(2)] assms(1)
  unfolding subalgebra_def subalgebra_axioms_def additive_subgroup_def by auto
lemma (in ring) Span_is_subalgebra:
  assumes "subfield K R" "set Us \<subseteq> carrier R" shows "subalgebra K (Span K Us) R"
  using Span_smult_closed[OF assms] Span_is_add_subgroup[OF assms]
  unfolding subalgebra_def subalgebra_axioms_def by auto
lemma (in ring) finite_dimension_imp_subalgebra:
  assumes "subfield K R" "finite_dimension K E" shows "subalgebra K E R"
  using exists_base[OF assms(1) finite_dimensionE[OF assms]] Span_is_subalgebra[OF assms(1)] by auto
(* Span K Us is contained in any subalgebra V that contains set Us. *)
lemma (in ring) subalgebra_Span_incl:
  assumes "subfield K R" and "subalgebra K V R" "set Us \<subseteq> V" shows "Span K Us \<subseteq> V"
proof -
  have "K <#> (set Us) \<subseteq> V"
    using subalgebra.smult_closed[OF assms(2)] assms(3) unfolding set_mult_def by blast
  moreover have "set Us \<subseteq> carrier R"
    using subalgebra_in_carrier[OF assms(2)] assms(3) by auto
  ultimately show ?thesis
    using subalgebra.axioms(1)[OF assms(2)] Span_min[OF assms(1)] by blast
qed
(* Span K Us is the intersection of all subalgebras containing set Us. *)
lemma (in ring) Span_subalgebra_minimal:
  assumes "subfield K R" "set Us \<subseteq> carrier R"
  shows "Span K Us = \<Inter> { V. subalgebra K V R \<and> set Us \<subseteq> V }"
  using Span_is_subalgebra[OF assms] Span_base_incl[OF assms] subalgebra_Span_incl[OF assms(1)]
  by blast
(* Introduction rule: a minimal subalgebra containing set Us equals the span. *)
lemma (in ring) Span_subalgebraI:
  assumes "subfield K R"
    and "subalgebra K E R" "set Us \<subseteq> E"
    and "\<And>V. \<lbrakk> subalgebra K V R; set Us \<subseteq> V \<rbrakk> \<Longrightarrow> E \<subseteq> V"
  shows "E = Span K Us"
proof -
  have "\<Inter> { V. subalgebra K V R \<and> set Us \<subseteq> V } = E"
    using assms(2-4) by auto
  thus "E = Span K Us"
    using Span_subalgebra_minimal subalgebra_in_carrier[of K E] assms by auto
qed
(* A subalgebra contained in a finite-dimensional space is finite dimensional.
   NOTE(review): "subalbegra" in the name is a typo for "subalgebra", kept
   unchanged to avoid breaking references elsewhere. *)
lemma (in ring) subalbegra_incl_imp_finite_dimension:
  assumes "subfield K R" and "finite_dimension K E"
    and "subalgebra K V R" "V \<subseteq> E" shows "finite_dimension K V"
proof -
  obtain n where n: "dimension n K E"
    using assms(2) by auto
  define S where "S = { Us. set Us \<subseteq> V \<and> independent K Us }"
  (* Independent lists in V have length at most n, so a maximal one exists. *)
  have "length ` S \<subseteq> {..n}"
    unfolding S_def using independent_length_le_dimension[OF assms(1) n] assms(4) by auto
  moreover have "[] \<in> S"
    unfolding S_def by simp
  hence "length ` S \<noteq> {}" by blast
  ultimately obtain m where m: "m \<in> length ` S" and greatest: "\<And>k. k \<in> length ` S \<Longrightarrow> k \<le> m"
    by (meson Max_ge Max_in finite_atMost rev_finite_subset)
  then obtain Us where Us: "set Us \<subseteq> V" "independent K Us" "m = length Us"
    unfolding S_def by auto
  (* A maximal independent list in V must span V, otherwise it could be extended. *)
  have "Span K Us = V"
  proof (rule ccontr)
    assume "\<not> Span K Us = V" then have "Span K Us \<subset> V"
      using subalgebra_Span_incl[OF assms(1,3) Us(1)] by blast
    then obtain v where v:"v \<in> V" "v \<notin> Span K Us"
      by blast
    hence "independent K (v # Us)"
      using independent.li_Cons[OF _ _ Us(2)] subalgebra_in_carrier[OF assms(3)] by auto
    hence "(v # Us) \<in> S"
      unfolding S_def using Us(1) v(1) by auto
    hence "length (v # Us) \<le> m"
      using greatest by blast
    moreover have "length (v # Us) = Suc m"
      using Us(3) by auto
    ultimately show False by simp
  qed
  thus ?thesis
    using finite_dimensionI[OF dimension_independent[OF Us(2)]] by simp
qed
(* An injective homomorphism preserves infinite dimension of a subalgebra. *)
lemma (in ring_hom_ring) infinite_dimension_hom:
  assumes "subfield K R" and "\<one>\<^bsub>S\<^esub> \<noteq> \<zero>\<^bsub>S\<^esub>" and "inj_on h E" and "subalgebra K E R"
  shows "R.infinite_dimension K E \<Longrightarrow> infinite_dimension (h ` K) (h ` E)"
proof -
  note subfield = img_is_subfield(2)[OF assms(1-2)]
  assume "R.infinite_dimension K E"
  show "infinite_dimension (h ` K) (h ` E)"
  proof (rule ccontr)
    assume "\<not> infinite_dimension (h ` K) (h ` E)"
    then obtain Vs where "set Vs \<subseteq> carrier S" and "Span (h ` K) Vs = h ` E"
      using exists_base[OF subfield] by blast
    hence "set Vs \<subseteq> h ` E"
      using Span_base_incl[OF subfield] by blast
    (* A base of h ` E comes from a list of preimages Us in E. *)
    hence "\<exists>Us. set Us \<subseteq> E \<and> Vs = map h Us"
      by (induct Vs) (auto, metis insert_subset list.simps(9,15))
    then obtain Us where "set Us \<subseteq> E" and "Vs = map h Us"
      by blast
    with \<open>Span (h ` K) Vs = h ` E\<close> have "h ` (R.Span K Us) = h ` E"
      using R.subalgebra_in_carrier[OF assms(4)] Span_hom assms(1) by auto
    moreover from \<open>set Us \<subseteq> E\<close> have "R.Span K Us \<subseteq> E"
      using R.subalgebra_Span_incl assms(1-4) by blast
    (* Injectivity of h on E lets us cancel h on both sides. *)
    ultimately have "R.Span K Us = E"
    proof (auto simp del: R.Span.simps)
      fix a assume "a \<in> E"
      with \<open>h ` (R.Span K Us) = h ` E\<close> obtain b where "b \<in> R.Span K Us" and "h a = h b"
        by auto
      with \<open>R.Span K Us \<subseteq> E\<close> and \<open>a \<in> E\<close> have "a = b"
        using inj_onD[OF assms(3)] by auto
      with \<open>b \<in> R.Span K Us\<close> show "a \<in> R.Span K Us"
        by simp
    qed
    with \<open>set Us \<subseteq> E\<close> have "R.finite_dimension K E"
      using R.Span_finite_dimension[OF assms(1)] R.subalgebra_in_carrier[OF assms(4)] by auto
    with \<open>R.infinite_dimension K E\<close> show False
      by simp
  qed
qed
subsubsection \<open>Reformulation of some lemmas in this new language.\<close>
(* Dimension formula for the sum of two finite-dimensional subspaces:
   dim(E + F) = dim E + dim F - dim(E \<inter> F). *)
lemma (in ring) sum_space_dim:
  assumes "subfield K R" "finite_dimension K E" "finite_dimension K F"
  shows "finite_dimension K (E <+>\<^bsub>R\<^esub> F)"
    and "((dim over K) (E <+>\<^bsub>R\<^esub> F)) = ((dim over K) E) + ((dim over K) F) - ((dim over K) (E \<inter> F))"
proof -
  obtain n m k where n: "dimension n K E" and m: "dimension m K F" and k: "dimension k K (E \<inter> F)"
    using assms(2-3) subalbegra_incl_imp_finite_dimension[OF assms(1-2)
          subalgebra_inter[OF assms(2-3)[THEN finite_dimension_imp_subalgebra[OF assms(1)]]]]
    by (meson inf_le1 finite_dimension_def)
  hence "dimension (n + m - k) K (E <+>\<^bsub>R\<^esub> F)"
    using dimension_sum_space[OF assms(1)] by simp
  thus "finite_dimension K (E <+>\<^bsub>R\<^esub> F)"
    and "((dim over K) (E <+>\<^bsub>R\<^esub> F)) = ((dim over K) E) + ((dim over K) F) - ((dim over K) (E \<inter> F))"
    using finite_dimensionI dimI[OF assms(1)] n m k by auto
qed
(* Tower law for dimensions: dim_K E = (dim_K F) * (dim_F E). *)
lemma (in ring) telescopic_base_dim:
  assumes "subfield K R" "subfield F R" and "finite_dimension K F" and "finite_dimension F E"
  shows "finite_dimension K E" and "(dim over K) E = ((dim over K) F) * ((dim over F) E)"
  using telescopic_base[OF assms(1-2)
        finite_dimensionE[OF assms(1,3)]
        finite_dimensionE[OF assms(2,4)]]
        dimI[OF assms(1)] finite_dimensionI
  by auto
end
|
{"author": "seL4", "repo": "isabelle", "sha": "e1ab32a3bb41728cd19541063283e37919978a4c", "save_path": "github-repos/isabelle/seL4-isabelle", "path": "github-repos/isabelle/seL4-isabelle/isabelle-e1ab32a3bb41728cd19541063283e37919978a4c/src/HOL/Algebra/Embedded_Algebras.thy"}
|
# Unit test for EOP C04 file reading
@testset "EOP C04 file reading" begin
    # Load the sample EOP C04 file shipped under test/input.
    data = loadeop(joinpath(dirname(@__DIR__),"test","input","eop_data.c04"));
    # Expected table dimensions for the sample file.
    @test size(data) == (11,14)
    # First and last epochs in the sample span 1962-01-01 .. 1962-01-11.
    @test data[:datetime][1] == DateTime(1962,1,1)
    @test data[:datetime][end] == DateTime(1962,1,11)
    # Spot-check numeric columns against values from the input file.
    @test sum(data[:x]) ≈ -0.297387
    @test data[:dYErr][end] ≈ 0.002000
end
|
{"hexsha": "5f3faaddcb52c4954df0ef44fb7e2e046c90b121", "size": 352, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/eopfile.jl", "max_stars_repo_name": "emenems/FileTools.jl", "max_stars_repo_head_hexsha": "c1431aa2a861480e665d937bec644e07d665b9f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/eopfile.jl", "max_issues_repo_name": "emenems/FileTools.jl", "max_issues_repo_head_hexsha": "c1431aa2a861480e665d937bec644e07d665b9f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/eopfile.jl", "max_forks_repo_name": "emenems/FileTools.jl", "max_forks_repo_head_hexsha": "c1431aa2a861480e665d937bec644e07d665b9f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2, "max_line_length": 75, "alphanum_fraction": 0.6704545455, "num_tokens": 128}
|
import jsonlines
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
OPTS = None
def normalize_answer(s):
    """Normalize an answer string for comparison.

    Applies, in order: lowercasing, punctuation removal, removal of the
    articles a/an/the, and whitespace collapsing.
    """
    text = s.lower()
    # Strip every ASCII punctuation character.
    punctuation = set(string.punctuation)
    text = ''.join(ch for ch in text if ch not in punctuation)
    # Replace whole-word articles with a space so word boundaries survive.
    text = re.sub(r'\b(a|an|the)\b', ' ', text, flags=re.UNICODE)
    # Collapse all runs of whitespace to single spaces and trim the ends.
    return ' '.join(text.split())
def get_tokens(s):
    """Return the whitespace-split tokens of the normalized answer.

    Falsy input (empty string or None) yields an empty token list.
    """
    return normalize_answer(s).split() if s else []
def compute_exact(a_gold, a_pred):
    """Exact-match score: 1 if normalized answers are identical, else 0."""
    return 1 if normalize_answer(a_gold) == normalize_answer(a_pred) else 0
def compute_f1(a_gold, a_pred):
    """Token-level F1 between a gold and a predicted answer.

    Tokens come from `get_tokens` (normalized, whitespace split). Overlap is
    counted with multiplicity via Counter intersection. If either token list
    is empty, F1 is 1 when both are empty, 0 otherwise.
    """
    gold = get_tokens(a_gold)
    pred = get_tokens(a_pred)
    overlap = collections.Counter(gold) & collections.Counter(pred)
    shared = sum(overlap.values())
    if not gold or not pred:
        # No-answer convention: agree -> 1, disagree -> 0.
        return int(gold == pred)
    if shared == 0:
        return 0
    precision = shared / len(pred)
    recall = shared / len(gold)
    return (2 * precision * recall) / (precision + recall)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', default=None, type=str,
                        help="jsonlines file with 'tgt' and 'gen' fields")
    args = parser.parse_args()

    # Load all target/generation pairs from the jsonlines file.
    with jsonlines.open(args.input, 'r') as f:
        data = [line for line in f.iter()]

    em = 0.
    f1 = 0.
    count = 0.
    key = 'answer:'
    wrong_type = 0.  # generations missing the 'answer:' prefix

    for i, line in enumerate(data):
        tgt = line["tgt"].replace(key, '').lstrip()
        gen = line["gen"].replace(key, '').lstrip()
        # Periodically echo a sample pair for quick inspection.
        if i % 1000 == 0:
            print(tgt)
            print(gen)
        # Only score examples whose target is answer-typed.
        if key not in line["tgt"]:
            continue
        count += 1
        if key in line["gen"]:
            em += compute_exact(tgt, gen)
            f1 += compute_f1(tgt, gen)
        else:
            # Generation has the wrong type; contributes 0 to EM and F1.
            wrong_type += 1

    # BUGFIX: guard against division by zero when no answer-typed
    # targets exist in the input file.
    if count == 0:
        print("No answer-typed targets found; nothing to score.")
    else:
        print(f"EM accuracy is: {em / count}")
        print(f"F1 is: {f1 / count}")
        print(f"Wrong generated type: {wrong_type / count}")
|
{"hexsha": "8f07896f517546cc2ba1a3323223f5d95725cda1", "size": 2566, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/answer_em_score.py", "max_stars_repo_name": "awslabs/durepa-hybrid-qa", "max_stars_repo_head_hexsha": "3706c983b6b23e7dbc0fab33b01ecee1a25f91f6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-24T09:23:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T09:23:15.000Z", "max_issues_repo_path": "scripts/answer_em_score.py", "max_issues_repo_name": "awslabs/durepa-hybrid-qa", "max_issues_repo_head_hexsha": "3706c983b6b23e7dbc0fab33b01ecee1a25f91f6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-09T06:27:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T06:27:39.000Z", "max_forks_repo_path": "scripts/answer_em_score.py", "max_forks_repo_name": "awslabs/durepa-hybrid-qa", "max_forks_repo_head_hexsha": "3706c983b6b23e7dbc0fab33b01ecee1a25f91f6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5111111111, "max_line_length": 109, "alphanum_fraction": 0.6013250195, "include": true, "reason": "import numpy", "num_tokens": 677}
|
function value = box_01_contains_point_nd ( dim_num, p )

%*****************************************************************************80
%
%% BOX_01_CONTAINS_POINT_ND determines if a point is inside a unit box in ND.
%
%  Discussion:
%
%    A unit box is assumed to be a rectangle with sides aligned on coordinate
%    axes.  It can be described by its low and high corner, P1 and P2:
%
%      0 <= P(1:DIM_NUM) <= 1
%
%  Licensing:
%
%    This code is distributed under the GNU LGPL license.
%
%  Modified:
%
%    16 June 2005
%
%  Author:
%
%    John Burkardt
%
%  Parameters:
%
%    Input, integer DIM_NUM, the spatial dimension.
%
%    Input, real P(DIM_NUM), the point to be checked.
%
%    Output, logical VALUE, is TRUE if P is inside the box.
%

%  Vectorized containment test: every coordinate must lie in [0,1].
%  (Replaces an element-by-element loop that returned a double 0/1;
%  ALL returns a proper logical value, matching the documented output,
%  and behaves identically in arithmetic and conditional contexts.)
  value = all ( 0.0 <= p(1:dim_num) & p(1:dim_num) <= 1.0 );

  return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/geometry/box_01_contains_point_nd.m"}
|
from flask import Flask, Response, render_template, request, jsonify
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from simulation import Simulation
import numpy as np
import io
import webbrowser
import json
app = Flask(__name__)
# Most recently configured/run Simulation; None until the first POST to "/".
current_simulation = None
@app.route("/", methods=["GET", "POST"])
def index():
global current_simulation
if request.method == "POST":
data = json.loads(request.data)
print(data)
current_simulation = Simulation(
bottle_volume=data["bottle_volume"],
inside_pressure=data["bottle_pressure"],
nozzle_area=data["nozzle_area"],
launch_angle=data["launch_angle"],
rocket_mass=data["rocket_mass"],
fill_factor=data["fill_factor"],
rocket_face_area=data["face_area"],
)
current_simulation.run()
return jsonify(success=True)
return render_template("index.html")
@app.route("/trajectory.png")
def trajectory():
if current_simulation is None:
return jsonify(success=False)
fig = current_simulation.calc_trajectory(plot=True)
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(), mimetype="image/png")
@app.route("/drag.png")
def drag():
if current_simulation is None:
return jsonify(success=False)
fig = current_simulation.calc_rocket_drag(plot=True)
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(), mimetype="image/png")
@app.route("/thrust.png")
def thrust():
if current_simulation is None:
return jsonify(success=False)
fig = current_simulation.calc_rocket_thrust(plot=True)
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(), mimetype="image/png")
@app.route("/water-speed.png")
def water_speed():
if current_simulation is None:
return jsonify(success=False)
fig = current_simulation.calc_water_speed(plot=True)
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(), mimetype="image/png")
@app.route("/water-vol.png")
def water_vol():
if current_simulation is None:
return jsonify(success=False)
fig = current_simulation.calc_water_volume(plot=True)
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(), mimetype="image/png")
@app.route("/animation")
def trajectory_animation():
if current_simulation is None:
return jsonify(success=False)
times, points = current_simulation.calc_trajectory()
return jsonify(times=times.tolist(), points=points.tolist())
if __name__ == "__main__":
webbrowser.open("127.0.0.1:5000")
app.run()
|
{"hexsha": "f35dac43c1a5c0473626b35f1fc6b7e22c71e782", "size": 2809, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/server.py", "max_stars_repo_name": "mbucchi/water-rocket", "max_stars_repo_head_hexsha": "4d46449fb724b52fc4f51670f4a733b56bf68024", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/server.py", "max_issues_repo_name": "mbucchi/water-rocket", "max_issues_repo_head_hexsha": "4d46449fb724b52fc4f51670f4a733b56bf68024", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/server.py", "max_forks_repo_name": "mbucchi/water-rocket", "max_forks_repo_head_hexsha": "4d46449fb724b52fc4f51670f4a733b56bf68024", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5684210526, "max_line_length": 75, "alphanum_fraction": 0.6959772161, "include": true, "reason": "import numpy", "num_tokens": 565}
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import astropy.table
from scipy.interpolate import interp1d
from scipy.ndimage import convolve
from scipy.optimize import differential_evolution
from astropy.io import fits
from astropy.table import Table
from astropy import units as u
import matplotlib.pyplot as plt
from ISPy.spec import atlas
def get_calibration(wave_obs, spec_obs, wave_atlas, spec_atlas, mu=1.0,
        calib_at_dc=False, wave_idx=None, extra_weight=20., bounds=None):
    """
    Get calibration offsets from fitting `spec_obs` to `spec_atlas`, assuming
    wavelength grids `wave_obs` and `wave_atlas`

    Parameters
    ----------
    wave_obs : array_like
        1D array with observed wavelengths. Must be of same size as `spec_obs`.
    spec_obs : array_like
        1D array with observed intensities.
    wave_atlas : array_like
        1D array with wavelengths corresponding to `spec_atlas`.
    spec_atlas : array_like
        1D array with atlas intensity profile (e.g. from `ISpy.spec.atlas`)
    mu : float, optional
        cosine of heliocentric viewing angle of the observations (defaults 1.0
        -> disc centre)
    calib_at_dc : bool, optional
        calibrate assuming the profile was taken at disc centre (defaults
        False).
    wave_idx : array_like, optional
        wavelength indices that will get `extra_weight` while fitting the
        intensity profile (default None -> all wavelengths get equal weight)
    extra_weight : float, optional
        amount of extra weight to give selected wavelength positions as
        specified by `wave_idx` (default 20)
    bounds : list of tuples, optional
        list of tuples [(ifact_low, ifact_upp), (woff_low, woff_upp)] suggesting
        lower and upper bounds for fitting the intensity factor and wavelength
        offset (defaults to 1/50th and 50 times the fraction of `spec_atlas` to
        `spec_obs` at their first elements for ifact, and ±0.3 for woff)

    Returns
    -------
    calibration : 2-element array
        multiplication factor and wavelength offset [ifact, woff] to be applied
        to `spec_obs` and `wave_obs` respectively.

    Example
    -------
    >>> calibration = get_calibration(wave, spec, wave_atlas, spec_atlas, mu=0.4, calib_at_dc=True)

    :Authors:
        Carlos Diaz (ISP/SU 2020), Gregal Vissers (ISP/SU 2020)
    """
    if wave_idx is None:
        wave_idx = np.arange(wave_obs.size)
    else:
        wave_idx = np.atleast_1d(wave_idx)

    # Correct for limb-darkening if the profile to calibrate on is not from
    # disc centre (and presumably at the same mu as the observations)
    if calib_at_dc is False:
        spec_atlas = spec_atlas * limbdarkening(wave_atlas, mu=mu)

    weights = np.ones_like(wave_obs)
    # BUGFIX: compare sizes with `!=`, not the identity operator `is not`;
    # identity comparison of Python ints is unreliable for values > 256,
    # so the extra weighting could silently trigger for large arrays.
    if wave_idx.size != wave_obs.size:
        weights[wave_idx] = extra_weight

    def func_to_optimise(x):
        # x = [intensity factor, wavelength offset]; weighted chi-square
        # between the scaled observation and the shifted atlas profile.
        ifact, woff = x
        ospec = spec_obs * ifact
        atlas_interp = np.interp(wave_obs, wave_atlas - woff, spec_atlas)
        return np.sum((atlas_interp - ospec)**2 * weights)

    if bounds is None:
        # NOTE(review): these defaults key off the *first* samples of both
        # profiles and can be far from the true factor if the profiles do not
        # overlap well at their blue ends -- pass explicit `bounds` if so.
        bounds = [(spec_atlas[0]/spec_obs[0]*0.02, spec_atlas[0]/spec_obs[0]*50.),
                  (-0.3, 0.3)]
    optim = differential_evolution(func_to_optimise, bounds)
    calibration = optim.x

    # Fold limb-darkening into the factor if calibration was done against a
    # disc-centre profile while the data is off-centre; for calib_at_dc=False
    # limb-darkening is already part of the optimised offset.
    if (calib_at_dc is True) and (mu != 1.0):
        calibration[0] *= np.mean(limbdarkening(wave_atlas, mu=mu))

    return calibration
def convolve_atlas(wave_atlas, spec_atlas, instrument_profile, mode='nearest'):
    """
    Convolve a spectral atlas profile with an instrument profile, after
    resampling the instrument profile onto the atlas wavelength spacing.

    Parameters
    ----------
    wave_atlas : array_like
        1D array with wavelengths corresponding to `spec_atlas`.
    spec_atlas : array_like
        1D array with atlas intensity profile (e.g. from `ISpy.spec.atlas`)
    instrument_profile : ndarray
        2D array [wave, profile] with wavelength spacing (starting at 0) and
        instrumental profile to convolve the atlas profile with
    mode : str, optional
        boundary mode passed to `scipy.ndimage.convolve` (defaults 'nearest')

    Returns
    -------
    spec_convolved : array_like
        1D array with the convolved profile

    Example
    -------
    >>> convolved = calib.convolve_atlas(wave_atlas, spec_atlas, ipr)

    :Author:
        Gregal Vissers (ISP/SU 2020)
    """
    ipr_wave = instrument_profile[:, 0]
    ipr_prof = instrument_profile[:, 1]
    # Mean wavelength steps of the instrument profile and the atlas grid.
    ipr_step = np.diff(ipr_wave).mean()
    atlas_step = np.diff(wave_atlas).mean()
    # Resample the instrument profile onto a grid with the atlas spacing,
    # spanning the same total wavelength extent.
    npoints = (ipr_wave.size - 1) * ipr_step / atlas_step + 1
    fine_grid = np.arange(npoints) * atlas_step
    kernel = np.interp(fine_grid, ipr_wave, ipr_prof)
    # Normalise the kernel to unit area so total intensity is preserved.
    kernel = kernel / np.sum(kernel)
    return convolve(spec_atlas, kernel, mode=mode)
def spectrum(wave, spec, mu=1.0, spec_avg=None, calib_at_dc=False,
        atlas_range=0.5, wave_idx=None, extra_weight=20., bounds=None,
        instrument_profile=None, calib_wave=False, cgs=True, si=False,
        perHz=True, qsdc_calib=False, verbose=False):
    """
    Calibrate spectrum intensity (in SI or cgs units) and wavelength by
    simultaneously fitting offsets given an atlas profile

    Parameters
    ----------
    wave : array_like
        1D array with wavelengths.
    spec : ndarray
        data (cube) with intensity profile(s) in counts to apply calibration
        to. If `spec_avg` is None, `spec` must be a 1D array of Stokes I
        intensities of same size as `wave` and is used to determine the
        intensity calibration offset factor.
    mu : float, optional
        cosine of heliocentric viewing angle of the observations (defaults 1.0
        -> disc centre)
    spec_avg : array_like, optional
        averaged intensity profile to use for calibration
        (default None -> use `spec` to calibrate on)
    calib_at_dc : bool, optional
        calibrate assuming `spec_avg` (or `spec`) was taken at disc centre
        (defaults False).
    atlas_range : float, optional
        get atlas profile for the range +/- this value (defaults 0.5)
    wave_idx : array_like, optional
        wavelength indices that will get `extra_weight` while fitting the
        intensity profile (default None -> all wavelengths get equal weight)
    extra_weight : float, optional
        amount of extra weight to give selected wavelength positions as
        specified by `wave_idx` (default 20)
    bounds : list of tuples, optional
        [(ifact_low, ifact_upp), (woff_low, woff_upp)] lower and upper bounds
        for fitting the intensity factor and wavelength offset (defaults None)
    instrument_profile : ndarray, optional
        2D array [wave, profile] with wavelength spacing (starting at 0) and
        instrumental profile to convolve the atlas profile with
    calib_wave : bool, optional
        perform wavelength calibration prior to intensity calibration
        (default False)
    cgs : bool, optional
        output calibration in cgs units (default True)
    si : bool, optional
        output calibration in SI units (default False).
        NOTE(review): this argument is currently not used anywhere in the
        function body; unit selection is driven by `cgs`/`perHz` only.
    perHz : bool, optional
        output calibration per frequency unit (default True)
    qsdc_calib : bool, optional
        output calibration as fraction of quiet Sun disc centre continuum
        intensity (default False). If set, overrides `cgs`, `si` and `perHz`
    verbose : bool, optional
        output calibration plot and offset values to command line (defaults
        False)

    Returns
    -------
    wave : array_like
        calibrated wavelength array
    spec : array_like
        calibrated intensity profile array
    calibration : 2-element array
        offset factor and wavelength shift converting data counts to absolute
        intensity
    spec_fts_sel : ndarray
        atlas profile at wavelengths given by `wave`. Convolved with
        instrument profile if `instrument_profile` is not None.
    sunit : object
        intensity units

    Example
    -------
    >>> wave_cal, spec_cal, factor, spec_fts, units = calib.spectrum(wave,
            ispec, cgs=True, calib_wave=True, wave_idx=[0,1,-2,-1])

    :Author:
        Gregal Vissers, Carlos Diaz (ISP/SU 2019-2020)
    """
    if spec_avg is not None:
        profile = np.copy(spec_avg)
    else:
        if spec.ndim == 1:
            profile = np.copy(spec)
        else:
            raise ValueError("`spec` must be a 1D array when `spec_avg` is not set")

    # Get atlas profile covering the observed range +/- atlas_range
    fts = atlas.atlas()
    atlas_range = np.abs(atlas_range)
    wave_fts, spec_fts_dc, cont_fts = fts.get(wave[0]-atlas_range,
        wave[-1]+atlas_range, cgs=cgs, perHz=perHz)

    # Degrade the atlas with the instrument profile if provided
    if instrument_profile is not None:
        spec_fts = convolve_atlas(wave_fts, spec_fts_dc, instrument_profile)
    else:
        spec_fts = np.copy(spec_fts_dc)

    # Get calibration offset factor and wavelength shift
    calibration = get_calibration(wave, profile, wave_fts, spec_fts,
        bounds=bounds, calib_at_dc=calib_at_dc, mu=mu, wave_idx=wave_idx,
        extra_weight=extra_weight)

    # Apply calibration to the output arrays
    if calib_wave is True:
        wave = wave + calibration[1]
    spec = spec * calibration[0]

    # Apply limb-darkening correction on the atlas if off disc centre
    if (mu != 1.0):
        limbdark_factor = np.mean(limbdarkening(wave_fts, mu=mu))
        spec_fts *= limbdark_factor
        spec_fts_dc *= limbdark_factor

    # Atlas intensity at the nearest atlas point for each observed wavelength.
    # BUGFIX: build this as an ndarray -- it was a plain Python list, and the
    # in-place division below (`spec_fts_sel /= cont_fts[0]`) raises a
    # TypeError on a list whenever qsdc_calib=True.
    spec_fts_sel = np.array([spec_fts[np.argmin(np.abs(wave_fts - wave[ww]))]
                             for ww in range(wave.size)])

    if qsdc_calib is True:
        # Express everything as fraction of the quiet-Sun disc-centre continuum
        spec /= cont_fts[0]
        spec_fts /= cont_fts[0]
        spec_fts_dc /= cont_fts[0]
        spec_fts_sel /= cont_fts[0]
        calibration[0] /= cont_fts[0]
        sunit = u.dimensionless_unscaled
    else:
        sunit = fts.sunit

    if verbose is True:
        if calib_wave is True:
            print("spectrum: wavelength calibration offset: {0} (added to input wavelengths)".format(calibration[1]))
        print("spectrum: intensity calibration offset factor: {0}".format(calibration[0]))
        if qsdc_calib is True:
            print("spectrum: STiC calibration offset factor: {0}".format(cont_fts[0]))
            plot_scale_factor = 1.0
        else:
            plot_scale_factor = 1.e-5
        profile *= calibration[0]
        fig, ax = plt.subplots()
        legend_items = ('observed profile', 'selected points',
            'atlas profile at '+u'μ={0}'.format(mu))
        ax.plot(wave, profile/plot_scale_factor, '.')
        ax.plot(wave[wave_idx], profile[wave_idx]/plot_scale_factor, '+')
        ax.plot(wave_fts, spec_fts_dc/plot_scale_factor)
        if instrument_profile is not None:
            ax.plot(wave_fts, spec_fts/plot_scale_factor,'--')
            legend_items += ('atlas convolved with instrument profile',)
        if qsdc_calib is True:
            ax.set_ylabel('intensity relative to disc centre continuum [dimensionless]')
        else:
            ax.set_ylabel('intensity ['+r'$\times10^{-5}$'+' {0}]'.format(sunit.to_string()))
        ax.set_xlabel('wavelength [{0}]'.format(fts.wunit.to_string()))
        ax.legend(legend_items)
        ax.set_title('ISPy: calib.spectrum() results')
        plt.show()

    return wave, spec, calibration, spec_fts_sel, sunit
def limbdarkening(wave, mu=1.0, nm=False):
    """
    Return the limb-darkening factor for given wavelength(s) and viewing
    angle mu = cos(theta), using the tabulated Neckel & Labs (1994)
    polynomial coefficients A0..A5 shipped with the package.

    Parameters
    ----------
    wave : float or array_like
        scalar or 1D array with wavelength(s).
    mu : float, optional
        cosine of heliocentric viewing angle (default 1.0 -> disc centre)
    nm : bool, optional
        input wavelength units are nanometers (default False)

    Returns
    -------
    factor : array_like
        scaling factor(s) for the given input wavelengths; one element per
        element of `wave`.

    Example
    -------
    >>> factor = limbdarkening(630.25, mu=0.7, nm=True)

    :Author:
        Gregal Vissers (ISP/SU 2020)
    """
    data_dir = os.path.split(__file__)[0]
    data_path = os.path.join(data_dir,
        "../data/limbdarkening_Neckel_Labs_1994.fits")
    # Make scalar input iterable.
    wave = np.atleast_1d(wave)
    table = Table(fits.getdata(data_path))
    wavetable = np.array(table['wavelength'])
    if nm is False:
        # Table wavelengths are in nm; convert to match Angstrom input.
        wavetable *= 10.
    # Stack the six polynomial coefficient columns into one 2D array.
    coeffs = np.array([table['A{0}'.format(ii)] for ii in range(6)])
    # Evaluate the polynomial in mu with coefficients interpolated to `wave`.
    factor = np.zeros(wave.size, dtype='float64')
    for ii, row in enumerate(coeffs):
        factor += np.interp(wave, wavetable, row) * mu**ii
    return factor
|
{"hexsha": "d12166bc46b76e27593011c00488b0947ae75c06", "size": 13208, "ext": "py", "lang": "Python", "max_stars_repo_path": "ISPy/spec/calib.py", "max_stars_repo_name": "cdiazbas/ISPy", "max_stars_repo_head_hexsha": "de3f5deb2a1aa8a022f5a13d749fb6c21d08235f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-27T05:29:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T10:55:34.000Z", "max_issues_repo_path": "ISPy/spec/calib.py", "max_issues_repo_name": "cdiazbas/ISPy", "max_issues_repo_head_hexsha": "de3f5deb2a1aa8a022f5a13d749fb6c21d08235f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2019-10-21T13:57:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-19T11:13:35.000Z", "max_forks_repo_path": "ISPy/spec/calib.py", "max_forks_repo_name": "cdiazbas/ISPy", "max_forks_repo_head_hexsha": "de3f5deb2a1aa8a022f5a13d749fb6c21d08235f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-08-28T11:01:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-20T14:31:04.000Z", "avg_line_length": 36.9971988796, "max_line_length": 117, "alphanum_fraction": 0.6600545124, "include": true, "reason": "import numpy,from scipy,import astropy,from astropy", "num_tokens": 3376}
|
import torch
import torch.nn as nn
import numpy as np
import random
import time
from dataset import ConcatDataset, ImglistToTensor, VideoFrameDataset, HeatmapDataset
from torchvision import transforms
from torch.utils.data import DataLoader
from FER.utils import ROOT_PATH, save_to_json
import torch.nn.functional as F
from utils import accuracy, device, AverageMeter, dir_path, write_log, save_checkpoint
from models.c3d import C3D_VIDEO, C3DFusionBaseline
import pandas as pd
import os
# Run from the project root so relative result/checkpoint paths resolve.
os.chdir(ROOT_PATH)

# set seed, make result reproducible
SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
# Force deterministic cuDNN kernels (may be slower, but reproducible).
torch.backends.cudnn.deterministic = True
def train(teacher_model, student_model, data_loader, criterion, optimizer, epoch=0, to_log=None, print_freq=25):
    """Run one epoch of knowledge-distillation training.

    The teacher (video model, kept in eval mode) produces pseudo-targets
    for the student (heatmap model); ``criterion`` combines the soft
    (teacher) and hard (label) losses.

    Args:
        teacher_model: pretrained video model, used in eval mode only.
        student_model: heatmap model being optimized.
        data_loader: yields ((azi, ele, targets), (video_frames, _)) pairs.
        criterion: callable (student_out, teacher_out, labels) -> loss tensor.
        optimizer: optimizer over the student parameters.
        epoch: epoch index, used only in log messages.
        to_log: optional log destination passed to write_log.
        print_freq: emit a progress line every ``print_freq`` batches.

    Returns:
        list of per-batch training-loss values.
    """
    # create Average Meters
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    train_loss = []

    # teacher stays frozen in eval mode; only the student trains
    teacher_model.eval()
    student_model.train()

    # record start time
    start = time.time()

    for i, conc_data in enumerate(data_loader):
        h_data, v_data = conc_data
        azi, ele, targets = h_data
        v_inputs, _ = v_data

        # prepare input and target
        azi = azi.to(device, dtype=torch.float)
        ele = ele.to(device, dtype=torch.float)
        # (B, T, C, H, W) -> (B, C, T, H, W) -- assumed layout for the 3D-conv
        # teacher; TODO confirm against VideoFrameDataset output
        v_inputs = torch.permute(v_inputs, (0, 2, 1, 3, 4)).to(device)
        targets = targets.to(device, dtype=torch.long)

        # measure data loading time
        data_time.update(time.time() - start)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward through both models, then backprop the distillation loss
        pseudo_targets = teacher_model(v_inputs)
        outputs = student_model(azi, ele)
        loss = criterion(outputs, pseudo_targets, targets)
        train_loss.append(loss.item())
        loss.backward()
        optimizer.step()

        # measure accuracy and record loss
        # NOTE(review): meters are updated with data_loader.batch_size even
        # for a smaller final batch, so the averages are slightly approximate.
        prec1, prec5 = accuracy(outputs.data, targets, topk=(1, 5))
        losses.update(loss.item(), data_loader.batch_size)
        top1.update(prec1.item(), data_loader.batch_size)
        top5.update(prec5.item(), data_loader.batch_size)

        # measure elapsed time
        batch_time.update(time.time() - start)
        start = time.time()

        # print training info
        if i % print_freq == 0:
            # renamed from `str`, which shadowed the builtin
            log_str = ('Epoch: [{0}][{1}/{2}]\t'
                       'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                       'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                       'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                       'Prec@1 {top1.val:3.3f} ({top1.avg:3.3f})\t'
                       'Prec@5 {top5.val:3.3f} ({top5.avg:3.3f})'.format(
                epoch, i, len(data_loader), batch_time=batch_time,
                data_time=data_time, loss=losses, top1=top1, top5=top5))
            print(log_str)
            if to_log is not None:
                write_log(log_str + '\n', to_log)

    return train_loss
def test(model, test_loader, criterion, to_log=None):
    """Evaluate `model` on `test_loader`.

    Returns (average_loss, accuracy_percent). The loss is normalized per
    sample and rescaled by the loader's batch size, matching the training
    logs' scale.
    """
    model.eval()
    total_loss = 0
    num_correct = 0
    with torch.no_grad():
        for azi, ele, target in test_loader:
            # move the batch onto the compute device
            azi = azi.to(device, dtype=torch.float)
            ele = ele.to(device, dtype=torch.float)
            target = target.to(device, dtype=torch.long)

            output = model(azi, ele)
            total_loss += criterion(output, target)

            # predicted class = index of the max log-probability
            prediction = output.argmax(dim=1, keepdim=True)
            num_correct += prediction.eq(target.view_as(prediction)).sum().item()

    n_samples = len(test_loader.sampler)
    # NOTE(review): rescaling by batch_size assumes full batches; the last
    # (possibly smaller) batch makes this slightly approximate.
    total_loss = total_loss / n_samples * test_loader.batch_size
    acc = 100. * num_correct / n_samples

    format_str = 'Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        total_loss, num_correct, n_samples, acc)
    print(format_str)
    if to_log is not None:
        write_log(format_str, to_log)
    return total_loss.item(), acc
def _make_criterion(alpha=0.5, T=4.0, mode='cse'):
def criterion(outputs, targets, labels):
if mode == 'cse':
_p = F.log_softmax(outputs / T, dim=1)
_q = F.softmax(targets / T, dim=1)
_soft_loss = -torch.mean(torch.sum(_q * _p, dim=1))
elif mode == 'mse':
_p = F.softmax(outputs / T, dim=1)
_q = F.softmax(targets / T, dim=1)
_soft_loss = nn.MSELoss()(_p, _q) / 2
else:
raise NotImplementedError()
_soft_loss = _soft_loss * T * T
_hard_loss = F.cross_entropy(outputs, labels)
loss = alpha * _soft_loss + (1. - alpha) * _hard_loss
return loss
return criterion
if __name__ == "__main__":
    # Training configuration: video teacher distills into heatmap student.
    config = dict(num_epochs=60,
                  lr=0.001,
                  lr_step_size=20,
                  lr_decay_gamma=0.2,
                  batch_size=16,
                  num_classes=7,
                  v_num_frames=30,
                  h_num_frames=100,
                  imag_size=224,
                  weight_alpha=0.7,
                  softmax_temperature=16.0,
                  loss_mode='cse')

    # results dir
    result_dir = "FER/results"
    path = dir_path("C3D_Supervision", result_dir)

    # save training config
    save_to_json(config, path['config'])

    # load data
    # NOTE(review): absolute machine-specific Windows paths; consider making
    # these CLI arguments.
    videos_root = 'C:\\Users\\Zber\\Desktop\\Subjects_Frames\\'
    v_train_ann = os.path.join(videos_root, 'annotations_att_train.txt')
    v_test_ann = os.path.join(videos_root, 'annotations_att_test.txt')

    heatmap_root = "C:/Users/Zber/Desktop/Subjects_Heatmap"
    h_train_ann = os.path.join(heatmap_root, "heatmap_annotation_train.txt")
    h_test_ann = os.path.join(heatmap_root, "heatmap_annotation_test.txt")

    preprocess = transforms.Compose([
        ImglistToTensor(),  # list of PIL images to (FRAMES x CHANNELS x HEIGHT x WIDTH) tensor
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # video datasets
    video_train = VideoFrameDataset(
        root_path=videos_root,
        annotationfile_path=v_train_ann,
        num_segments=1,
        frames_per_segment=config['v_num_frames'],
        imagefile_template='frame_{0:012d}.jpg',
        transform=preprocess,
        random_shift=False,
        test_mode=False
    )

    video_test = VideoFrameDataset(
        root_path=videos_root,
        annotationfile_path=v_test_ann,
        num_segments=1,
        frames_per_segment=config['v_num_frames'],
        imagefile_template='frame_{0:012d}.jpg',
        transform=preprocess,
        random_shift=False,
        test_mode=True
    )

    # heatmap datasets
    heatmap_train = HeatmapDataset(heatmap_root, h_train_ann)
    heatmap_test = HeatmapDataset(heatmap_root, h_test_ann)

    # pair heatmap and video samples so each training batch has both modalities
    dataset_train = ConcatDataset(heatmap_train, video_train)

    train_loader = DataLoader(dataset_train, num_workers=4, pin_memory=True, batch_size=config['batch_size'])
    test_loader = DataLoader(heatmap_test, num_workers=4, pin_memory=True, batch_size=config['batch_size'])

    # create model
    teacher_model = C3D_VIDEO(sample_size=config['imag_size'], sample_duration=config['v_num_frames'], num_classes=config['num_classes'])
    # load pretrained teacher weights; fail fast if the checkpoint is missing
    checkpoint = os.path.join(result_dir, "C3D_Video_att_20211205-002110", 'best_model.pt')
    assert os.path.exists(checkpoint), 'Error: no checkpoint directory found!'
    teacher_model.load_state_dict(torch.load(checkpoint))
    teacher_model = teacher_model.to(device)

    student_model = C3DFusionBaseline(sample_duration=config['h_num_frames'], num_classes=config['num_classes'])
    student_model = student_model.to(device)

    # initialize criterion and optimizer
    criterion = _make_criterion(alpha=config['weight_alpha'], T=config['softmax_temperature'], mode=config['loss_mode'])
    test_criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(student_model.parameters(), lr=config['lr'])
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=config['lr_step_size'], gamma=config['lr_decay_gamma'])

    metrics_dic = {
        'loss': [],
        'precision': [],
    }

    best_acc = 0
    for epoch in range(config['num_epochs']):
        train_loss = train(teacher_model, student_model, data_loader=train_loader, criterion=criterion,
                           optimizer=optimizer, epoch=epoch,
                           to_log=path['log'])
        test_loss, acc = test(student_model, test_loader=test_loader, criterion=test_criterion, to_log=path['log'])
        # keep the best-accuracy checkpoint; always persist the latest weights
        if acc >= best_acc:
            best_acc = acc
            save_checkpoint(student_model.state_dict(), is_best=True, checkpoint=path['dir'])
        else:
            save_checkpoint(student_model.state_dict(), is_best=False, checkpoint=path['dir'])
        lr_scheduler.step()
        metrics_dic['loss'].append(test_loss)
        metrics_dic['precision'].append(acc)

    # print best acc after training
    write_log("<<<<< Best Accuracy = {:.2f} >>>>>".format(best_acc), path['log'])

    # save csv log
    df = pd.DataFrame.from_dict(metrics_dic)
    df.to_csv(path['metrics'], sep='\t', encoding='utf-8')
|
{"hexsha": "f4e7796f80400d56dbe7187706e4d28f8241123f", "size": 9358, "ext": "py", "lang": "Python", "max_stars_repo_path": "FER/em_network/train_supervision.py", "max_stars_repo_name": "Zber5/OpenRadar", "max_stars_repo_head_hexsha": "701cf166203c3f3e1ba4873cd132a7ccba4f0863", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-09T18:40:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-09T18:40:24.000Z", "max_issues_repo_path": "FER/em_network/train_supervision.py", "max_issues_repo_name": "Zber5/OpenRadar", "max_issues_repo_head_hexsha": "701cf166203c3f3e1ba4873cd132a7ccba4f0863", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FER/em_network/train_supervision.py", "max_forks_repo_name": "Zber5/OpenRadar", "max_forks_repo_head_hexsha": "701cf166203c3f3e1ba4873cd132a7ccba4f0863", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-13T05:33:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-13T05:33:50.000Z", "avg_line_length": 35.7175572519, "max_line_length": 137, "alphanum_fraction": 0.6364607822, "include": true, "reason": "import numpy", "num_tokens": 2265}
|
import argparse
import datetime
import gym
import numpy as np
import itertools
import torch
import json
import matplotlib.pyplot as plt
import pandas as pd
from BAC import BAC
from tensorboardX import SummaryWriter
from replay_memory import ReplayMemory
import pybullet as p2
# NOTE(review): connecting in UDP mode at import time is unusual for local
# training -- confirm this is intentional (DIRECT/GUI are the common modes).
p2.connect(p2.UDP)
import pybullet_envs
import torch.optim as optim
# reconstruction loss for the autoencoder
l=torch.nn.MSELoss()
# NOTE(review): `ll` appears unused in this file chunk.
ll=torch.nn.PairwiseDistance(p=2,keepdim=True)
from autoencoder import autoencoder
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument('--env-name', default="HalfCheetahBulletEnv-v0",
help='Mujoco Gym environment (default: HalfCheetah-v2)')
parser.add_argument('--policy', default="Gaussian",
help='Policy Type: Gaussian | Deterministic (default: Gaussian)')
parser.add_argument('--eval', type=bool, default=True,
help='Evaluates a policy a policy every 10 episode (default: True)')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor for reward (default: 0.99)')
parser.add_argument('--tau', type=float, default=0.005, metavar='G',
help='target smoothing coefficient(τ) (default: 0.005)')
parser.add_argument('--lr', type=float, default=0.0003, metavar='G',
help='learning rate (default: 0.0003)')
parser.add_argument('--alpha', type=float, default=0.03, metavar='G', #0.05 hopper, 0.04 walker, 0.05 reacher, 0.26 Ant with normalization, 0.03 HalfCheetah
help='Temperature parameter α determines the relative importance of the entropy\
term against the reward (default: 0.2)')
parser.add_argument('--automatic_entropy_tuning', type=bool, default=False, metavar='G',
help='Automaically adjust α (default: False)')
parser.add_argument('--seed', type=int, default=50, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
help='batch size (default: 256)')
parser.add_argument('--num_steps', type=int, default=1500000, metavar='N',
help='maximum number of steps (default: 1000000)')
parser.add_argument('--hidden_size', type=int, default=256, metavar='N',
help='hidden size (default: 256)')
parser.add_argument('--updates_per_step', type=int, default=1, metavar='N', ####################4 for humanoid
help='model updates per simulator step (default: 1)')
parser.add_argument('--start_steps', type=int, default=10000, metavar='N',
help='Steps sampling random actions (default: 10000)')
parser.add_argument('--target_update_interval', type=int, default=1, metavar='N',
help='Value target update per no. of updates per step (default: 1)')
parser.add_argument('--replay_size', type=int, default=1000000, metavar='N',
help='size of replay buffer (default: 10000000)')
parser.add_argument('--cuda', action="store_true",
help='run on CUDA (default: False)')
args = parser.parse_args()
# NOTE(review): `score` and `sc` appear unused in this file chunk.
score=[]
sc=[]
# Environment
env = gym.make(args.env_name)

#seed
torch.manual_seed(args.seed)
np.random.seed(args.seed)
env.seed(args.seed)

# Agent
agent = BAC(env.observation_space.shape[0], env.action_space, args)
# autoencoder over concatenated (state, action) vectors
enco=autoencoder(env.observation_space.shape[0]+env.action_space.shape[0])
c=optim.Adam(enco.parameters(), lr=0.003, weight_decay=0.001)

#TensorboardX
writer = SummaryWriter(logdir='runs/{}_BAC_{}_{}_{}'.format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), args.env_name,
                                                             args.policy, "autotune" if args.automatic_entropy_tuning else ""))

# Memory
memory = ReplayMemory(args.replay_size)

# Training Loop
total_numsteps = 0
updates = 0
# NOTE(review): `x` is only incremented, never read -- candidate for removal.
x=0

for i_episode in itertools.count(1):
    episode_reward = 0
    episode_steps = 0
    done = False
    state = env.reset()

    while not done:
        if args.start_steps > total_numsteps:
            action = env.action_space.sample()  # Sample random action during warm-up
        else:
            action = agent.select_action(state)  # Sample action from policy

        if len(memory) > args.batch_size:
            # Number of updates per step in environment
            for i in range(args.updates_per_step):
                # Update parameters of all the networks; after 30k steps the
                # autoencoder-augmented update is used.
                if total_numsteps>30000:
                    critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha = agent.update_parametersafter(memory, args.batch_size, updates,env,enco)
                else:
                    critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha = agent.update_parametersbefore(memory, args.batch_size, updates)

                writer.add_scalar('loss/critic_1', critic_1_loss, updates)
                writer.add_scalar('loss/critic_2', critic_2_loss, updates)
                writer.add_scalar('loss/policy', policy_loss, updates)
                writer.add_scalar('loss/entropy_loss', ent_loss, updates)
                writer.add_scalar('entropy_temprature/alpha', alpha, updates)
                updates += 1

        next_state, reward, done, _ = env.step(action.flatten()) # Step
        episode_steps += 1
        total_numsteps += 1
        x+=1
        episode_reward += reward

        # mask 1 when the episode ended only because of the time limit
        mask = 1 if episode_steps == env._max_episode_steps else float(not done)

        memory.push(state, action, reward, next_state, mask) # Append transition to memory

        state = next_state

    if total_numsteps > args.num_steps:
        break

    writer.add_scalar('reward/train', episode_reward, i_episode)

    #Update the autoencoder network Periodically
    if i_episode% 5 ==0 and total_numsteps>args.start_steps :
        episodes=15
        for _ in range(episodes):
            state = env.reset()
            episode_reward = 0
            done = False
            while not done:
                action = agent.select_action(state, evaluate=True)
                next_state, reward, done, _ = env.step(action.flatten())
                # train the autoencoder to reconstruct (state, action) pairs
                a=torch.Tensor(action.flatten()).unsqueeze(0)
                cat=torch.cat((torch.Tensor(state).unsqueeze(0),a),dim=-1)
                s=enco(cat)
                f=l(cat,s)
                c.zero_grad()
                f.backward(retain_graph=True)
                c.step()

                state = next_state
    # checkpoint every episode
    agent.save_model(args.env_name,enco,suffix="")

env.close()
|
{"hexsha": "c0faa7bbcee9d8c13113f21586e77ab7faab8550", "size": 6706, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "AmmarFayad/Behavioral-Actor-Critic", "max_stars_repo_head_hexsha": "30ee138e5f10d03956aa8ac2f13fb01ab53d412b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-03T19:07:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T19:07:46.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "AmmarFayad/Behavioral-Actor-Critic", "max_issues_repo_head_hexsha": "30ee138e5f10d03956aa8ac2f13fb01ab53d412b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "AmmarFayad/Behavioral-Actor-Critic", "max_forks_repo_head_hexsha": "30ee138e5f10d03956aa8ac2f13fb01ab53d412b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9125, "max_line_length": 156, "alphanum_fraction": 0.6266030421, "include": true, "reason": "import numpy", "num_tokens": 1517}
|
# Ignoring some linting rules in tests
# pylint: disable=redefined-outer-name
# pylint: disable=missing-docstring
import numpy as np
import pytest
from bingo.symbolic_regression.agraph.operator_definitions import *
from bingo.symbolic_regression.agraph.evaluation_backend \
import evaluation_backend as py_eval_backend
try:
from bingocpp import evaluation_backend as cpp_eval_backend
except ImportError:
cpp_eval_backend = None
# Parametrization value for the C++ backend; skipped when bingocpp is absent.
CPP_PARAM = pytest.param("Cpp",
                         marks=pytest.mark.skipif(not cpp_eval_backend,
                                                  reason='BingoCpp import '
                                                         'failure'))

# All opcodes exercised by the tests below.
# NOTE(review): this list appears unused in this file chunk, and it lists
# POWER while the fixture below uses SAFE_POWER -- confirm intent.
OPERATOR_LIST = [INTEGER, VARIABLE, CONSTANT, ADDITION, SUBTRACTION,
                 MULTIPLICATION, DIVISION, SIN, COS, EXPONENTIAL, LOGARITHM,
                 POWER, ABS, SQRT]
@pytest.fixture(params=["Python", CPP_PARAM])
def engine(request):
    # Parametrizes every test over both backend names.
    return request.param
@pytest.fixture
def eval_backend(engine):
    """Resolve the parametrized engine name to its evaluation backend module."""
    return py_eval_backend if engine == "Python" else cpp_eval_backend
@pytest.fixture
def all_funcs_command_array():
    # Stack program: each row is [opcode, param1, param2]; rows reference
    # earlier rows by index, chaining every operator into one expression.
    return np.array([[INTEGER, 5, 5],
                     [VARIABLE, 0, 0],
                     [CONSTANT, 0, 0],
                     [ADDITION, 1, 0],
                     [SUBTRACTION, 2, 3],
                     [MULTIPLICATION, 4, 1],
                     [DIVISION, 5, 1],
                     [SIN, 6, 0],
                     [COS, 7, 0],
                     [EXPONENTIAL, 8, 0],
                     [LOGARITHM, 9, 0],
                     [SAFE_POWER, 10, 0],
                     [ABS, 11, 0],
                     [SQRT, 12, 0]])
@pytest.fixture
def higher_dim_command_array():
    # Encodes f(x; c) = c0*x0 + c1*x1 using two variables and two constants.
    return np.array([[VARIABLE, 0, 0],
                     [VARIABLE, 1, 1],
                     [CONSTANT, 0, 0],
                     [CONSTANT, 1, 1],
                     [MULTIPLICATION, 0, 2],
                     [MULTIPLICATION, 1, 3],
                     [ADDITION, 4, 5]])
@pytest.fixture
def sample_x():
    # Two-column sample input (11 rows); appears unused in this file chunk.
    return np.vstack((np.linspace(-1.0, 0.0, 11),
                      np.linspace(0.0, 1.0, 11))).transpose()
@pytest.fixture
def sample_constants():
    # Two-constant vector; appears unused in this file chunk.
    return np.array([10, 3.14])
def test_all_funcs_eval(eval_backend, all_funcs_command_array):
    # Evaluate the all-operator program on x = 1..5 with one constant and
    # compare against precomputed reference values.
    x = np.arange(1, 6).reshape((-1, 1))
    constants = (10, )
    expected_f_of_x = np.array([[0.45070097],
                                [0.9753327],
                                [0.29576841],
                                [0.36247937],
                                [1.0]])
    f_of_x = eval_backend.evaluate(all_funcs_command_array,
                                   x, constants)
    np.testing.assert_array_almost_equal(f_of_x, expected_f_of_x)
def test_higher_dim_func_eval(eval_backend, higher_dim_command_array):
    """Two variables, two constants: the program computes c0*x0 + c1*x1."""
    x = np.arange(8).reshape((-1, 2))
    consts = (10, 100)
    expected = np.sum(x * consts, axis=1).reshape((-1, 1))
    result = eval_backend.evaluate(higher_dim_command_array, x, consts)
    np.testing.assert_array_almost_equal(result, expected)
def test_all_funcs_deriv_x(eval_backend, all_funcs_command_array):
    # Evaluate with derivative w.r.t. x (last arg True) and compare both the
    # values and df/dx against precomputed references.
    x = np.arange(1, 6).reshape((-1, 1))
    constants = (10, )
    expected_f_of_x = np.array([[0.45070097],
                                [0.9753327],
                                [0.29576841],
                                [0.36247937],
                                [1.0]])
    expected_df_dx = np.array([[0.69553357],
                               [-0.34293336],
                               [-0.39525239],
                               [0.54785643],
                               [0.0]])
    f_of_x, df_dx = eval_backend.evaluate_with_derivative(
        all_funcs_command_array, x, constants, True)
    np.testing.assert_array_almost_equal(f_of_x, expected_f_of_x)
    np.testing.assert_array_almost_equal(df_dx, expected_df_dx)
def test_all_funcs_deriv_c(eval_backend, all_funcs_command_array):
    # Same program, but derivative w.r.t. the constants (last arg False).
    x = np.arange(1, 6).reshape((-1, 1))
    constants = (10, )
    expected_f_of_x = np.array([[0.45070097],
                                [0.9753327],
                                [0.29576841],
                                [0.36247937],
                                [1.0]])
    expected_df_dc = np.array([[-0.69553357],
                               [0.34293336],
                               [0.39525239],
                               [-0.54785643],
                               [0.]])
    f_of_x, df_dc = eval_backend.evaluate_with_derivative(
        all_funcs_command_array, x, constants, False)
    np.testing.assert_array_almost_equal(f_of_x, expected_f_of_x)
    np.testing.assert_array_almost_equal(df_dc, expected_df_dc)
def test_higher_dim_func_deriv_x(eval_backend, higher_dim_command_array):
    # For f = c0*x0 + c1*x1, df/dx is the constant vector for every row.
    x = np.arange(8).reshape((4, 2))
    constants = (10, 100)
    expected_f_of_x = np.sum(x*constants, axis=1).reshape((-1, 1))
    expected_df_dx = np.array([constants]*4)
    f_of_x, df_dx = eval_backend.evaluate_with_derivative(
        higher_dim_command_array, x, constants, True)
    np.testing.assert_array_almost_equal(f_of_x, expected_f_of_x)
    np.testing.assert_array_almost_equal(df_dx, expected_df_dx)
def test_higher_dim_func_deriv_c(eval_backend, higher_dim_command_array):
    """For f = c0*x0 + c1*x1, df/dc equals the input matrix x itself."""
    x = np.arange(8).reshape((4, 2))
    consts = (10, 100)
    expected_values = np.sum(x * consts, axis=1).reshape((-1, 1))
    values, deriv_c = eval_backend.evaluate_with_derivative(
        higher_dim_command_array, x, consts, False)
    np.testing.assert_array_almost_equal(values, expected_values)
    np.testing.assert_array_almost_equal(deriv_c, x)
|
{"hexsha": "c0f1b36e4107985e0253c6091144f0c62d3aff59", "size": 5780, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/symbolic_regression/agraph/evaluation_backend/test_evaluation_backend.py", "max_stars_repo_name": "nolanstr/bingo_multi_stage", "max_stars_repo_head_hexsha": "7a88c4f5c59268d0612664be5864765db2edad51", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/unit/symbolic_regression/agraph/evaluation_backend/test_evaluation_backend.py", "max_issues_repo_name": "nolanstr/bingo_multi_stage", "max_issues_repo_head_hexsha": "7a88c4f5c59268d0612664be5864765db2edad51", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/unit/symbolic_regression/agraph/evaluation_backend/test_evaluation_backend.py", "max_forks_repo_name": "nolanstr/bingo_multi_stage", "max_forks_repo_head_hexsha": "7a88c4f5c59268d0612664be5864765db2edad51", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.900621118, "max_line_length": 76, "alphanum_fraction": 0.5576124567, "include": true, "reason": "import numpy", "num_tokens": 1382}
|
#include <iostream>
#include <ostream>
#include <limits>
#include <boost/test/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
#include <boost/range/adaptor/indexed.hpp>
#include <SFML/Graphics.hpp>
#include "../src/Vehicle.h"
#include "../src/VehicleFactory.h"
#include "../src/TTUtils.h"
#include "../src/Intersection.h"
#include "../src/PathFactory.h"
#include "Test.h"
using namespace std::string_literals;
namespace data = boost::unit_test::data;
// NOTE(review): adding new declarations to namespace std is undefined
// behavior per the C++ standard ([namespace.std]).  These stream inserters
// live here so Boost.Test can print the tt types; they could move into
// namespace tt and still be found through ADL.
namespace std
{

std::ostream& operator<<(std::ostream& out, const tt::TurningPoint& item)
{
    // Fields bind in declaration order: point, turn, decisionPoint.
    // (The old binding named the second field `directions` but never used
    // it, printing `item.turn` instead; bind it as `turn` and use it.)
    auto[point, turn, decisionPoint] = item;
    out << "{ point=" << point
        << " turnType=" << turn
        << " decisionPoint=" << decisionPoint
        << "}";

    return out;
}

// Print a sequence of turning points separated by single spaces.
std::ostream& operator<<(std::ostream& out, const tt::TurningPoints& item)
{
    for (const auto& i : item)
    {
        out << i << ' ';
    }

    return out;
}

// Print an Edge as {point,direction-as-integer}.
std::ostream& operator<<(std::ostream& out, const tt::TurningPointParser::Edge& item)
{
    out << '{' << std::get<0>(item) << ',' << static_cast<std::uint32_t>(std::get<1>(item)) << '}';
    return out;
}

}
BOOST_AUTO_TEST_SUITE(tt)
// (input string, expected parsed rect) pairs for the FloatRect parser.
// NOTE(review): "1,2,3,4" is expected to yield {1,2,2,2} -- presumably the
// parser treats the 3rd/4th fields as right/bottom and converts them to
// width/height; confirm against FloatRectParser.
const std::tuple<std::string, sf::FloatRect> floatRectParseData[] =
{
    { "0,0,0,0"s, sf::FloatRect{ 0.f, 0.f, 0.f, 0.f } },
    { "1,2,3,4"s, sf::FloatRect{ 1,2,2,2 } },
    { "1, 2 , 3, 4"s, sf::FloatRect{ 1,2,2,2 } },
    { "1.0, 02.00 , 3 , 004.0"s, sf::FloatRect{ 1,2,2,2 } },
};
// --run_test=tt/testFloatRectParse
// Parse each sample with the x3 FloatRectParser and check the result;
// the parser must consume the entire input.
BOOST_DATA_TEST_CASE(testFloatRectParse, data::make(floatRectParseData), original, expected)
{
    std::string::const_iterator start = std::begin(original);
    const std::string::const_iterator stop = std::end(original);

    sf::FloatRect rect;
    bool result =
        phrase_parse(start, stop, FloatRectParser, x3::ascii::space, rect);

    BOOST_TEST_REQUIRE(result);
    // phrase_parse advances `start`; reaching `stop` means full consumption
    BOOST_REQUIRE(start == stop);
    BOOST_TEST(rect == expected);
}
// (intersection description string, expected turning points) samples.
const std::tuple<std::string, std::vector<tt::TurningPoint>> parseIntersectionTestData[] =
{
    {
        "2,2,L0,single,single",
        {
            { sf::Vector2i{3,2}, tt::Direction::UP, false },
            { sf::Vector2i{2,3}, tt::Direction::RIGHT, false },
        }
    }
};
// Strict weak ordering for TurningPoint: by point.x first, then point.y.
bool intersection_sorter(const tt::TurningPoint& lhs, const tt::TurningPoint& rhs)
{
    return (lhs.point.x != rhs.point.x)
        ? (lhs.point.x < rhs.point.x)
        : (lhs.point.y < rhs.point.y);
}
// --run_test=tt/parse_IntersectionTest
// Parse the description, rebuild the intersection, and compare its turning
// points element-wise after sorting both sides into a canonical order.
BOOST_DATA_TEST_CASE(parse_IntersectionTest, data::make(parseIntersectionTestData), original, expectedv)
{
    tt::IntersectionParser parser;
    auto result = parser.parse(original.begin(), original.end());
    BOOST_TEST(result.has_value());

    auto[source, type, hzdbl, vtdbl] = *result;
    auto iv = tt::makeIntersection(sf::Vector2i{ source }, type, hzdbl, vtdbl);
    BOOST_TEST_REQUIRE(iv.size() == expectedv.size());
    std::sort(iv.begin(), iv.end(), &intersection_sorter);

    // I guess the test's variable is const?
    // (dataset samples are const; copy so it can be sorted)
    TurningPoints expected = expectedv;
    std::sort(expected.begin(), expected.end(), &intersection_sorter);
    // NOTE(review): redundant -- size already REQUIREd above.
    BOOST_TEST(iv.size() == expected.size());

    for (const auto& item : (iv | boost::adaptors::indexed()))
    {
        const auto& i = item.value();
        const auto& t = expected.at(item.index());
        BOOST_TEST(i.point == t.point);
        BOOST_TEST(i.decisionPoint == t.decisionPoint);
        BOOST_TEST(i.turn == t.turn);
    }
}
// (input string, expected turning point); the optional 4th field is the
// decision-point flag, defaulting to false when omitted.
const std::tuple<std::string, tt::TurningPoint> parseTurningPointTestData[] =
{
    {
        "2,2,right",
        tt::TurningPoint{ sf::Vector2i{2,2}, tt::Direction::RIGHT, false }
    },
    {
        "2,2,right,true",
        tt::TurningPoint{ sf::Vector2i{2,2}, tt::Direction::RIGHT, true }
    }
};
// --run_test=tt/parse_turningPointTest
// Parse a single turning-point string and compare each field.
BOOST_DATA_TEST_CASE(parse_turningPointTest, data::make(parseTurningPointTestData), original, expected)
{
    tt::TurningPointParser parser;
    auto start = std::begin(original);
    const auto stop = std::end(original);
    auto result = parser.parse(start, stop);
    BOOST_TEST(result.has_value());

    auto intersection = *result;
    BOOST_TEST(intersection.point == expected.point);
    BOOST_TEST(intersection.turn == expected.turn);
    BOOST_TEST(intersection.decisionPoint == expected.decisionPoint);
}
// (source tile, intersection type, expected turning points) samples for
// makeIntersection with default lane sizes.
const std::tuple<sf::Vector2i, tt::IntersectionType, tt::TurningPoints> intersectionTestData[] =
{
    {
        sf::Vector2i{ 2,2 },
        tt::IntersectionType::L0,
        {
            { sf::Vector2i{3,2}, tt::Direction::UP, false },
            { sf::Vector2i{2,3}, tt::Direction::RIGHT, false },
        }
    },
    {
        sf::Vector2i{ 2,2 },
        tt::IntersectionType::T0,
        {
            { sf::Vector2i{2,2}, tt::Direction::DOWN, true },
            { sf::Vector2i{3,2}, tt::Direction::LEFT, false },
            { sf::Vector2i{2,3}, tt::Direction::DOWN, true },
            { sf::Vector2i{3,3}, tt::Direction::RIGHT, true },
        }
    }
};
// --run_test=tt/intersectionTest
// Same sort-then-compare scheme as parse_IntersectionTest, but building the
// intersection directly.  NOTE(review): the verification loop is duplicated
// between the two tests -- candidate for a shared helper.
BOOST_DATA_TEST_CASE(intersectionTest, data::make(intersectionTestData), source, type, expectedv)
{
    auto iv = tt::makeIntersection(source, type);
    BOOST_TEST_REQUIRE(iv.size() == expectedv.size());
    std::sort(iv.begin(), iv.end(), &intersection_sorter);

    // I guess the test's variable is const?
    // (dataset samples are const; copy so it can be sorted)
    TurningPoints expected = expectedv;
    std::sort(expected.begin(), expected.end(), &intersection_sorter);
    // NOTE(review): redundant -- size already REQUIREd above.
    BOOST_TEST(iv.size() == expected.size());

    for (const auto& item : (iv | boost::adaptors::indexed()))
    {
        const auto& i = item.value();
        const auto& t = expected.at(item.index());
        BOOST_TEST(i.point == t.point);
        BOOST_TEST(i.decisionPoint == t.decisionPoint);
        BOOST_TEST(i.turn == t.turn);
    }
}
// (object rect, other rect, travel direction, look-ahead distance, expected
// blocked) samples for isPathBlocked.  All current samples expect `true`.
const std::tuple<sf::FloatRect, sf::FloatRect, tt::Direction, float, bool> vehicleBlocks[]
{
    // rects the same size
    {
        sf::FloatRect{ 0, 0, 10, 10},
        sf::FloatRect{ 20, 0, 10, 10},
        tt::Direction::RIGHT,
        15.0f,
        true
    },
    {
        sf::FloatRect{ 0, 0, 10, 10},
        sf::FloatRect{ 20, 5, 10, 10},
        tt::Direction::RIGHT,
        15.0f,
        true
    },
    {
        sf::FloatRect{ 0, 5, 10, 10},
        sf::FloatRect{ 20, 0, 10, 10},
        tt::Direction::RIGHT,
        15.0f,
        true
    },

    // `object` is taller
    {
        sf::FloatRect{ 0, 0, 10, 20},
        sf::FloatRect{ 20, 5, 15, 15},
        tt::Direction::RIGHT,
        15.0f,
        true
    },
    {
        sf::FloatRect{ 0, 2, 10, 18},
        sf::FloatRect{ 20, 0, 15, 5},
        tt::Direction::RIGHT,
        15.0f,
        true
    },
    {
        sf::FloatRect{ 0, 0, 10, 10},
        sf::FloatRect{ 20, 8, 15, 4},
        tt::Direction::RIGHT,
        15.0f,
        true
    },
};
// --run_test=tt/vehicleBlockedTest
// NOTE(review): no negative (non-blocked) samples exist in vehicleBlocks;
// consider adding some so a constant-true implementation would fail.
BOOST_DATA_TEST_CASE(vehicleBlockedTest, data::make(vehicleBlocks), object, other, direction, distance, blocked)
{
    BOOST_TEST(tt::isPathBlocked(object, other, direction, distance) == blocked);
}
// --run_test=tt/path_ForcedSingleLaneTest
// Build a path through three single-lane L-intersections and check the
// exact waypoint sequence.
BOOST_AUTO_TEST_CASE(path_ForcedSingleLaneTest)
{
    TurningPoints edges =
    {
        { { -1, 2 }, Direction::RIGHT, false },
    };

    TurningPoints inter;
    auto temp = tt::makeIntersection({ 2, 1 }, tt::IntersectionType::L180);
    inter.insert(inter.end(), temp.begin(), temp.end());

    temp = tt::makeIntersection({ 2, 7 }, tt::IntersectionType::L0);
    inter.insert(inter.end(), temp.begin(), temp.end());

    temp = tt::makeIntersection({ 5, 7 }, tt::IntersectionType::L270);
    inter.insert(inter.end(), temp.begin(), temp.end());

    tt::PathFactory fact{ sf::Vector2i{10, 10} };
    fact.setEdges(edges);
    fact.setTurningPoints(inter);

    auto path = fact.makeRiboPath();
    std::vector<sf::Vector2f> expected =
    {
        {-1,2}, {2,2}, {2,8}, {6,8},{6,-1}
    };

    BOOST_TEST(path.points() == expected);
}
// --run_test=tt/path_forcedDoubleLaneTest
// Same layout as the single-lane test, but with DOUBLE lane sizes, which
// shifts the expected waypoints.
BOOST_AUTO_TEST_CASE(path_forcedDoubleLaneTest)
{
    TurningPoints edges =
    {
        { { -1, 3 }, Direction::RIGHT, false },
    };

    TurningPoints inter;
    auto temp = tt::makeIntersection({ 2, 1 }, tt::IntersectionType::L180, tt::LaneSize::DOUBLE, tt::LaneSize::DOUBLE);
    inter.insert(inter.end(), temp.begin(), temp.end());

    temp = tt::makeIntersection({ 2, 7 }, tt::IntersectionType::L0, tt::LaneSize::DOUBLE, tt::LaneSize::DOUBLE);
    inter.insert(inter.end(), temp.begin(), temp.end());

    temp = tt::makeIntersection({ 5, 7 }, tt::IntersectionType::L270, tt::LaneSize::DOUBLE, tt::LaneSize::DOUBLE);
    inter.insert(inter.end(), temp.begin(), temp.end());

    tt::PathFactory fact{ sf::Vector2i{10, 10} };
    fact.setEdges(edges);
    fact.setTurningPoints(inter);

    auto path = fact.makeRiboPath();
    std::vector<sf::Vector2f> expected =
    {
        {-1,3}, {2,3}, {2,9}, {7,9},{7,-1}
    };

    BOOST_TEST(path.points() == expected);
}
// --run_test=tt/pathMakingTXTest
// Smoke test: only checks that makeRiboPath runs for a T90 intersection.
// TODO(review): no assertion on the produced path -- add an expected
// waypoint list once the intended result is known.
BOOST_AUTO_TEST_CASE(pathMakingTXTest)
{
    TurningPoints edges =
    {
        { { -1, 3 }, Direction::RIGHT, false },
    };

    TurningPoints inter;
    auto temp = tt::makeIntersection({ 2, 2 }, tt::IntersectionType::T90);
    inter.insert(inter.end(), temp.begin(), temp.end());

    tt::PathFactory fact{ sf::Vector2i{10, 10} };
    fact.setEdges(edges);
    fact.setTurningPoints(inter);

    auto path = fact.makeRiboPath();
}
// Smoke test for a single CROSS intersection.
// TODO(review): asserts nothing about the resulting path.
BOOST_AUTO_TEST_CASE(pathMakingSingleTXTest)
{
    TurningPoints edges =
    {
        { { -1, 4 }, Direction::RIGHT, false }
    };

    TurningPoints inter;
    auto temp = tt::makeIntersection({ 3, 3 }, tt::IntersectionType::CROSS);
    inter.insert(inter.end(), temp.begin(), temp.end());

    tt::PathFactory fact{ sf::Vector2i{7, 7} };
    fact.setEdges(edges);
    fact.setTurningPoints(inter);

    auto path = fact.makeRiboPath();
}
// --run_test=tt/pathMakingDoubleTXTest
// Smoke test for two CROSS intersections and two entry edges.
// TODO(review): asserts nothing about the resulting path.
BOOST_AUTO_TEST_CASE(pathMakingDoubleTXTest)
{
    TurningPoints edges =
    {
        { { -1, 4 }, Direction::RIGHT, false },
        { { -1, 8 }, Direction::RIGHT, false },
    };

    TurningPoints inter;
    auto temp = tt::makeIntersection({ 3, 3 }, tt::IntersectionType::CROSS);
    inter.insert(inter.end(), temp.begin(), temp.end());

    temp = tt::makeIntersection({ 3, 7 }, tt::IntersectionType::CROSS);
    inter.insert(inter.end(), temp.begin(), temp.end());

    tt::PathFactory fact{ sf::Vector2i{11, 11} };
    fact.setEdges(edges);
    fact.setTurningPoints(inter);

    auto path = fact.makeRiboPath();
}
// getDirection returns a Direction bitmask; diagonal moves set two flags.
BOOST_AUTO_TEST_CASE(getDirectionTest)
{
    sf::Vector2f startf{ 0.0f, 0.0f };
    sf::Vector2f stopf{ 1.0f, 1.0f };

    auto direction = tt::getDirection(startf, stopf);
    BOOST_TEST(direction & Direction::RIGHT);
    BOOST_TEST(direction & Direction::DOWN);

    direction = tt::getDirection(stopf, startf);
    BOOST_TEST(direction & Direction::LEFT);
    BOOST_TEST(direction & Direction::UP);

    direction = tt::getDirection(sf::Vector2i{ -1, -1 }, sf::Vector2i{ 1,1 });
    BOOST_TEST(direction & Direction::RIGHT);
    BOOST_TEST(direction & Direction::DOWN);

    direction = tt::getDirection(sf::Vector2i{ 0, 10 }, sf::Vector2i{ 0, 100 });
    BOOST_TEST(direction & Direction::DOWN);
    // NOTE(review): `direction ^ Flag` is non-zero whenever direction != Flag,
    // which is weaker than asserting the flag is NOT set; `!(direction & Flag)`
    // was probably intended -- confirm before changing.
    BOOST_TEST(direction ^ Direction::UP);
    BOOST_TEST(direction ^ Direction::LEFT);
    BOOST_TEST(direction ^ Direction::RIGHT);
}
// exactly_one_bit_set: true only for exact powers of two (single flag).
BOOST_AUTO_TEST_CASE(oneBitSetTest)
{
    BOOST_TEST(tt::exactly_one_bit_set(1) == true);
    BOOST_TEST(tt::exactly_one_bit_set(2) == true);
    BOOST_TEST(tt::exactly_one_bit_set(16) == true);
    BOOST_TEST(tt::exactly_one_bit_set(0) == false);
    BOOST_TEST(tt::exactly_one_bit_set(22) == false);
    BOOST_TEST(tt::exactly_one_bit_set(std::numeric_limits<int>::max()) == false);

    // a purely horizontal move should yield a single direction flag
    auto direction = tt::getDirection(sf::Vector2i{ -1,11 }, sf::Vector2i{ 10,11 });
    BOOST_TEST(tt::exactly_one_bit_set(direction) == true);
}
BOOST_AUTO_TEST_SUITE_END() // tt
|
{"hexsha": "1756630ec365d44d2137fa6e9bdb0edb9a901a3a", "size": 11989, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/test_misc.cpp", "max_stars_repo_name": "zethon/ttvg", "max_stars_repo_head_hexsha": "51d79ee3154669447dd522731aa0f7057e723abd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-11-02T20:51:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T21:53:41.000Z", "max_issues_repo_path": "tests/test_misc.cpp", "max_issues_repo_name": "zethon/ttvg", "max_issues_repo_head_hexsha": "51d79ee3154669447dd522731aa0f7057e723abd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 41.0, "max_issues_repo_issues_event_min_datetime": "2020-07-20T16:37:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T00:52:06.000Z", "max_forks_repo_path": "tests/test_misc.cpp", "max_forks_repo_name": "zethon/ttvg", "max_forks_repo_head_hexsha": "51d79ee3154669447dd522731aa0f7057e723abd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-11-02T20:51:37.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-02T20:51:37.000Z", "avg_line_length": 28.0772833724, "max_line_length": 119, "alphanum_fraction": 0.6171490533, "num_tokens": 3372}
|
# Unit tests for Bio.Var: mutation counting/flagging and evolutionary
# distance estimators (proportions, Jukes-Cantor 69, Kimura 80).
# NOTE(review): written against the pre-0.7 `Base.Test` API and the
# deprecated `@test_approx_eq_eps` macro — keep in sync with the Julia
# version this package actually supports before modernizing.
module TestVar
using Base.Test
using Bio.Seq
using Bio.Var
@testset "Counting mutations" begin
    # Create a 20bp test DNA sequence pair containing every possible transition (4),
    # every possible transversion (8), and 2 gapped sites and 2 ambiguous sites.
    # This leaves 4 sites non-mutated/conserved.
    dnas = [dna"ATTG-ACCTGGNTTTCCGAA", dna"A-ACAGAGTATACRGTCGTC"]
    m1 = seqmatrix(dnas, :seq)
    # The same pair as RNA must yield identical counts.
    rnas = [rna"AUUG-ACCUGGNUUUCCGAA", rna"A-ACAGAGUAUACRGUCGUC"]
    m2 = seqmatrix(rnas, :seq)
    # Results are ([mutation count], [number of sites considered]).
    @test count_mutations(AnyMutation, dnas) == count_mutations(AnyMutation, rnas) == ([12], [16])
    @test count_mutations(AnyMutation, m1) == count_mutations(AnyMutation, m2) == ([12], [16])
    @test count_mutations(TransitionMutation, dnas) == count_mutations(TransitionMutation, rnas) == ([4], [16])
    @test count_mutations(TransitionMutation, m1) == count_mutations(TransitionMutation, m2) == ([4], [16])
    @test count_mutations(TransversionMutation, dnas) == count_mutations(TransversionMutation, rnas) == ([8], [16])
    @test count_mutations(TransversionMutation, m1) == count_mutations(TransversionMutation, m2) == ([8], [16])
    # Counting both kinds at once returns (transitions, transversions, sites),
    # independent of the order the two mutation types are given in.
    @test count_mutations(TransitionMutation, TransversionMutation, dnas) == count_mutations(TransitionMutation, TransversionMutation, rnas) == ([4], [8], [16])
    @test count_mutations(TransitionMutation, TransversionMutation, m1) == count_mutations(TransitionMutation, TransversionMutation, m2) == ([4], [8], [16])
    @test count_mutations(TransversionMutation, TransitionMutation, dnas) == count_mutations(TransversionMutation, TransitionMutation, rnas) == ([4], [8], [16])
    @test count_mutations(TransversionMutation, TransitionMutation, m1) == count_mutations(TransversionMutation, TransitionMutation, m2) == ([4], [8], [16])
    # Per-site flags: true where the site counts as mutated.
    ans = Bool[false, false, true, true, false, true, true, true, false, true, true, false, true, false, true, true, false, false, true, true]
    @test flagmutations(AnyMutation, m1)[1][:,1] == ans
    @test flagmutations(AnyMutation, m2)[1][:,1] == ans
end
@testset "Distance Computation" begin
    dnas1 = [dna"ATTG-ACCTGGNTTTCCGAA", dna"A-ACAGAGTATACRGTCGTC"]
    m1 = seqmatrix(dnas1, :seq)
    # dnas2 is ungapped (one ambiguous site), so 18 sites are usable; the
    # 5,5 arguments below request windowed distances (width 5, step 5).
    dnas2 = [dna"attgaacctggntttccgaa",
             dna"atacagagtatacrgtcgtc"]
    dnas3 = [dna"attgaacctgtntttccgaa",
             dna"atagaacgtatatrgccgtc"]
    m2 = seqmatrix(dnas2, :seq)
    @test distance(Count{AnyMutation}, dnas1) == ([12], [16])
    @test distance(Count{TransitionMutation}, dnas1) == ([4], [16])
    @test distance(Count{TransversionMutation}, dnas1) == ([8], [16])
    @test distance(Count{Kimura80}, dnas1) == ([4], [8], [16])
    @test distance(Count{AnyMutation}, m1) == ([12], [16])
    @test distance(Count{TransitionMutation}, m1) == ([4], [16])
    @test distance(Count{TransversionMutation}, m1) == ([8], [16])
    @test distance(Count{Kimura80}, m1) == ([4], [8], [16])
    # Windowed counts: four 5bp windows across the 20bp alignment.
    @test distance(Count{AnyMutation}, dnas2, 5, 5)[1][:] == [2, 4, 3, 3]
    @test distance(Count{AnyMutation}, dnas2, 5, 5)[2][:] == [5, 5, 3, 5]
    @test distance(Count{TransitionMutation}, dnas2, 5, 5)[1][:] == [0, 2, 1, 1]
    @test distance(Count{TransitionMutation}, dnas2, 5, 5)[2][:] == [5, 5, 3, 5]
    @test distance(Count{TransversionMutation}, dnas2, 5, 5)[1][:] == [2, 2, 2, 2]
    @test distance(Count{TransversionMutation}, dnas2, 5, 5)[2][:] == [5, 5, 3, 5]
    @test distance(Count{Kimura80}, dnas1) == ([4], [8], [16])
    @test distance(Count{AnyMutation}, dnas2) == ([12], [18])
    @test distance(Count{TransitionMutation}, dnas2) == ([4], [18])
    @test distance(Count{TransversionMutation}, dnas2) == ([8], [18])
    @test distance(Count{Kimura80}, dnas2) == ([4], [8], [18])
    @test distance(Count{AnyMutation}, m2) == ([12], [18])
    @test distance(Count{TransitionMutation}, m2) == ([4], [18])
    @test distance(Count{TransversionMutation}, m2) == ([8], [18])
    @test distance(Count{Kimura80}, m2) == ([4], [8], [18])
    d = distance(Proportion{AnyMutation}, dnas2, 5, 5)
    a = [0.4, 0.8, 1.0, 0.6]
    for i in 1:length(d[1])
        @test_approx_eq_eps d[1][i] a[i] 1e-4
    end
    @test d[2][:] == [5, 5, 3, 5]
    d = distance(Proportion{TransitionMutation}, dnas2, 5, 5)
    a = [0.0, 0.4, 0.333333, 0.2]
    for i in 1:length(d[1])
        @test_approx_eq_eps d[1][i] a[i] 1e-4
    end
    @test d[2][:] == [5, 5, 3, 5]
    d = distance(Proportion{TransversionMutation}, dnas2, 5, 5)
    a = [0.4, 0.4, 0.666667, 0.4]
    for i in 1:length(d[1])
        @test_approx_eq_eps d[1][i] a[i] 1e-4
    end
    @test d[2][:] == [5, 5, 3, 5]
    @test distance(Proportion{AnyMutation}, dnas1) == ([(12 / 16)], [16])
    @test distance(Proportion{TransitionMutation}, dnas1) == ([(4 / 16)], [16])
    @test distance(Proportion{TransversionMutation}, dnas1) == ([(8 / 16)], [16])
    @test distance(Proportion{AnyMutation}, m1) == ([(12 / 16)], [16])
    @test distance(Proportion{TransitionMutation}, m1) == ([(4 / 16)], [16])
    @test distance(Proportion{TransversionMutation}, m1) == ([(8 / 16)], [16])
    @test distance(Proportion{AnyMutation}, dnas2) == ([(12 / 18)], [18])
    @test distance(Proportion{TransitionMutation}, dnas2) == ([(4 / 18)], [18])
    @test distance(Proportion{TransversionMutation}, dnas2) == ([(8 / 18)], [18])
    @test distance(Proportion{AnyMutation}, m2) == ([(12 / 18)], [18])
    @test distance(Proportion{TransitionMutation}, m2) == ([(4 / 18)], [18])
    @test distance(Proportion{TransversionMutation}, m2) == ([(8 / 18)], [18])
    @test distance(JukesCantor69, dnas1) == ([Inf], [Inf]) # Returns infinity as 12/16 is 0.75 - mutation saturation.
    @test distance(JukesCantor69, m1) == ([Inf], [Inf])
    @test round(distance(JukesCantor69, dnas2)[1][1], 3) == 1.648
    @test round(distance(JukesCantor69, dnas2)[2][1], 3) == 1
    @test round(distance(JukesCantor69, m2)[1][1], 3) == 1.648
    @test round(distance(JukesCantor69, m2)[2][1], 3) == 1
    # Some windows of dnas2 are fully saturated (p >= 0.75) -> DomainError.
    @test_throws DomainError distance(JukesCantor69, dnas2, 5, 5)
    d = distance(JukesCantor69, dnas3, 5, 5)
    # Expected per-window distances (a) and variances (v).
    a = [0.232616, 0.571605, 0.44084, 0.571605]
    v = [0.0595041, 0.220408, 0.24, 0.220408]
    for i in 1:length(d[1])
        @test_approx_eq_eps d[1][i] a[i] 1e-5
        @test_approx_eq_eps d[2][i] v[i] 1e-5
    end
    @test round(distance(Kimura80, dnas2)[1][1], 3) == 1.648
    @test round(distance(Kimura80, dnas2)[2][1], 3) == 1
    @test round(distance(Kimura80, m2)[1][1], 3) == 1.648
    @test round(distance(Kimura80, m2)[2][1], 3) == 1
end
end # module TestVar
|
{"hexsha": "e2e8cdaf1fd3760dde768ce2921e33d5b53131ed", "size": 6524, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/var/runtests.jl", "max_stars_repo_name": "JuliaPackageMirrors/Bio.jl", "max_stars_repo_head_hexsha": "ad78989811179dc1fd314479f45311b90954d986", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/var/runtests.jl", "max_issues_repo_name": "JuliaPackageMirrors/Bio.jl", "max_issues_repo_head_hexsha": "ad78989811179dc1fd314479f45311b90954d986", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/var/runtests.jl", "max_forks_repo_name": "JuliaPackageMirrors/Bio.jl", "max_forks_repo_head_hexsha": "ad78989811179dc1fd314479f45311b90954d986", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.8015267176, "max_line_length": 160, "alphanum_fraction": 0.6327406499, "num_tokens": 2379}
|
# -*- coding:utf8 -*-
r'''
TODO: remove all tensorflow graph construction in `build_op_info`
'''
import os
import numpy as np
import idx2numpy as idx2np
import tensorflow as tf
from utensor_cgen.ir import OperationInfo, TensorInfo
from utensor_cgen.ir.converter import (AttrValueConverter, DataTypeConverter,
GenericTensorConverterMixin)
from utensor_cgen.logger import logger
from utensor_cgen.matcher import OpEqualityDelegate, _morphism
from utensor_cgen.transformer.optimizer import RefCntOptimizer
from utensor_cgen.utils import NamescopedKWArgsParser
from .snippets import * # pylint: disable=W0401,W0614
__all__ = ['OperatorFactory', 'OpNotSupportedError']
class OpNotSupportedError(Exception):
  """Raised when an op type has no registered uTensor operator."""
  pass
class OperatorFactory():
  """Registry that maps op-type strings to uTensor operator classes.

  Operator classes register themselves with the :meth:`register` class
  decorator; code generation then looks them up by their ``op_type``.
  """
  # Can easily do something smarter
  _operators = {}  # class-level registry shared by all instances: op_type -> class
  def createOperatorSnippet(self, op_info, **kwargs):
    """Instantiate the operator registered for ``op_info`` and return its snippet.

    :param op_info: an ``OperationInfo`` describing the op to generate.
    :raises ValueError: if ``op_info.op_type`` has no registered operator.
    """
    op_type = op_info.op_type
    if op_type not in self._operators:
      err_msg = "unsupported op type in uTensor: {op.name}, {op.op_type}".format(op=op_info)
      raise ValueError(err_msg)
    op = self._operators[op_type](op_info, **kwargs) # Create desired object
    return op.snippet # Ops know how to create their snippets
  @classmethod
  def get_operator(cls, op_type):
    """Return the operator class registered for ``op_type``.

    :raises OpNotSupportedError: if ``op_type`` is not registered.
    """
    op_cls = cls._operators.get(op_type)
    if op_cls is None:
      raise OpNotSupportedError(
        '{} not supported in utensor_cgen'.format(op_type)
      )
    return op_cls
  # Backward-compatible alias for the historical misspelling; prefer
  # get_operator in new code.
  get_opertor = get_operator
  @classmethod
  def build_op_info(cls, *args, ugraph, op_type, name, **kwargs):
    """Delegate ``OperationInfo`` construction to the operator for ``op_type``.

    :raises OpNotSupportedError: if ``op_type`` is not registered.
    """
    op_cls = cls._operators.get(op_type, None)
    if op_cls is None:
      err_msg = "unsupported op type in uTensor: {}".format(op_type)
      raise OpNotSupportedError(err_msg)
    return op_cls.build_op_info(ugraph, name, *args, **kwargs)
  @classmethod
  def register(cls, op_cls):
    """Class decorator: register ``op_cls`` under its ``op_type`` attribute."""
    cls._operators[op_cls.op_type] = op_cls
    return op_cls
  @classmethod
  def support_op_types(cls):
    """Return the set of all supported ops
    """
    return set(cls._operators.keys())
  @classmethod
  def is_supported(cls, op_type):
    """Return True if ``op_type`` is registered ('Placeholder' is built in)."""
    if op_type != 'Placeholder' and op_type not in cls._operators:
      return False
    return True
class _Operator(object):
def __init__(self):
self.name = ""
self._snippet = None
@property
def snippet(self):
return self._snippet
@classmethod
def build_op_info(cls, ugraph, name, *args, **kwargs):
raise NotImplementedError('%s does not have build_op_info method' % cls)
@OperatorFactory.register
@OpEqualityDelegate.is_compatible_with("Inline", _morphism.Const2InlineMorphism)
class _ConstOperator(_Operator):
  """Code generator for tf ``Const`` ops.

  Construction has a side effect: the constant's value is written to an
  idx file under ``kwargs['idx_dir']``; the generated snippet loads it
  from ``embed_data_dir`` at runtime.
  NOTE(review): does not call ``_Operator.__init__`` — confirm intended.
  """
  op_type = "Const"
  def __init__(self, op_info, **kwargs):
    # Const has no inputs; the single output tensor carries the value.
    out_tensor_info = op_info.output_tensors[0]
    out_tname, out_dtype = (out_tensor_info.name,
                            out_tensor_info.dtype)
    parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                    op_info.op_attr)
    ref_count = parser.get('ref_counts', [0])[0]
    # Tensor name with ':' and '/' replaced so it is usable as a file name.
    pre_tname = self._tf_prepare_tensor_name(out_tname)
    idx_fname = "{}.idx".format(pre_tname)
    idx_dir = kwargs['idx_dir']
    # On-device data path defaults to /fs/<idx_dir>; overridable via kwargs.
    embed_data_dir = kwargs.get('embed_data_dir',
                                os.path.join("/fs", idx_dir))
    self._snippet = CreateTensorIdxSnippet(embed_data_dir, out_tname,
                                           idx_fname=idx_fname,
                                           np_dtype=out_dtype,
                                           ref_count=ref_count)
    idx_path = os.path.join(idx_dir, idx_fname)
    value = op_info.op_attr['value'].value
    # Side effect: writes the tensor data to idx_path (idx2numpy format).
    self._tf_save_data(idx_path, value)
  @classmethod
  def build_op_info(cls, ugraph, name, value, **kwargs):
    """Build an ``OperationInfo`` for a Const holding numpy array ``value``."""
    generic_value = GenericTensorConverterMixin.__utensor_generic_type__(
      np_array=value
    )
    return OperationInfo(
      name=name,
      input_tensors=[],
      output_tensors=[
        TensorInfo(
          name='{}:0'.format(name),
          op_name=name,
          dtype=value.dtype,
          shape=list(value.shape),
          ugraph=ugraph
        )
      ],
      op_type=cls.op_type,
      op_attr={
        'value': AttrValueConverter.__utensor_generic_type__(
          value_name='tensor', value=generic_value
        ),
        'dtype': AttrValueConverter.__utensor_generic_type__(
          value_name='type', value=DataTypeConverter.get_tf_value(value.dtype)
        )
      },
      ugraph=ugraph,
      backend=kwargs.get('backend', 'tensorflow')
    )
  def _tf_prepare_tensor_name(self, tensor_name):
    """Replace all ':' and '/' with '_' in a given tensor name
    """
    prepared = tensor_name.replace(":", "_").replace("/", "_")
    return prepared
  def _tf_save_data(self, path, value):
    # value is a generic tensor wrapper; unwrap the underlying numpy array.
    np_array = value.np_array
    # idx2numpy cannot serialize 0-d arrays; promote scalars to 1-d first.
    if np_array.shape == ():
      np_array = np.array([np_array])
    with open(path, "wb") as fid:
      idx2np.convert_to_file(fid, np_array)
    logger.info("saving %s", path)
@OperatorFactory.register
@OpEqualityDelegate.is_associative(permutations=((0, 1), (1, 0)))
class _AddOperator(_Operator):
  """Element-wise addition operator (tf ``Add``)."""
  op_type = "Add" # tf op type

  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    input_names = [t_info.name for t_info in op_info.input_tensors]
    output_name = op_info.output_tensors[0].name
    tf_dtype = op_info.input_tensors[0].dtype
    parser = NamescopedKWArgsParser(
      RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr
    )
    # Optimizer-provided codegen hints (defaults when absent).
    self._snippet = AddOpSnippet(
      input_names,
      output_name,
      tf_dtype,
      parser.get('ref_counts', [0])[0],
      parser.get('to_eval', False),
    )

  @classmethod
  def build_op_info(cls, ugraph, name, tensor_x, tensor_y, **kwargs):
    """Create an ``OperationInfo`` adding ``tensor_x`` and ``tensor_y``.

    Output shape follows numpy broadcasting of the two input shapes and
    the output dtype is the numpy-promoted type of the input dtypes.
    """
    out_shape = np.broadcast(
      np.empty(tensor_x.shape), np.empty(tensor_y.shape)
    ).shape
    out_dtype = np.promote_types(tensor_x.dtype, tensor_y.dtype)
    out_tensor = TensorInfo(
      name='{}:0'.format(name),
      op_name=name,
      dtype=out_dtype,
      shape=list(out_shape),
      ugraph=ugraph
    )
    type_attr = AttrValueConverter.__utensor_generic_type__(
      value_name='type',
      value=DataTypeConverter.get_tf_value(out_dtype)
    )
    return OperationInfo(
      name=name,
      input_tensors=[tensor_x, tensor_y],
      output_tensors=[out_tensor],
      op_type=cls.op_type,
      op_attr={'T': type_attr},
      ugraph=ugraph,
      backend=kwargs.get('backend', 'tensorflow')
    )
@OperatorFactory.register
class _ArgMaxOperator(_Operator):
  """Code generator for the tf ``ArgMax`` op."""
  op_type = "ArgMax"
  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
    out_tensor_info = op_info.output_tensors[0]
    output, out_dtype = out_tensor_info.name, out_tensor_info.dtype
    in_dtype = op_info.input_tensors[0].dtype
    data_manager = kwargs['data_manager']
    parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                    op_info.op_attr,
                                    data_manager,
                                    op_info)
    # Optimizer-provided codegen hints (defaults when absent).
    ref_count = parser.get('ref_counts', [0])[0]
    to_eval = parser.get('to_eval', False)
    address = parser.get('address', [])
    self._snippet = ArgMaxOpSnippet(inputs, output, in_dtype, out_dtype, ref_count, to_eval, address)
  @classmethod
  def build_op_info(cls, ugraph, name, input_tensor, dtype=np.dtype('int64'), axis=0, **kwargs):
    """Build an ArgMax ``OperationInfo``.

    A plain int ``axis`` is first materialized as a Const op in ``ugraph``.
    The op is then run on a dummy array inside a scratch tf graph purely
    to infer the output shape and harvest the tf node attributes
    (see the module TODO about removing this graph construction).
    """
    if isinstance(axis, int):
      axis, = ugraph.add_op(
        np.array(axis, dtype=np.dtype('int32')),
        op_type='Const',
        name='{}/axis'.format(name)
      )
    dummy_in = np.empty(input_tensor.shape, dtype=input_tensor.dtype)
    graph = tf.Graph()
    with graph.as_default():
      dummy_out = tf.math.argmax(
        dummy_in,
        axis=axis.op.op_attr['value'].value.np_array,
        name='dummy',
        output_type=tf.as_dtype(dtype)
      )
    # The scratch graph contains exactly one node named 'dummy'.
    node_def = [node for node in graph.as_graph_def().node if node.name=='dummy'][0]
    output_shape = dummy_out.shape.as_list()
    op_attr = {
      k: AttrValueConverter.get_generic_value(v)
      for k, v in node_def.attr.items()
    }
    return OperationInfo(
      name=name,
      op_type=cls.op_type,
      input_tensors=[input_tensor, axis],
      output_tensors=[
        TensorInfo(
          name='{}:0'.format(name),
          op_name=name,
          dtype=dtype,
          shape=output_shape,
          ugraph=ugraph
        )
      ],
      op_attr=op_attr,
      ugraph=ugraph,
      backend=kwargs.get('backend', 'tensorflow')
    )
@OperatorFactory.register
class _DequantizeOperator(_Operator):
  """Code generator for the tf ``Dequantize`` op."""
  op_type = "Dequantize"

  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    input_names = [t_info.name for t_info in op_info.input_tensors]
    out_info = op_info.output_tensors[0]
    parser = NamescopedKWArgsParser(
      RefCntOptimizer.KWARGS_NAMESCOPE,
      op_info.op_attr,
      kwargs['data_manager'],
      op_info,
    )
    # Optimizer-provided codegen hints (defaults when absent).
    self._snippet = DequantizeOpSnippet(
      input_names,
      out_info.name,
      out_info.dtype,
      parser.get('ref_counts', [0])[0],
      parser.get('to_eval', False),
      parser.get('address', []),
    )
@OperatorFactory.register
class _MaxOperator(_Operator):
  """Code generator for the tf ``Max`` (reduce-max) op."""
  op_type = "Max"
  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
    out_tensor_info = op_info.output_tensors[0]
    data_manager = kwargs['data_manager']
    output, out_dtype, out_shape = (out_tensor_info.name,
                                    out_tensor_info.dtype,
                                    out_tensor_info.shape)
    # FIXME: automatic alloc for uTensor fail
    if not out_shape:
      out_shape = [1]
    parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                    op_info.op_attr,
                                    data_manager,
                                    op_info)
    # Optimizer-provided codegen hints (defaults when absent).
    ref_count = parser.get('ref_counts', [0])[0]
    to_eval = parser.get('to_eval', False)
    address = parser.get('address', [])
    self._snippet = MaxOpSnippet(inputs, output, out_dtype, out_shape, ref_count, to_eval, address)
  @classmethod
  def build_op_info(cls, ugraph, name, tensor, axis=-1, keepdims=False, **kwargs):
    """Build a Max ``OperationInfo``.

    A plain int ``axis`` is first materialized as a Const op in ``ugraph``;
    the reduction is then run on a dummy array in a scratch tf graph purely
    to infer the output shape and harvest the tf node attributes.
    """
    if isinstance(axis, int):
      axis, = ugraph.add_op(
        np.array(axis, dtype=np.dtype('int32')),
        op_type='Const',
        name='{}/axis'.format(name)
      )
    dummy_in = np.empty(tensor.shape, dtype=tensor.dtype)
    graph = tf.Graph()
    with graph.as_default():
      dummy_out = tf.reduce_max(
        dummy_in,
        axis=axis.op.op_attr['value'].value.np_array,
        keepdims=keepdims,
        name='dummy'
      )
    # The scratch graph contains exactly one node named 'dummy'.
    node_def = [node for node in graph.as_graph_def().node if node.name == 'dummy'][0]
    return OperationInfo(
      name=name,
      input_tensors=[tensor, axis],
      output_tensors=[
        TensorInfo(
          name='{}:0'.format(name),
          op_name=name,
          dtype=tensor.dtype,
          shape=dummy_out.shape.as_list(),
          ugraph=ugraph
        )
      ],
      op_type=cls.op_type,
      op_attr={
        k: AttrValueConverter.get_generic_value(v)
        for k, v in node_def.attr.items()
      },
      backend=kwargs.get('backend', 'tensorflow'),
      ugraph=ugraph
    )
@OperatorFactory.register
class _MinOperator(_Operator):
  """Code generator for the tf ``Min`` (reduce-min) op.

  NOTE(review): a second ``_MinOperator`` class is defined later in this
  file and re-registers op_type "Min" (with data_manager support), so
  this definition is effectively shadowed — confirm and remove one.
  """
  op_type = "Min"
  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
    out_info = op_info.output_tensors[0]
    output, out_dtype, out_shape = (out_info.name,
                                    out_info.dtype,
                                    out_info.shape)
    # FIXME: automatic alloc for uTensor fail
    if not out_shape:
      out_shape = [1]
    parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                    op_info.op_attr)
    # Optimizer-provided codegen hints (defaults when absent).
    ref_count = parser.get('ref_counts', [0])[0]
    to_eval = parser.get('to_eval', False)
    self._snippet = MinOpSnippet(inputs, output, out_dtype, out_shape, ref_count, to_eval)
  @classmethod
  def build_op_info(cls, ugraph, name, tensor, axis=-1, keepdims=False, **kwargs):
    """Build a Min ``OperationInfo``.

    A plain int ``axis`` is first materialized as a Const op in ``ugraph``;
    the reduction is then run on a dummy array in a scratch tf graph purely
    to infer the output shape and harvest the tf node attributes.
    """
    if isinstance(axis, int):
      axis, = ugraph.add_op(
        np.array(axis, dtype=np.dtype('int32')),
        op_type='Const',
        name='{}/axis'.format(name)
      )
    dummy_in = np.empty(tensor.shape, dtype=tensor.dtype)
    graph = tf.Graph()
    with graph.as_default():
      dummy_out = tf.reduce_min(
        dummy_in,
        axis=axis.op.op_attr['value'].value.np_array,
        keepdims=keepdims,
        name='dummy'
      )
    # The scratch graph contains exactly one node named 'dummy'.
    node_def = [node for node in graph.as_graph_def().node if node.name == 'dummy'][0]
    output_shape = dummy_out.shape.as_list()
    return OperationInfo(
      name=name,
      input_tensors=[tensor, axis],
      output_tensors=[
        TensorInfo(
          name='{}:0'.format(name),
          op_name=name,
          dtype=tensor.dtype,
          shape=output_shape,
          ugraph=ugraph,
        )
      ],
      op_type=cls.op_type,
      backend=kwargs.get('backend', 'tensorflow'),
      ugraph=ugraph,
      op_attr={
        k: AttrValueConverter.get_generic_value(v)
        for k, v in node_def.attr.items()
      }
    )
@OperatorFactory.register
class _MaxPool(_Operator):
  """Code generator for the tf ``MaxPool`` op."""
  op_type = "MaxPool"
  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
    output = op_info.output_tensors[0].name
    dtype = op_info.output_tensors[0].dtype
    # Pooling geometry comes straight from the graph's node attributes.
    ksize = op_info.op_attr['ksize'].value.ints_value
    strides = op_info.op_attr['strides'].value.ints_value
    padding = op_info.op_attr['padding'].value.decode('utf8')
    parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                    op_info.op_attr)
    # Optimizer-provided codegen hints (defaults when absent).
    ref_count = parser.get('ref_counts', [0])[0]
    to_eval = parser.get('to_eval', False)
    self._snippet = MaxPoolSnippet(inputs, output, dtype,
                                   ksize, strides, padding,
                                   ref_count, to_eval)
  @classmethod
  def build_op_info(
    cls,
    ugraph,
    name,
    tensor,
    ksize_height,
    ksize_width,
    stride_height,
    stride_width,
    padding='SAME',
    **kwargs
  ):
    """Build a MaxPool ``OperationInfo``.

    Runs the pooling on a dummy array in a scratch tf graph purely to
    infer the output shape and harvest the tf node attributes.
    """
    dummy_arr = np.empty(tensor.shape, dtype=tensor.dtype)
    graph = tf.Graph()
    with graph.as_default():
      tf_tensor = tf.nn.max_pool(
        dummy_arr,
        ksize=[1, ksize_height, ksize_width, 1],
        strides=[1, stride_height, stride_width, 1],
        padding=padding,
        name='dummy'
      )
    output_shape = tf_tensor.shape.as_list()
    graph_def = graph.as_graph_def()
    # The scratch graph contains exactly one node named 'dummy'.
    node_def = [node for node in graph_def.node if node.name == 'dummy'][0]
    return OperationInfo(
      name=name,
      input_tensors=[tensor],
      output_tensors=[
        TensorInfo(
          name='{}:0'.format(name),
          op_name=name,
          dtype=tensor.dtype,
          shape=output_shape,
          ugraph=ugraph
        )
      ],
      op_type=cls.op_type,
      backend=kwargs.get('backend', 'tensorflow'),
      ugraph=ugraph,
      op_attr={
        k: AttrValueConverter.get_generic_value(v)
        for k, v in node_def.attr.items()
      }
    )
@OperatorFactory.register
class _QuantizedMaxPool(_Operator):
  """Code generator for the tf ``QuantizedMaxPool`` op."""
  op_type = "QuantizedMaxPool"

  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    input_names = [t_info.name for t_info in op_info.input_tensors]
    output_names = [t_info.name for t_info in op_info.output_tensors]
    out_dtype = op_info.output_tensors[0].dtype
    # Pooling geometry comes straight from the graph's node attributes.
    ksize = op_info.op_attr['ksize'].value.ints_value
    strides = op_info.op_attr['strides'].value.ints_value
    padding = op_info.op_attr['padding'].value.decode('utf8')
    parser = NamescopedKWArgsParser(
      RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr
    )
    self._snippet = QuantizedMaxPoolSnippet(
      input_names,
      output_names,
      out_dtype,
      ksize,
      strides,
      padding,
      parser.get('ref_counts', []),
      parser.get('to_eval', False),
    )
@OperatorFactory.register
class _MinOperator(_Operator):
  """Code generator for the tf ``Min`` (reduce-min) op.

  NOTE(review): duplicate of an earlier ``_MinOperator`` in this file;
  being defined later, this version (which requires ``data_manager``)
  wins the "Min" registration — confirm and remove the other copy.
  """
  op_type = "Min"
  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
    out_info = op_info.output_tensors[0]
    data_manager = kwargs['data_manager']
    output, out_dtype, out_shape = (out_info.name,
                                    out_info.dtype,
                                    out_info.shape)
    # FIXME: automatic alloc for uTensor fail
    if not out_shape:
      out_shape = [1]
    parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                    op_info.op_attr,
                                    data_manager,
                                    op_info)
    # Optimizer-provided codegen hints (defaults when absent).
    ref_count = parser.get('ref_counts', [0])[0]
    to_eval = parser.get('to_eval', False)
    address = parser.get('address', [])
    self._snippet = MinOpSnippet(inputs, output, out_dtype, out_shape, ref_count, to_eval, address)
  @classmethod
  def build_op_info(cls, ugraph, name, tensor, axis=-1, keepdims=False, **kwargs):
    """Build a Min ``OperationInfo``.

    A plain int ``axis`` is first materialized as a Const op in ``ugraph``;
    the reduction is then run on a dummy array in a scratch tf graph purely
    to infer the output shape and harvest the tf node attributes.
    """
    if isinstance(axis, int):
      axis, = ugraph.add_op(
        np.array(axis, dtype=np.dtype('int32')),
        op_type='Const',
        name='{}/axis'.format(name)
      )
    dummy_in = np.empty(tensor.shape, dtype=tensor.dtype)
    graph = tf.Graph()
    with graph.as_default():
      dummy_out = tf.reduce_min(
        dummy_in,
        axis=axis.op.op_attr['value'].value.np_array,
        keepdims=keepdims,
        name='dummy'
      )
    # The scratch graph contains exactly one node named 'dummy'.
    node_def = [node for node in graph.as_graph_def().node if node.name == 'dummy'][0]
    output_shape = dummy_out.shape.as_list()
    return OperationInfo(
      name=name,
      input_tensors=[tensor, axis],
      output_tensors=[
        TensorInfo(
          name='{}:0'.format(name),
          op_name=name,
          dtype=tensor.dtype,
          shape=output_shape,
          ugraph=ugraph,
        )
      ],
      op_type=cls.op_type,
      backend=kwargs.get('backend', 'tensorflow'),
      ugraph=ugraph,
      op_attr={
        k: AttrValueConverter.get_generic_value(v)
        for k, v in node_def.attr.items()
      }
    )
@OperatorFactory.register
class _QuantizeV2Operator(_Operator):
  """Code generator for the tf ``QuantizeV2`` op."""
  op_type = "QuantizeV2"

  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    input_names = [t_info.name for t_info in op_info.input_tensors]
    output_names = [t_info.name for t_info in op_info.output_tensors]
    quantized_dtype = op_info.output_tensors[0].dtype
    parser = NamescopedKWArgsParser(
      RefCntOptimizer.KWARGS_NAMESCOPE,
      op_info.op_attr,
      kwargs['data_manager'],
      op_info,
    )
    # Optimizer-provided codegen hints (defaults when absent).
    self._snippet = QuantizeV2OpSnippet(
      input_names,
      output_names,
      quantized_dtype,
      parser.get('ref_counts', []),
      parser.get('to_eval', False),
      parser.get('address', []),
    )
@OperatorFactory.register
class _MatMulOperator(_Operator):
  """Code generator for the tf ``MatMul`` op."""
  op_type = "MatMul"
  def __init__(self, op_info, **kwargs):
    """Build the MatMul snippet from ``op_info``.

    Dtypes of both inputs and of the output are forwarded separately to
    the snippet.
    """
    _Operator.__init__(self)
    inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
    output = op_info.output_tensors[0].name
    x_dtype, w_dtype, out_dtype = (op_info.input_tensors[0].dtype,
                                   op_info.input_tensors[1].dtype,
                                   op_info.output_tensors[0].dtype)
    parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                    op_info.op_attr)
    # Optimizer-provided codegen hints (defaults when absent).
    ref_count = parser.get('ref_counts', [0])[0]
    to_eval = parser.get('to_eval', False)
    self._snippet = MatMulOpSnippet(inputs, output,
                                    x_dtype, w_dtype, out_dtype,
                                    ref_count, to_eval)
  @classmethod
  def build_op_info(cls, ugraph, name, tensor_x, tensor_w, **kwargs):
    """Build an ``OperationInfo`` for ``tensor_x`` matrix-multiplied by ``tensor_w``.

    Keyword args ``transpose_x``/``transpose_w`` set the tf
    ``transpose_a``/``transpose_b`` attributes (default ``False``).
    NOTE(review): the dimension check below ignores the transpose flags;
    callers using transposed operands should verify shapes upstream.

    :raises ValueError: if the inner dimensions do not agree.
    """
    out_dtype = np.promote_types(tensor_x.dtype, tensor_w.dtype)
    if tensor_x.shape[-1] != tensor_w.shape[0]:
      raise ValueError(
        'dimension mismatch: {},{}'.format(tensor_x.shape, tensor_w.shape)
      )
    return OperationInfo(
      name=name,
      input_tensors=[
        tensor_x, tensor_w
      ],
      output_tensors=[
        TensorInfo(
          name='{}:0'.format(name),
          op_name=name,
          dtype=out_dtype,
          shape=tensor_x.shape[:-1]+tensor_w.shape[1:],
          ugraph=ugraph
        )
      ],
      op_type=cls.op_type,
      op_attr={
        'T': AttrValueConverter.__utensor_generic_type__(
          value_name='type',
          value=DataTypeConverter.get_tf_value(out_dtype)
        ),
        'transpose_a': AttrValueConverter.__utensor_generic_type__(
          value_name='b',
          value=kwargs.get('transpose_x', False)
        ),
        'transpose_b': AttrValueConverter.__utensor_generic_type__(
          value_name='b',
          # BUG FIX: this kwarg was read only under the misspelling
          # 'tranpose_w', silently ignoring a correctly spelled
          # 'transpose_w'. Accept the correct spelling first and fall
          # back to the legacy typo for backward compatibility.
          value=kwargs.get('transpose_w', kwargs.get('tranpose_w', False))
        )
      },
      ugraph=ugraph,
      backend=kwargs.get('backend', 'tensorflow')
    )
@OperatorFactory.register
class _QuantizedMatMulOperator(_Operator):
  """Code generator for the tf ``QuantizedMatMul`` op."""
  op_type = "QuantizedMatMul"

  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    input_names = [t_info.name for t_info in op_info.input_tensors]
    output_names = [t_info.name for t_info in op_info.output_tensors]
    # Dtypes of both operands and of the quantized result.
    x_dtype = op_info.input_tensors[0].dtype
    w_dtype = op_info.input_tensors[1].dtype
    out_dtype = op_info.output_tensors[0].dtype
    parser = NamescopedKWArgsParser(
      RefCntOptimizer.KWARGS_NAMESCOPE,
      op_info.op_attr,
      kwargs['data_manager'],
      op_info,
    )
    self._snippet = QuantizedMatMulOpSnippet(
      input_names,
      output_names,
      x_dtype,
      w_dtype,
      out_dtype,
      parser.get('ref_counts', []),
      parser.get('to_eval', False),
      parser.get('address', []),
    )
@OperatorFactory.register
class _ReluOperator(_Operator):
  """Code generator for the tf ``Relu`` op."""
  op_type = "Relu"

  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    input_names = [t_info.name for t_info in op_info.input_tensors]
    output_name = op_info.output_tensors[0].name
    # Input and output dtypes are passed separately to the snippet.
    #NT: why separate this out?
    #DB: I don't know, it's in the uTensor C code
    in_dtype = op_info.input_tensors[0].dtype
    out_dtype = op_info.output_tensors[0].dtype
    parser = NamescopedKWArgsParser(
      RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr
    )
    self._snippet = ReluOpSnippet(
      input_names,
      output_name,
      in_dtype,
      out_dtype,
      parser.get('ref_counts', [0])[0],
      parser.get('to_eval', False),
    )

  @classmethod
  def build_op_info(cls, ugraph, name, tensor, **kwargs):
    """Build an ``OperationInfo`` applying Relu to ``tensor``.

    Shape and dtype carry over unchanged from the input tensor.
    """
    out_tensor = TensorInfo(
      name='{}:0'.format(name),
      op_name=name,
      dtype=tensor.dtype,
      shape=tensor.shape[:],
      ugraph=ugraph
    )
    type_attr = AttrValueConverter.__utensor_generic_type__(
      value_name='type',
      value=DataTypeConverter.get_tf_value(tensor.dtype)
    )
    return OperationInfo(
      name=name,
      input_tensors=[tensor],
      output_tensors=[out_tensor],
      op_type=cls.op_type,
      op_attr={'T': type_attr},
      ugraph=ugraph,
      backend=kwargs.get('backend', 'tensorflow')
    )
@OperatorFactory.register
class _QuantizedReluOperator(_Operator):
  """Code generator for the tf ``QuantizedRelu`` op."""
  op_type = "QuantizedRelu"

  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    input_names = [t_info.name for t_info in op_info.input_tensors]
    output_names = [t_info.name for t_info in op_info.output_tensors]
    # Input dtype and quantized output dtype are passed separately.
    #NT: why separate this out?
    #DB: I don't know, it's in the uTensor C code
    in_dtype = op_info.input_tensors[0].dtype
    qout_dtype = op_info.output_tensors[0].dtype
    # Dtypes of the remaining (min/max range) outputs.
    out_dtypes = [t_info.dtype for t_info in op_info.output_tensors[1:]]
    parser = NamescopedKWArgsParser(
      RefCntOptimizer.KWARGS_NAMESCOPE,
      op_info.op_attr,
      kwargs['data_manager'],
      op_info,
    )
    self._snippet = QuantizedReluOpSnippet(
      input_names,
      output_names,
      in_dtype,
      out_dtypes,
      qout_dtype,
      parser.get('ref_counts', []),
      parser.get('to_eval', False),
      parser.get('address', []),
    )
@OperatorFactory.register
class _QuantizedAddOperator(_Operator):
  """Code generator for the tf ``QuantizedAdd`` op."""
  op_type = "QuantizedAdd"

  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    input_names = [t_info.name for t_info in op_info.input_tensors]
    output_names = [t_info.name for t_info in op_info.output_tensors]
    # Dtypes of both operands and of the quantized result.
    x_dtype = op_info.input_tensors[0].dtype
    w_dtype = op_info.input_tensors[1].dtype
    out_dtype = op_info.output_tensors[0].dtype
    parser = NamescopedKWArgsParser(
      RefCntOptimizer.KWARGS_NAMESCOPE,
      op_info.op_attr,
      kwargs['data_manager'],
      op_info,
    )
    self._snippet = QuantizedAddOpSnippet(
      input_names,
      output_names,
      x_dtype,
      w_dtype,
      out_dtype,
      parser.get('ref_counts', []),
      parser.get('to_eval', False),
      parser.get('address', []),
    )
@OperatorFactory.register
class _QuantizedMulOperator(_Operator):
  """Code generator for the tf ``QuantizedMul`` op."""
  op_type = "QuantizedMul"

  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    input_names = [t_info.name for t_info in op_info.input_tensors]
    output_names = [t_info.name for t_info in op_info.output_tensors]
    # Dtypes of both operands and of the quantized result.
    x_dtype = op_info.input_tensors[0].dtype
    w_dtype = op_info.input_tensors[1].dtype
    out_dtype = op_info.output_tensors[0].dtype
    parser = NamescopedKWArgsParser(
      RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr
    )
    self._snippet = QuantizedMulOpSnippet(
      input_names,
      output_names,
      x_dtype,
      w_dtype,
      out_dtype,
      parser.get('ref_counts', []),
      parser.get('to_eval', False),
    )
@OperatorFactory.register
class _RequantizationRangeOperator(_Operator):
  """Code generator for the tf ``RequantizationRange`` op."""
  op_type = "RequantizationRange"

  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    input_names = [t_info.name for t_info in op_info.input_tensors]
    output_names = [t_info.name for t_info in op_info.output_tensors]
    out_dtype = op_info.output_tensors[0].dtype
    parser = NamescopedKWArgsParser(
      RefCntOptimizer.KWARGS_NAMESCOPE,
      op_info.op_attr,
      kwargs['data_manager'],
      op_info,
    )
    # Optimizer-provided codegen hints (defaults when absent).
    self._snippet = RequantizationRangeOpSnippet(
      input_names,
      output_names,
      out_dtype,
      parser.get('ref_counts', []),
      parser.get('to_eval', False),
      parser.get('address', []),
    )
@OperatorFactory.register
class _RequantizeOperator(_Operator):
  """Code generator for the tf ``Requantize`` op."""
  op_type = "Requantize"

  def __init__(self, op_info, **kwargs):
    _Operator.__init__(self)
    input_names = [t_info.name for t_info in op_info.input_tensors]
    output_names = [t_info.name for t_info in op_info.output_tensors]
    # Dtype of the requantized tensor and of its min/max range outputs.
    qout_dtype = op_info.output_tensors[0].dtype
    range_dtype = op_info.output_tensors[1].dtype
    parser = NamescopedKWArgsParser(
      RefCntOptimizer.KWARGS_NAMESCOPE,
      op_info.op_attr,
      kwargs['data_manager'],
      op_info,
    )
    self._snippet = RequantizeOpSnippet(
      input_names,
      output_names,
      qout_dtype,
      range_dtype,
      parser.get('ref_counts', []),
      parser.get('to_eval', False),
      parser.get('address', []),
    )
@OperatorFactory.register
class _ReshapeOperator(_Operator):
    op_type = "Reshape"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a Reshape op."""
        _Operator.__init__(self)
        in_names = [t.name for t in op_info.input_tensors]
        out_name = op_info.output_tensors[0].name
        dtype = op_info.input_tensors[0].dtype
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr,
                                        kwargs['data_manager'],
                                        op_info)
        ref_count = parser.get('ref_counts', [0])[0]
        to_eval = parser.get('to_eval', False)
        address = parser.get('address', [])
        self._snippet = ReshapeOpSnippet(in_names, out_name, dtype,
                                         ref_count, to_eval, address)
@OperatorFactory.register
class _QuantizedReshapeOperator(_Operator):
    op_type = "QuantizedReshape"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a QuantizedReshape op."""
        _Operator.__init__(self)
        in_names = [t.name for t in op_info.input_tensors]
        out_names = [t.name for t in op_info.output_tensors]
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        self._snippet = QuantizedReshapeOpSnippet(inputs=in_names,
                                                  outputs=out_names,
                                                  ref_counts=parser.get('ref_counts', []),
                                                  to_eval=parser.get('to_eval', False))
@OperatorFactory.register
class _CMSIS_NN_FCOperator(_Operator):
    op_type = "CMSIS_NN_FC"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a CMSIS-NN fully-connected op.

        Note: the order of inputs/outputs is preserved.
        """
        _Operator.__init__(self)
        inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
        output = op_info.output_tensors[0].name
        out_dtype = op_info.output_tensors[0].dtype
        in_dtypes = [tensor_info.dtype for tensor_info in op_info.input_tensors]
        # only an (implicit) batch dimension of 1 is supported
        # idiom fix: compare to None with `is`, not `==` (the original
        # `shape[1] == None` is fragile and non-PEP8)
        batch_dim = op_info.input_tensors[0].shape[1]
        assert batch_dim is None or batch_dim == 1
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_counts = parser.get('ref_counts', [])
        to_eval = parser.get('to_eval', False)
        self._snippet = CMSISNNFCOpSnippet(inputs=inputs,
                                           output=output,
                                           ref_counts=ref_counts,
                                           in_dtypes=in_dtypes,
                                           out_dtype=out_dtype,
                                           to_eval=to_eval)
@OperatorFactory.register
class _Conv2DOperator(_Operator):
    op_type = "Conv2D"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a (float) Conv2D op."""
        _Operator.__init__(self)
        inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
        output = op_info.output_tensors[0].name
        # input 0 is the feature map, input 1 the filter/kernel
        in_dtype, filter_dtype = (op_info.input_tensors[0].dtype,
                                  op_info.input_tensors[1].dtype)
        out_dtype = op_info.output_tensors[0].dtype
        strides = op_info.op_attr["strides"].value.ints_value
        padding = op_info.op_attr["padding"].value.decode('utf8')
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_count = parser.get('ref_counts', [0])[0]
        to_eval = parser.get('to_eval', False)
        self._snippet = Conv2DOpSnippet(inputs, output, strides, padding,
                                        in_dtype=in_dtype, filter_dtype=filter_dtype, out_dtype=out_dtype,
                                        ref_count=ref_count, to_eval=to_eval)

    @classmethod
    def build_op_info(cls, ugraph, name, tensor_x, tensor_w, stride_height, stride_width, padding='SAME', **kwargs):
        """Construct an OperationInfo describing a Conv2D node.

        Builds a throw-away tf graph on dummy (empty) inputs so that tf
        itself computes the padded output shape and node attributes.
        """
        # dboy: I'm too lazy to implement the padding algorithm again
        # simply call tf to find out the output shape
        dummy_x = np.empty(tensor_x.shape, dtype=tensor_x.dtype)
        dummy_w = np.empty(tensor_w.shape, dtype=tensor_w.dtype)
        graph = tf.Graph()
        with graph.as_default():
            dummy_out = tf.nn.conv2d(
                dummy_x,
                dummy_w,
                strides=[1, stride_height, stride_width, 1],
                padding=padding,
                name='dummy'
            )
        # pull the NodeDef of the op we just created to harvest its attributes
        node_def = [node for node in graph.as_graph_def().node if node.name == 'dummy'][0]
        output_shape = dummy_out.shape.as_list()
        output_dtype = np.promote_types(tensor_x.dtype, tensor_w.dtype)
        op_attr = {
            k: AttrValueConverter.get_generic_value(v)
            for k, v in node_def.attr.items()
        }
        return OperationInfo(
            name=name,
            input_tensors=[tensor_x, tensor_w],
            output_tensors=[
                TensorInfo(
                    name='{}:0'.format(name),
                    op_name=name,
                    dtype=output_dtype,
                    shape=output_shape,
                    ugraph=ugraph,
                )
            ],
            op_type=cls.op_type,
            op_attr=op_attr,
            ugraph=ugraph,
            backend=kwargs.get('backend', 'tensorflow'),
        )
@OperatorFactory.register
class _FusedConv2DMaxpoolOperator(_Operator):
    op_type = "FusedConv2DMaxpool"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a fused Conv2D + Maxpool op."""
        _Operator.__init__(self)
        in_names = [t.name for t in op_info.input_tensors]
        out_name = op_info.output_tensors[0].name
        in_dtype = op_info.input_tensors[0].dtype
        filter_dtype = op_info.input_tensors[1].dtype
        out_dtype = op_info.output_tensors[0].dtype
        strides = op_info.op_attr["strides"].value.ints_value
        ksize = op_info.op_attr["ksize"].value.ints_value
        padding = op_info.op_attr["padding"].value.decode('utf8')
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_count = parser.get('ref_counts', [0])[0]
        to_eval = parser.get('to_eval', False)
        self._snippet = FusedConv2DMaxpoolOpSnippet(in_names, out_name, strides, ksize, padding,
                                                    in_dtype=in_dtype,
                                                    filter_dtype=filter_dtype,
                                                    out_dtype=out_dtype,
                                                    ref_count=ref_count,
                                                    to_eval=to_eval)
@OperatorFactory.register
class _QuantizedFusedConv2DMaxpoolOperator(_Operator):
    op_type = "QuantizedFusedConv2DMaxpool"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a quantized fused Conv2D + Maxpool op."""
        _Operator.__init__(self)
        in_names = [t.name for t in op_info.input_tensors]
        out_names = [t.name for t in op_info.output_tensors]
        in_dtype = op_info.input_tensors[0].dtype
        filter_dtype = op_info.input_tensors[1].dtype
        out_dtypes = [t.dtype for t in op_info.output_tensors]
        # conv and pool attributes live under separate namescoped keys
        strides = op_info.op_attr['_utensor_conv']["strides"].value.ints_value
        ksize = op_info.op_attr['_utensor_pool']["ksize"].value.ints_value
        padding = op_info.op_attr['_utensor_conv']["padding"].value.decode('utf8')
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_counts = parser.get('ref_counts', None)
        to_eval = parser.get('to_eval', False)
        self._snippet = QuantizedFusedConv2DMaxpoolOpSnippet(
            in_names, out_names, strides, ksize, padding,
            in_dtype=in_dtype, filter_dtype=filter_dtype, out_dtypes=out_dtypes,
            ref_counts=ref_counts, to_eval=to_eval
        )
@OperatorFactory.register
class _Conv2DQuantOperator(_Operator):
    op_type = "QuantizedConv2D"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a quantized 2D convolution."""
        _Operator.__init__(self)
        in_names = [t.name for t in op_info.input_tensors]
        out_names = [t.name for t in op_info.output_tensors]
        in_dtype = op_info.input_tensors[0].dtype
        filter_dtype = op_info.input_tensors[1].dtype
        out_dtypes = [t.dtype for t in op_info.output_tensors]
        strides = op_info.op_attr["strides"].value.ints_value
        padding = op_info.op_attr["padding"].value.decode('utf8')
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_counts = parser.get('ref_counts', [])
        to_eval = parser.get('to_eval', False)
        self._snippet = Conv2DQuantOpSnippet(in_names, out_names, strides, padding,
                                             in_dtype=in_dtype,
                                             filter_dtype=filter_dtype,
                                             out_dtypes=out_dtypes,
                                             ref_counts=ref_counts,
                                             to_eval=to_eval)
@OperatorFactory.register
class _Uint8Q7OriginOperator(_Operator):
    op_type = "Uint8Q7OriginOp"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a Uint8Q7Origin op."""
        _Operator.__init__(self)
        tensor_names = [t.name for t in op_info.input_tensors]
        out_name = op_info.output_tensors[0].name
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        self._snippet = Uint8Q7OriginSnippet(tensor_names, out_name,
                                             parser.get('ref_counts', [0])[0],
                                             parser.get('to_eval', False))
# hard coding to uint8_t uint8_t int32_t for now
@OperatorFactory.register
class _QuantRangeForMultiplication_u8_u8_int32_Operator(_Operator):
    op_type = "QuantRangeForMultiplicationu8u8int32Op"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet computing the quantization range
        for a u8 x u8 -> int32 multiplication.
        """
        _Operator.__init__(self)
        inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
        outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
        # BUG FIX: the original code did `assert "msg"` inside an `if`,
        # which asserts a non-empty (truthy) string and therefore never
        # fails; the dtype-consistency check was a no-op.
        assert op_info.output_tensors[0].dtype == op_info.output_tensors[1].dtype, \
            "output tensors must have the same data type"
        # FIXME: hard coding the output to int32 for now
        output_type = np.dtype([('qint32', '<i4')])
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_counts = parser.get('ref_counts', [])
        to_eval = parser.get('to_eval', False)
        self._snippet = QuantRangeForMultiplicationSnippet(inputs, outputs, output_type,
                                                           ref_counts, to_eval)
@OperatorFactory.register
@OpEqualityDelegate.is_compatible_with("Const", _morphism.Inline2ConstMorphism)
class _InlineOperator(_Operator):
    op_type = "Inline"

    def __init__(self, op_info, **kwargs):
        """Emit a binary tensor-creation snippet plus a weight snippet for an
        inline (constant) tensor.

        NOTE(review): unlike most sibling operators, this __init__ does not
        call _Operator.__init__(self) — confirm whether that is intentional.
        """
        out_tensor_info = op_info.output_tensors[0]
        out_tname, out_dtype, tensor_shape = (out_tensor_info.name,
                                              out_tensor_info.dtype,
                                              out_tensor_info.shape)
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_count = parser.get('ref_counts', [0])[0]
        pre_tname = self._prepare_tensor_name(out_tname)
        inline_tname = self._prepare_inline_array_name(out_tname)
        value = op_info.op_attr['value'].value.np_array.flatten()
        self._snippet = CreateTensorBinarySnippet(out_tname, tensor_shape=tensor_shape,
                                                  tf_dtype=out_dtype,
                                                  sptr_name=pre_tname,
                                                  inline_name=inline_tname,
                                                  ref_count=ref_count)
        weight_snippet = WeightSnippet(inline_tname,
                                       out_dtype,
                                       tensor_shape,
                                       value)
        weight_container = kwargs['weight_container']
        weight_container.add_snippet(weight_snippet)

    def _prepare_tensor_name(self, tensor_name):
        """Sanitize a tensor name into a valid C identifier."""
        prepared = tensor_name.replace(":", "_").replace("/", "_")
        return prepared

    def _prepare_inline_array_name(self, tensor_name):
        """Sanitized identifier for the inline weight array."""
        sanitized = tensor_name.replace(":", "_").replace("/", "_")
        # typo fix: local variable was spelled 'preapred'
        prepared = "inline_{}".format(sanitized)
        return prepared
@OperatorFactory.register
class _RamOperator(_Operator):
    op_type = "Ram"

    def __init__(self, op_info, **kwargs):
        """Emit a RAM tensor-creation snippet for the op's output tensor.

        NOTE(review): unlike most sibling operators, _Operator.__init__
        is not invoked here — confirm whether that is intentional.
        """
        out_info = op_info.output_tensors[0]
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_count = parser.get('ref_counts', [0])[0]
        sptr_name = self._prepare_tensor_name(out_info.name)
        self._snippet = CreateTensorRamSnippet(out_info.name,
                                               tensor_shape=out_info.shape,
                                               tf_dtype=out_info.dtype,
                                               sptr_name=sptr_name,
                                               ref_count=ref_count)

    def _prepare_tensor_name(self, tensor_name):
        """Sanitize a tensor name into a valid C identifier."""
        return tensor_name.replace(":", "_").replace("/", "_")
@OperatorFactory.register
class _ShapeOperator(_Operator):
    op_type = "Shape"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a Shape op."""
        _Operator.__init__(self)
        input_names = [t.name for t in op_info.input_tensors]
        out_tensor = op_info.output_tensors[0]
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_count = parser.get('ref_counts', [0])[0]
        to_eval = parser.get('to_eval', True)
        self._snippet = ShapeOpSnippet(input_names, out_tensor.name,
                                       out_tensor.dtype, ref_count, to_eval)
@OperatorFactory.register
class _StridedSliceOperator(_Operator):
    op_type = "StridedSlice"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a StridedSlice op.

        Reads the five tf slicing masks from ``op_info.op_attr``.
        """
        _Operator.__init__(self)
        inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
        output = op_info.output_tensors[0].name
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_count = parser.get('ref_counts', [0])[0]
        to_eval = parser.get('to_eval', True)
        dtype = op_info.input_tensors[0].dtype
        out_dtype = op_info.output_tensors[0].dtype
        begin_mask = op_info.op_attr['begin_mask'].value
        ellipsis_mask = op_info.op_attr['ellipsis_mask'].value
        end_mask = op_info.op_attr['end_mask'].value
        # BUG FIX: this previously read op_attr['begin_mask'] (copy-paste
        # error), silently dropping the real new_axis_mask attribute.
        new_axis_mask = op_info.op_attr['new_axis_mask'].value
        shrink_axis_mask = op_info.op_attr['shrink_axis_mask'].value
        self._snippet = StridedSliceOpSnippet(inputs, output, dtype, out_dtype,
                                              begin_mask, ellipsis_mask, end_mask,
                                              new_axis_mask, shrink_axis_mask,
                                              ref_count, to_eval)
@OperatorFactory.register
class _PackOperator(_Operator):
    op_type = "Pack"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a Pack (stack) op."""
        _Operator.__init__(self)
        input_names = [t.name for t in op_info.input_tensors]
        out_name = op_info.output_tensors[0].name
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_count = parser.get('ref_counts', [0])[0]
        to_eval = parser.get('to_eval', True)
        in_dtype = op_info.input_tensors[0].dtype
        out_dtype = op_info.output_tensors[0].dtype
        # N = number of tensors to pack; axis = dimension to stack along
        num_tensors = op_info.op_attr['N'].value
        axis = op_info.op_attr['axis'].value
        self._snippet = PackOpSnippet(input_names, out_name, in_dtype, out_dtype,
                                      num_tensors, axis, ref_count, to_eval)
@OperatorFactory.register
class _SoftmaxOperator(_Operator):
    # NOTE: softmax in tf is a composite op; there is no trivial way to
    # construct the op_info if we want to support tf quantization for the
    # softmax op, so we simply support the uTensor softmax only.
    op_type = "Softmax"

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a uTensor softmax op."""
        _Operator.__init__(self)
        in_tensor = op_info.input_tensors[0]
        out_tensor = op_info.output_tensors[0]
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_count = parser.get('ref_counts', [0])[0]
        to_eval = parser.get('to_eval', True)
        self._snippet = SoftmaxOpSnippet(
            in_tensor.name,
            out_tensor.name,
            in_tensor.dtype,
            out_tensor.dtype,
            ref_count,
            to_eval
        )
@OperatorFactory.register
class _GatherOperator(_Operator):
    op_type = "Gather"  # tf op type

    def __init__(self, op_info, **kwargs):
        """Build the code snippet for a Gather op."""
        _Operator.__init__(self)
        input_names = [t.name for t in op_info.input_tensors]
        out_name = op_info.output_tensors[0].name
        tf_dtype = op_info.input_tensors[0].dtype
        parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
                                        op_info.op_attr)
        ref_count = parser.get('ref_counts', [0])[0]
        to_eval = parser.get('to_eval', False)
        self._snippet = GatherOpSnippet(input_names, out_name, tf_dtype,
                                        ref_count, to_eval)
|
{"hexsha": "4f7f939a65964f9c1af988a3620892d451aa3135", "size": 46951, "ext": "py", "lang": "Python", "max_stars_repo_path": "utensor_cgen/backend/operators.py", "max_stars_repo_name": "dboyliao/utensor_cgen", "max_stars_repo_head_hexsha": "aacd3adf4ee2a521a8eb2e75807fe3c1c0d1e1e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-29T17:40:49.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-29T17:40:49.000Z", "max_issues_repo_path": "utensor_cgen/backend/operators.py", "max_issues_repo_name": "dboyliao/utensor_cgen", "max_issues_repo_head_hexsha": "aacd3adf4ee2a521a8eb2e75807fe3c1c0d1e1e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-12-28T02:25:45.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-28T02:25:45.000Z", "max_forks_repo_path": "utensor_cgen/backend/operators.py", "max_forks_repo_name": "dboyliao/utensor_cgen", "max_forks_repo_head_hexsha": "aacd3adf4ee2a521a8eb2e75807fe3c1c0d1e1e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-12-27T17:15:38.000Z", "max_forks_repo_forks_event_max_datetime": "2017-12-29T06:43:00.000Z", "avg_line_length": 37.3219395866, "max_line_length": 114, "alphanum_fraction": 0.6291878767, "include": true, "reason": "import numpy", "num_tokens": 10908}
|
# Tetracene example
from kimonet.system.generators import regular_system, crystal_system
from kimonet.analysis import Trajectory, visualize_system, TrajectoryAnalysis, plot_polar_plot
from kimonet import system_test_info
from kimonet.system.molecule import Molecule
from kimonet.system.state import State
from kimonet.core.processes.couplings import forster_coupling
from kimonet.core.processes.decays import einstein_radiative_decay
from kimonet.core.processes.types import GoldenRule, DecayRate, DirectRate, SimpleRate
from kimonet.system.vibrations import MarcusModel, LevichJortnerModel, EmpiricalModel
from kimonet.core.processes.transitions import Transition
from kimonet import calculate_kmc, calculate_kmc_parallel, calculate_kmc_parallel_py2
from kimonet.system.state import ground_state as gs
from kimonet.utils import old_distance_between_molecules, distance_vector_periodic
from kimonet.fileio import store_trajectory_list, load_trajectory_list
import numpy as np
# states list
s1 = State(label='s1', energy=2.9705, multiplicity=1, size=1)  # singlet exciton
# correlated triplet pair spanning two sites (size=2)
tt = State(label='tt', energy=2.0, multiplicity=1, size=2, connected_distance=8)
t1 = State(label='t1', energy=1.5, multiplicity=3, size=1)  # free triplet

# transition moments
# dipole moment vector for the s1 <-> ground-state transition
transition_moment = {Transition(s1, gs): [0.1, 0.0]}
def direction_transfer(initial, final, rate_constant=1):
    """Rate-constant function intended to restrict transfer to one direction.

    The directional filter is currently commented out, so the base rate
    constant is returned unchanged.

    :param initial: list of initial states; the first two define the axis
    :param final: list of final states (unused here)
    :param rate_constant: base rate constant
    :return: the (unmodified) rate constant
    """
    r = initial[0].get_center().get_coordinates() - initial[1].get_center().get_coordinates()
    dot = np.dot(r, initial[0].get_center().get_orientation_vector())
    # directional filter disabled; re-enable to zero the rate off-axis
    #if np.abs(dot) > 0.1:
    #    rate_constant = 0
    return rate_constant
def electronic_coupling_direction(initial, final, couplings=None):
    """Pick the electronic coupling according to the transfer direction.

    Computes the minimum-image vector between the two molecular centers
    and selects, from ``couplings``, the entry for the crystal axis
    ([1,0], [1,1], [1,-1] or [0,1]) best aligned with that vector.

    :param initial: initial states list
    :param final: final states list
    :param couplings: coupling list ordered as [a, ab, ab', b]
    :return: the selected coupling (eV)
    """
    r_vector = initial[1].get_center().get_coordinates() - initial[0].get_center().get_coordinates()
    cell_incr = initial[0].cell_state - final[0].cell_state
    # apply periodic boundary conditions to get the true separation vector
    r = distance_vector_periodic(r_vector, initial[0].supercell, cell_incr)
    norm = np.linalg.norm(r)
    # normalized projections onto the four candidate lattice directions
    dot_a = np.abs(np.dot(r, [1, 0]))/np.linalg.norm([1, 0])/norm
    dot_ab_1 = np.abs(np.dot(r, [1, 1]))/np.linalg.norm([1, 1])/norm
    dot_ab_2 = np.abs(np.dot(r, [1, -1]))/np.linalg.norm([1, -1])/norm
    dot_b = np.abs(np.dot(r, [0, 1]))/np.linalg.norm([0, 1])/norm
    ichosen = int(np.argmax([dot_a, dot_ab_1, dot_ab_2, dot_b]))
    return couplings[ichosen]  # eV
# Electronic couplings in eV for the closest neighbor molecule in the indicated direction
singlet_couplings = [12.61e-3,  # a
                     41.85e-3,  # ab
                     41.85e-3,  # ab
                     27.51e-3]  # b

triplet_couplings = [0.0e-3,  # a
                     7.2e-3,  # ab
                     7.2e-3,  # ab
                     1.2e-3]  # b

# Vibrations: Marcus model with a common reorganization energy
vibrational_model = MarcusModel(reorganization_energies={(gs, s1): 0.07,
                                                         (s1, gs): 0.07,
                                                         (gs, t1): 0.07,  # assuming triplet same reorganization
                                                         (t1, gs): 0.07},  # energy as singlet
                                temperature=300)

#################################################################################
# 2D model (plane a-b), no diffusion along c

molecule = Molecule()

system = crystal_system(molecules=[molecule, molecule],  # molecule to use as reference
                        scaled_site_coordinates=[[0.0, 0.0],
                                                 [0.5, 0.5]],
                        unitcell=[[7.3347, 0.0000],
                                  [-0.2242, 6.0167]],
                        dimensions=[4, 4],  # supercell size
                        orientations=[[0.0, 0.0, np.pi/8],
                                      [0.0, 0.0, -np.pi/8]])  # if element is None then random, if list then Rx Ry Rz

system.cutoff_radius = 8.1  # Angstroms

# Transport
system.process_scheme = [GoldenRule(initial_states=(s1, gs), final_states=(gs, s1),
                                    electronic_coupling_function=electronic_coupling_direction,
                                    arguments={'couplings': singlet_couplings},
                                    vibrations=vibrational_model,
                                    description='singlet transport'),
                         GoldenRule(initial_states=(t1, gs), final_states=(gs, t1),
                                    electronic_coupling_function=electronic_coupling_direction,
                                    arguments={'couplings': triplet_couplings},
                                    vibrations=vibrational_model,
                                    description='triplet transport'),
                         # Transitions
                         SimpleRate(initial_states=(s1, gs), final_states=(tt,),
                                    rate_constant=8.3,  # ns^-1
                                    description='singlet fission'),
                         SimpleRate(initial_states=(tt,), final_states=(gs, s1),
                                    rate_constant=1.0,  # ns^-1
                                    description='triplet fusion'),
                         SimpleRate(initial_states=(tt,), final_states=(t1, t1),
                                    rate_constant=2.0,  # ns^-1
                                    description='triplet dissociation'),
                         # Decays
                         SimpleRate(initial_states=(s1,), final_states=(gs,),
                                    rate_constant=8e-2,  # ns^-1
                                    description='Singlet decay '),
                         SimpleRate(initial_states=(t1,), final_states=(gs,),
                                    rate_constant=1.6e-5,  # ns^-1
                                    description='Triplet decay'),
                         SimpleRate(initial_states=(t1, t1), final_states=(s1, gs),
                                    rate_constant=1.8e-6,  # ns^-1
                                    description='triplet-triplet annihilation'),
                         #SimpleRate(initial_states=(s1, s1), final_states=(gs, gs),
                         #           rate_constant=1,
                         #           description='singlet-singlet annihilation'),
                         #SimpleRate(initial_states=(t1, s1), final_states=(gs, gs),
                         #           rate_constant=100,
                         #           description='singlet-triplet annihilation'),
                         ]

# fixed seed for reproducible trajectories
np.random.seed(0)
system.add_excitation_random(s1, 2)

system_test_info(system)
visualize_system(system)

# exit()

trajectories = calculate_kmc(system,
                             num_trajectories=50,  # number of trajectories that will be simulated
                             max_steps=1000,  # maximum number of steps for trajectory allowed
                             silent=False)

store_trajectory_list(trajectories, 'singlet_fission.h5')
#trajectories = load_trajectory_list('singlet_fission.h5')

analysis = TrajectoryAnalysis(trajectories)

# per-state diffusion statistics and plots
for s in ['s1', 't1']:
    print('STATE: ', s)
    print('diffusion coefficient (average): {} angs^2/ns'.format(analysis.diffusion_coefficient(s)))
    print('lifetime: {} ns'.format(analysis.lifetime(s)))
    print('diffusion length: {} angs'.format(analysis.diffusion_length(s)))
    print('diffusion tensor')
    print(analysis.diffusion_coeff_tensor(s))
    print(analysis.diffusion_coeff_tensor(s, unit_cell=system.supercell))
    print('diffusion length tensor')
    print(analysis.diffusion_length_square_tensor(s))
    print(analysis.diffusion_length_square_tensor(s, unit_cell=system.supercell))
    # print(np.sqrt(analysis.diffusion_coeff_tensor()*analysis.lifetime()*2))
    plt = analysis.plot_2d(state=s)
    plt.figure()
    analysis.plot_distances(state=s)
    plt.show()
    plot_polar_plot(analysis.diffusion_coeff_tensor(s))
    plot_polar_plot(analysis.diffusion_coeff_tensor(s, unit_cell=system.supercell))

for s in ['s1', 't1', 'tt']:
    analysis.plot_exciton_density(state=s)

plt = analysis.plot_exciton_density()
plt.show()
|
{"hexsha": "3761df82bb82ef6760397869453f57fd0da89c9e", "size": 8456, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/singlet_fission.py", "max_stars_repo_name": "abelcarreras/kimonet", "max_stars_repo_head_hexsha": "1b3b3af8f6a67ed5d99990e54d295f395ad4b994", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-02T17:18:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-02T17:18:23.000Z", "max_issues_repo_path": "scripts/singlet_fission.py", "max_issues_repo_name": "abelcarreras/kimonet", "max_issues_repo_head_hexsha": "1b3b3af8f6a67ed5d99990e54d295f395ad4b994", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/singlet_fission.py", "max_forks_repo_name": "abelcarreras/kimonet", "max_forks_repo_head_hexsha": "1b3b3af8f6a67ed5d99990e54d295f395ad4b994", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-09-16T09:03:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T07:10:05.000Z", "avg_line_length": 44.7407407407, "max_line_length": 117, "alphanum_fraction": 0.574385052, "include": true, "reason": "import numpy", "num_tokens": 1952}
|
import numpy as np
from xray import Dataset, Variable, Coordinate
from xray.core import indexing, variable
from . import TestCase, ReturnItem
class TestIndexers(TestCase):
    """Tests for xray's indexing helper functions."""

    def set_to_zero(self, x, i):
        # helper: copy of x with x[i] zeroed, so assignment behaviour of
        # two indexers can be compared as well as read behaviour
        x = x.copy()
        x[i] = 0
        return x

    def test_expanded_indexer(self):
        """expanded_indexer should be equivalent to plain numpy indexing."""
        x = np.random.randn(10, 11, 12, 13, 14)
        y = np.arange(5)
        I = ReturnItem()
        for i in [I[:], I[...], I[0, :, 10], I[..., 10], I[:5, ..., 0],
                  I[..., 0, ...], I[y], I[y, y], I[..., y, y],
                  I[..., 0, 1, 2, 3, 4]]:
            j = indexing.expanded_indexer(i, x.ndim)
            self.assertArrayEqual(x[i], x[j])
            self.assertArrayEqual(self.set_to_zero(x, i),
                                  self.set_to_zero(x, j))
        with self.assertRaisesRegexp(IndexError, 'too many indices'):
            indexing.expanded_indexer(I[1, 2, 3], 2)

    def test_orthogonal_indexer(self):
        x = np.random.randn(10, 11, 12, 13, 14)
        y = np.arange(5)
        I = ReturnItem()
        # orthogonal and numpy indexing should be equivalent, because we only
        # use at most one array and it never in between two slice objects
        # (i.e., we try to avoid numpy's mind-boggling "partial indexing"
        # http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html)
        for i in [I[:], I[0], I[0, 0], I[:5], I[2:5], I[2:5:-1], I[:3, :4],
                  I[:3, 0, :4], I[:3, 0, :4, 0], I[y], I[:, y], I[0, y],
                  I[:2, :3, y], I[0, y, :, :4, 0]]:
            j = indexing.orthogonal_indexer(i, x.shape)
            self.assertArrayEqual(x[i], x[j])
            self.assertArrayEqual(self.set_to_zero(x, i),
                                  self.set_to_zero(x, j))
        # for more complicated cases, check orthogonal indexing is still
        # equivalent to slicing
        z = np.arange(2, 8, 2)
        for i, j, shape in [
                (I[y, y], I[:5, :5], (5, 5, 12, 13, 14)),
                (I[y, z], I[:5, 2:8:2], (5, 3, 12, 13, 14)),
                (I[0, y, y], I[0, :5, :5], (5, 5, 13, 14)),
                (I[y, 0, z], I[:5, 0, 2:8:2], (5, 3, 13, 14)),
                (I[y, :, z], I[:5, :, 2:8:2], (5, 11, 3, 13, 14)),
                (I[0, :2, y, y, 0], I[0, :2, :5, :5, 0], (2, 5, 5)),
                (I[0, :, y, :, 0], I[0, :, :5, :, 0], (11, 5, 13)),
                (I[:, :, y, :, 0], I[:, :, :5, :, 0], (10, 11, 5, 13)),
                (I[:, :, y, z, :], I[:, :, :5, 2:8:2], (10, 11, 5, 3, 14))]:
            k = indexing.orthogonal_indexer(i, x.shape)
            self.assertEqual(shape, x[k].shape)
            self.assertArrayEqual(x[j], x[k])
            self.assertArrayEqual(self.set_to_zero(x, j),
                                  self.set_to_zero(x, k))
        # standard numpy (non-orthogonal) indexing doesn't work anymore
        with self.assertRaisesRegexp(ValueError, 'only supports 1d'):
            indexing.orthogonal_indexer(x > 0, x.shape)
        with self.assertRaisesRegexp(ValueError, 'invalid subkey'):
            print(indexing.orthogonal_indexer((1.5 * y, 1.5 * y), x.shape))

    def test_convert_label_indexer(self):
        # TODO: add tests that aren't just for edge cases
        coord = Coordinate('x', [1, 2, 3])
        with self.assertRaisesRegexp(ValueError, 'not all values found'):
            indexing.convert_label_indexer(coord, [0])
        with self.assertRaises(KeyError):
            indexing.convert_label_indexer(coord, 0)

    def test_remap_label_indexers(self):
        # TODO: fill in more tests!
        data = Dataset({'x': ('x', [1, 2, 3])})
        test_indexer = lambda x: indexing.remap_label_indexers(data, {'x': x})
        # scalar label, numpy scalar and 0-d Variable should all map to
        # positional index 0
        self.assertEqual({'x': 0}, test_indexer(1))
        self.assertEqual({'x': 0}, test_indexer(np.int32(1)))
        self.assertEqual({'x': 0}, test_indexer(Variable([], 1)))
class TestLazyArray(TestCase):
    """Tests for lazily-evaluated indexing wrappers."""

    def test_slice_slice(self):
        """Composing two slices must equal applying them sequentially."""
        I = ReturnItem()
        x = np.arange(100)
        slices = [I[:3], I[:4], I[2:4], I[:1], I[:-1], I[5:-1], I[-5:-1],
                  I[::-1], I[5::-1], I[:3:-1], I[:30:-1], I[10:4:], I[::4],
                  I[4:4:4], I[:4:-4]]
        for i in slices:
            for j in slices:
                expected = x[i][j]
                new_slice = indexing.slice_slice(i, j, size=100)
                actual = x[new_slice]
                self.assertArrayEqual(expected, actual)

    def test_lazily_indexed_array(self):
        x = variable.NumpyArrayAdapter(np.random.rand(10, 20, 30))
        lazy = indexing.LazilyIndexedArray(x)
        I = ReturnItem()
        # test orthogonally applied indexers
        indexers = [I[:], 0, -2, I[:3], [0, 1, 2, 3], np.arange(10) < 5]
        for i in indexers:
            for j in indexers:
                for k in indexers:
                    expected = np.asarray(x[i, j, k])
                    # one-shot indexing and progressively narrowed lazy
                    # views must agree
                    for actual in [lazy[i, j, k],
                                   lazy[:, j, k][i],
                                   lazy[:, :, k][:, j][i]]:
                        self.assertEqual(expected.shape, actual.shape)
                        self.assertArrayEqual(expected, actual)
        # test sequentially applied indexers
        indexers = [(3, 2), (I[:], 0), (I[:2], -1), (I[:4], [0]), ([4, 5], 0),
                    ([0, 1, 2], [0, 1]), ([0, 3, 5], I[:2])]
        for i, j in indexers:
            expected = np.asarray(x[i][j])
            actual = lazy[i][j]
            self.assertEqual(expected.shape, actual.shape)
            self.assertArrayEqual(expected, actual)
|
{"hexsha": "22809669a3e045bab7f0d7101cb0f0ff5e4a44c8", "size": 5589, "ext": "py", "lang": "Python", "max_stars_repo_path": "xray/test/test_indexing.py", "max_stars_repo_name": "eriknw/xray", "max_stars_repo_head_hexsha": "19df8d202b1d8054019e7e42365c67cdde6ff448", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xray/test/test_indexing.py", "max_issues_repo_name": "eriknw/xray", "max_issues_repo_head_hexsha": "19df8d202b1d8054019e7e42365c67cdde6ff448", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xray/test/test_indexing.py", "max_forks_repo_name": "eriknw/xray", "max_forks_repo_head_hexsha": "19df8d202b1d8054019e7e42365c67cdde6ff448", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-05T00:19:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-05T00:19:02.000Z", "avg_line_length": 46.1900826446, "max_line_length": 78, "alphanum_fraction": 0.4907854715, "include": true, "reason": "import numpy", "num_tokens": 1686}
|
[STATEMENT]
lemma differing_vars_subst [rule_format]:
assumes dom\<sigma>: "dom \<sigma> \<supseteq> differing_vars mem\<^sub>1 mem\<^sub>2"
shows "mem\<^sub>1 [\<mapsto> \<sigma>] = mem\<^sub>2 [\<mapsto> \<sigma>]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. subst \<sigma> mem\<^sub>1 = subst \<sigma> mem\<^sub>2
[PROOF STEP]
proof (rule ext)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. subst \<sigma> mem\<^sub>1 x = subst \<sigma> mem\<^sub>2 x
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. subst \<sigma> mem\<^sub>1 x = subst \<sigma> mem\<^sub>2 x
[PROOF STEP]
from dom\<sigma>
[PROOF STATE]
proof (chain)
picking this:
differing_vars mem\<^sub>1 mem\<^sub>2 \<subseteq> dom \<sigma>
[PROOF STEP]
show "mem\<^sub>1 [\<mapsto> \<sigma>] x = mem\<^sub>2 [\<mapsto> \<sigma>] x"
[PROOF STATE]
proof (prove)
using this:
differing_vars mem\<^sub>1 mem\<^sub>2 \<subseteq> dom \<sigma>
goal (1 subgoal):
1. subst \<sigma> mem\<^sub>1 x = subst \<sigma> mem\<^sub>2 x
[PROOF STEP]
unfolding subst_def differing_vars_def
[PROOF STATE]
proof (prove)
using this:
{x. mem\<^sub>1 x \<noteq> mem\<^sub>2 x} \<subseteq> dom \<sigma>
goal (1 subgoal):
1. (case \<sigma> x of None \<Rightarrow> mem\<^sub>1 x | Some v \<Rightarrow> v) = (case \<sigma> x of None \<Rightarrow> mem\<^sub>2 x | Some v \<Rightarrow> v)
[PROOF STEP]
by (cases "\<sigma> x", auto)
[PROOF STATE]
proof (state)
this:
subst \<sigma> mem\<^sub>1 x = subst \<sigma> mem\<^sub>2 x
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 621, "file": "SIFUM_Type_Systems_Compositionality", "length": 7}
|
import pandas as pd
import numpy as np
import math
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_array, check_is_fitted
def fourier_sin_encoding(series, max_val, n=1):
    """Sine term of order ``n`` of a cyclical Fourier encoding.

    ``series`` is scaled by ``max_val`` so that one full period spans
    ``[0, max_val]``.
    """
    scaled = series / max_val
    return np.sin(scaled * (2 * math.pi * n))
def fourier_cos_encoding(series, max_val, n=1):
    """Cosine term of order ``n`` of a cyclical Fourier encoding.

    ``series`` is scaled by ``max_val`` so that one full period spans
    ``[0, max_val]``.
    """
    scaled = series / max_val
    return np.cos(scaled * (2 * math.pi * n))
class DateFourier(BaseEstimator, TransformerMixin):
    """ A transformer that adds Fourier terms for given date periods.
    For each passed in date column this transformer will compute
    a number of columns for the Fourier series based on the passed in
    periods.
    Parameters
    ----------
    cols : List[str], default=None
        A list of the columns that should be transformed. If it is None
        then all detected datetime columns will be transformed.
    periods: List[{'dayofweek', 'dayofmonth', 'dayofyear', 'monthofyear'}],
        default=['dayofyear']
        List of the different periods we want to capture with Fourier terms.
    fourier : int > 0
        Integer representing how many terms of the Fourier series
        should be added.
    drop_base_cols: boolean, default=True
        If true the base datetime columns will be dropped.
    Attributes
    ----------
    n_features_ : int
        The number of features of the data passed to :meth:`fit`.
    """
    def __init__(self,
                 cols=None,
                 fourier=1,
                 periods=['dayofyear'],
                 drop_base_cols=True):
        # NOTE: the mutable default for ``periods`` is safe because it is
        # never mutated, only read.
        self.cols = cols
        self.fourier = fourier
        self.periods = periods
        self.drop_base_cols = drop_base_cols
        # Check to make sure the requested periods are supported.
        # NOTE(review): sklearn convention is to validate in ``fit`` rather
        # than ``__init__`` (``set_params`` bypasses this check); the
        # original raise-on-construction behaviour is preserved here.
        valid_periods = [
            'dayofweek',
            'dayofmonth',
            'dayofyear',
            'monthofyear',
            # 'minuteofday',  # TODO add functionality for time of day
        ]
        if not all(x in valid_periods for x in self.periods):
            raise ValueError(
                'Periods must be a subset of {}'.format(valid_periods))

    def fit(self, X, y=None):
        """Check data and if cols=None select all datetime columns.
        Parameters
        ----------
        X : pd.DataFrame, shape (n_samples, n_features)
            The training input samples. Must be a pandas DataFrame.
        y : None
            There is no need of a target in a transformer, yet the pipeline API
            requires this parameter.
        Returns
        -------
        self : object
            Returns self.
        Raises
        ------
        TypeError
            If ``X`` is not a pandas DataFrame.
        """
        # Input check - check_array is disabled because it would strip the
        # DataFrame/dtype information that transform relies on.
        #X = check_array(X, accept_sparse=True, dtype=None)
        self.n_features_ = X.shape[1]
        # isinstance (rather than ``type(X) != pd.DataFrame``) also accepts
        # DataFrame subclasses.
        if not isinstance(X, pd.DataFrame):
            raise TypeError('This Transformer currently only accepts '
                            'pandas dataframes as inputs')
        # If no columns are passed we identify any
        # datetime columns for transformation.
        # NOTE(review): this overwrites the ``cols`` constructor parameter,
        # which deviates from sklearn's "fit must not alter init params"
        # convention; kept for backward compatibility since transform reads
        # ``self.cols``.
        if self.cols is None:
            self.cols = X.select_dtypes(
                include=[np.datetime64]).columns.tolist()
        return self

    def transform(self, X):
        """ Add Fourier series columns to the dataframe.
        Parameters
        ----------
        X : pd.DataFrame, shape (n_samples, n_features)
            The input samples; must contain the datetime columns identified
            during :meth:`fit`.
        Returns
        -------
        X_transformed : pd.DataFrame
            A copy of ``X`` with ``2 * fourier`` new sin/cos columns per
            (date column, period) pair. The intermediate integer period
            columns are dropped, and the base datetime columns are dropped
            as well when ``drop_base_cols`` is True.
        """
        # Check that fit had been called
        check_is_fitted(self, 'n_features_')
        # Input validation
        #X = check_array(X, accept_sparse=True, dtype=None)
        # Work on a copy so the caller's DataFrame is not mutated
        # (the previous implementation added/dropped columns in place).
        df = X.copy()
        for col in self.cols:
            # Map each intermediate period column to its cycle length.
            # NOTE: dayofmonth/dayofyear use fixed maxima (31/365) and do
            # not account for shorter months or leap years.
            cyclical_cols = {}
            if 'dayofweek' in self.periods:
                df['{}_day_of_week'.format(col)] = df[col].dt.dayofweek
                cyclical_cols['{}_day_of_week'.format(col)] = 7
            if 'dayofmonth' in self.periods:
                df['{}_day_of_month'.format(col)] = df[col].dt.day
                cyclical_cols['{}_day_of_month'.format(col)] = 31
            if 'dayofyear' in self.periods:
                df['{}_day_of_year'.format(col)] = df[col].dt.dayofyear
                cyclical_cols['{}_day_of_year'.format(col)] = 365
            if 'monthofyear' in self.periods:
                df['{}_month'.format(col)] = df[col].dt.month
                cyclical_cols['{}_month'.format(col)] = 12
            # Add sin/cos terms for each harmonic 1..fourier, then drop the
            # intermediate integer columns.
            for fcol, max_val in cyclical_cols.items():
                for i in range(1, self.fourier + 1):
                    df[fcol + '_fourier_sin_' +
                       str(i)] = fourier_sin_encoding(df[fcol], max_val, n=i)
                    df[fcol + '_fourier_cos_' +
                       str(i)] = fourier_cos_encoding(df[fcol], max_val, n=i)
            df = df.drop(list(cyclical_cols.keys()), axis='columns')
        if self.drop_base_cols:
            df = df.drop(self.cols, axis='columns')
        return df
|
{"hexsha": "e1a3d65209832299054ad0d18a4ae59e0c7f15ba", "size": 5028, "ext": "py", "lang": "Python", "max_stars_repo_path": "skautobots/date_processing/_date_fourier.py", "max_stars_repo_name": "ccastleberry/sk-autobots", "max_stars_repo_head_hexsha": "7aaee0faa6383fbefaef1509cfc87010d2c58b84", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skautobots/date_processing/_date_fourier.py", "max_issues_repo_name": "ccastleberry/sk-autobots", "max_issues_repo_head_hexsha": "7aaee0faa6383fbefaef1509cfc87010d2c58b84", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-04-24T02:40:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-24T03:30:58.000Z", "max_forks_repo_path": "skautobots/date_processing/_date_fourier.py", "max_forks_repo_name": "ccastleberry/sk-autobots", "max_forks_repo_head_hexsha": "7aaee0faa6383fbefaef1509cfc87010d2c58b84", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1608391608, "max_line_length": 79, "alphanum_fraction": 0.576372315, "include": true, "reason": "import numpy", "num_tokens": 1141}
|
A little email etiquette goes a long way. See also UCD Classroom Etiquette.
From (undergraduate) student to instructor:
Start the email with Dear Prof. Smith or Dear Dr. Jones, as appropriate, unless they have told you otherwise. Some faculty, particularly in the sciences, don't mind being called by their first names, but it never hurts to be more formal from the start. Do not refer to your instructor as Mr. or Ms., and definitely not as Mrs. (It is offensive to many women if you make assumptions about their marital status, and even if a woman is married she may prefer Ms.).
Indicate which class you are in. Many faculty teach more than one class. Dont make them go check their roster.
Use proper grammar, punctuation, and spelling, to the best of your abilities. This is not the time for u 2 show ur prof how kewl u r. They wont get it and they will think youre either disrespectful or not very smart.
Dont expect an instantaneous reply, especially if you send late at night. But within 24 hours is reasonable.
If you have a question that requires a long answer, go to office hours. Some things are just too hard to explain over email.
Sign with your first and last name. instructors want to know who is emailing them, and not all email programs give this information.
Check your email for a reply. Nothing is more irritating for a instructor than to take time to compose a reply, only to be asked the next day, Did you get my email?
Reply to the instructor. Thank yous are always appreciated, especially if the instructor has written a long email.
From instructor to student:
Reply promptly. If you cant reply right away, at least give a quick response to let the student know you got their email and will reply as soon as you can.
Be positive. If youre commenting on a students work, try to find at least one relatively positive thing to say about it, even if most of what you have to say is negative.
Be polite. The message youre trying to convey will be lost if it is delivered rudely.
20081020 23:58:20 nbsp Learning how to write meaningful subject headings can also make life a lot easier for both the sender and the recipient (not just instructors, but also counselors, academic advisers or peer advisers). When you consider that the Instructor:Student ratio is at least 1:100 (imagine teaching 3x sections of 30 students each), a subject heading of Help Midterm?! isnt very conducive to you receiving any helpful responses, other than, Who are you and what do you mean?
Instead, try something involving your name, your class section, and the actual subject of discussion, as in the following examples:
BioSci 101A, Lecture Section 2 Kacey Lander Question about Section 5 on the practice midterm: format to use for essay questions?
JPN01, 8:00 AM session Chapter 5 Conversation do we need to translate all 10 pages?
LIT105, Afternoon Language Lab Session Cancelled or moved to another room? T.A. did not specify.
This practice is especially useful if you are a student employee who has to communicate exclusively via email to other staff members in different departments. Speaking as a university staff employee, we receive an average of 5075 emails a day. Because everyones concern is urgent and timesensitive, emails with nondescript subject headings (e.g., Tomorrow, Front Desk, Sick, Can I leave early?) easily get lost in the barrage of daily announcements, reminders and FYI notifications. If anything, being able to write distinct subject headings helps us track multiple discussions and demonstrates that you are capable of communicating effectively in writing which is usually a plus in a professional environment.
And I agree that ending with polite salutations such as Thank you, Thank you for your time, Sincerely, or other variations is always welcome because it shows that you respect the other persons time. Users/T.Zukumori
20090102 22:59:06 nbsp oh man...I just like to start my messages out Hey Teach. Thats not appropriate? Users/AmandaAbughosh
20090305 20:41:48 nbsp Thanks for the tips! Reading this makes me wonder who you are! Users/renee415
20090615 21:34:38 nbsp O my, this is a nice thing to refer people to. But also, let me add: if you dont come to class or come rarely, dont email your professor, even with questions that might not have been covered in class. Why? Because you could have asked before or after class, or contacted a peer. Not attending and expecting your professor to deal with you via email sends this message: I cant be bothered to give you any of my time during the hours we are supposed to interact, but I demand that you give me your time whenever I need it. Yuck. Obviously, if you have a disability or illness that makes class attendance difficult, and are all set up with the UCD resources for that, thats a different story. Users/BethFreeman
Glad you said something! Students do this all the time. I think this has to do with the fact that many students walk around with this sense of entitlement and just expect other people to bail them out or clean up after them. Its just a lack of maturity and selfawareness. Hopefully, something that students will grow out of by the time they all graduate.Users/CurlyGirl26
20100801 14:40:03 nbsp As a student, its very frustrating when you apply for an internship under a professor, and dont even receive a reply, even when following all of the above suggestions. Even a simple Sorry, dont need an intern or Sorry, youre not what were looking for would suffice. I put in a lot of time into personalizing my resume and cover letter for each internship. Some acknowledgment is all I ask for. Users/TheShah
I agree without knowing more about the situation, that seems inexcusable. Users/CovertProfessor
20100909 10:15:30 nbsp How do you start an email to a lecturer who is not a professor and only has a masters? I once ran into this problem and ended up dropping the salutation all together. Users/JeffLee
In my opinion, the person has the job of a professor and so should be address as Professor X. The fact that they arent tenuretrack or dont have a PhD doesnt change the fact that they are doing the job of a professor. If they dont want you to call them that, they will probably say so, but I think it is better to choose the more respectful salutation from the start you are less likely to offend someone that way. Users/CovertProfessor
20140220 14:48:21 nbsp @Jeff Look at the syllabus and see what they call themselves. I usually play it up, and call them Dr. or Professor even if they arent. It plays into their ego, like CovertProfessor implicitly said. Users/JimPage
|
{"hexsha": "47a039fe75200b07f1a7240e7fa81040fba2bfb8", "size": 6701, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/UCD_Email_Etiquette.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/UCD_Email_Etiquette.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/UCD_Email_Etiquette.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 142.5744680851, "max_line_length": 739, "alphanum_fraction": 0.7882405611, "num_tokens": 1520}
|
"""Disagg benchmark tests"""
import os
import pathlib
import yaml
import pytest
import numpy as np
import pandas as pd
from gmhazard_calc import disagg
from gmhazard_calc import site
from gmhazard_calc import gm_data
from gmhazard_calc import constants
from gmhazard_calc.im import IM, IM_COMPONENT_MAPPING
@pytest.fixture(scope="module")
def config():
    """Load the disagg benchmark configuration (module-scoped fixture)."""
    cfg_path = (
        pathlib.Path(__file__).resolve().parent / "bench_data/disagg_config.yaml"
    )
    with open(cfg_path, "r") as fh:
        return yaml.safe_load(fh)
def test_disagg(config):
    """Benchmark regression test for disaggregation results.

    For every ensemble/IM/station combination listed in the config, runs
    branch-level and ensemble-level disagg and compares the total
    contributions against the stored benchmark CSV files. Failures are
    collected and reported at the end rather than aborting on the first
    mismatch.
    """
    def test_data(
        im: IM,
        station_name: str,
        branch_name: str,
        disagg: disagg.DisaggResult,
        bench_data: pd.DataFrame,
    ):
        # Compare one disagg result against its benchmark data; returns 0 on
        # success and 1 on mismatch so the caller can aggregate failures.
        # NOTE(review): the ``disagg`` parameter shadows the imported
        # ``disagg`` module inside this closure, and ``ensemble_id`` is read
        # from the enclosing loop - confirm intentional.
        print(
            "Running - ensemble - {}, im - {}, station name - {}, "
            "dataset {}".format(ensemble_id, im, station_name, branch_name)
        )
        # squeeze() collapses the single-column benchmark frame to a Series.
        bench_total_contr = bench_data.squeeze()
        try:
            # Sanity check: contributions must sum to 1 (a distribution)
            assert np.isclose(disagg.total_contributions.sum(), 1.0)
            # Benchmark checking: element-wise comparison with tolerance
            assert np.all(
                np.isclose(bench_total_contr.values, disagg.total_contributions.values)
            )
        except AssertionError:
            print(
                "Ensemble - {}, im - {}, station name - {}, branch {} "
                "- FAILED - Results are different".format(
                    ensemble_id, im, station_name, branch_name
                )
            )
            return 1
        print(
            "Ensemble - {}, im - {}, station name - {}, branch {} "
            "- PASSED".format(ensemble_id, im, station_name, branch_name)
        )
        return 0
    ensembles = config["ensembles"]
    # Iterate over the ensembles to test, collecting 0/1 results
    results = []
    for ensemble_id in ensembles.keys():
        # Ensemble config lives under $ENSEMBLE_CONFIG_PATH/benchmark_tests
        ens_config_ffp = (
            pathlib.Path(os.getenv("ENSEMBLE_CONFIG_PATH"))
            / "benchmark_tests"
            / f"{ensemble_id}.yaml"
        )
        ens = gm_data.Ensemble(ensemble_id, ens_config_ffp)
        exceedance = ensembles[ensemble_id]["exceedance"]
        # Expand each configured IM string; for parametric IM data every
        # supported component of the IM type is tested.
        ims = []
        for im_string in ensembles[ensemble_id]["ims"]:
            im = IM.from_str(im_string)
            if ens.im_ensembles[0].im_data_type == constants.IMDataType.parametric:
                ims.extend(
                    [
                        IM(im.im_type, im.period, component)
                        for component in IM_COMPONENT_MAPPING[im.im_type]
                    ]
                )
            else:
                ims.append(im)
        for im in ims:
            for station_name in ensembles[ensemble_id]["station_names"]:
                site_info = site.get_site_from_name(ens, station_name)
                # Test the individual branches disagg results
                disagg_dict = disagg.run_branches_disagg(
                    ens.get_im_ensemble(im.im_type),
                    site_info,
                    im,
                    exceedance=exceedance,
                )
                for branch_name, branch_disagg in disagg_dict.items():
                    # Benchmark file path encodes ensemble/IM/station/branch;
                    # '.' in station names is replaced with 'p' on disk.
                    bench_data_file_sim = (
                        pathlib.Path(__file__).resolve().parent
                        / f"bench_data/disagg/{ensemble_id}"
                        / f"{im.file_format()}_{im.component}"
                        / f"{station_name.replace('.', 'p')}"
                        / f"{branch_name}.csv"
                    )
                    bench_data = pd.read_csv(bench_data_file_sim, index_col=0)
                    results.append(
                        test_data(
                            im, station_name, branch_name, branch_disagg, bench_data
                        )
                    )
                # Test the combined (ensemble-level) disagg result
                bench_data_file_ensemble = (
                    pathlib.Path(__file__).resolve().parent
                    / f"bench_data/disagg/{ensemble_id}"
                    / f"{im.file_format()}_{im.component}"
                    / f"{station_name.replace('.', 'p')}"
                    / "ensemble.csv"
                )
                bench_data = pd.read_csv(bench_data_file_ensemble, index_col=0)
                results.append(
                    test_data(
                        im,
                        station_name,
                        "ensemble",
                        disagg.run_ensemble_disagg(
                            ens, site_info, im, exceedance=exceedance
                        ),
                        bench_data,
                    )
                )
    # Any non-zero entry marks a failed comparison above
    if np.sum(results) > 0:
        raise AssertionError(
            "Some of the benchmark tests failed, "
            "check the output to determine which ones failed."
        )
|
{"hexsha": "7daa87afc29b12d1a904ae6d20aad89bc037edd1", "size": 4779, "ext": "py", "lang": "Python", "max_stars_repo_path": "calculation/gmhazard_calc/gmhazard_calc/test/test_disagg_bench.py", "max_stars_repo_name": "ucgmsim/gmhazard", "max_stars_repo_head_hexsha": "d3d90b4c94b3d9605597a3efeccc8523a1e50c0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "calculation/gmhazard_calc/gmhazard_calc/test/test_disagg_bench.py", "max_issues_repo_name": "ucgmsim/gmhazard", "max_issues_repo_head_hexsha": "d3d90b4c94b3d9605597a3efeccc8523a1e50c0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-10-13T02:33:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T21:01:08.000Z", "max_forks_repo_path": "calculation/gmhazard_calc/gmhazard_calc/test/test_disagg_bench.py", "max_forks_repo_name": "ucgmsim/gmhazard", "max_forks_repo_head_hexsha": "d3d90b4c94b3d9605597a3efeccc8523a1e50c0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4195804196, "max_line_length": 87, "alphanum_fraction": 0.5088930739, "include": true, "reason": "import numpy", "num_tokens": 945}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Traffic Generators Survey}\label{ap:traffic-gen-survey}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction}
This appendix contains the cut-content of chapter~\ref{ch:literature-review}, serving now as complementary material. In the first section we show an extensive survey on traffic generator tools, and on the second, some use-cases of traffic-generators validation.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Traffic generator tools}
In this section, we present a short review of many open-source tools available for synthetic traffic generation and benchmarking. The aim of this section is to present the most frequently mentioned tools in the literature, as well as the most recent and advanced ones. The tables below present a survey of the main features of such tools. Some free, but not open-source, traffic generators are listed as well. Before presenting our survey, we refer to some tools mentioned in the literature for which we could not find source code or a manual.
\textbf{BRUNO}\cite{bruno-paper} is traffic generator implemented aiming performance and accuracy on timings. It has many configurable parameters that allow emulation of many web server scenarios. \textbf{Divide and conquer}\cite{validate-trafficgen}: is a replay engine that works in a distributed manner. It can split traces among multiple commodity PCs, and reply packets, to produce realistic traffic.
Some others mentioned tools \cite{web-ditg} we were not able to find any reference of available features are: \textbf{UDPgen}, \textbf{Network Traffic Generator}, \textbf{Packet Shell}, \textbf{Real-Time Voice Traffic Generator}, \textbf{PIM-SM Protocol Independent Multicast Packet Generator}, \textbf{TTCP}, \textbf{SPAK, Packet Generator}, \textbf{TfGen}, \textbf{TrafGen} and \textbf{Mtools}. Table ~\ref{tab:traffic-gen-links} presents an updated list of links for download.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Traffic Generators - Feature Survey}
Tables~\ref{tab:packet-level-tg1}, \ref{tab:multi-level-tg2}, \ref{tab:app-level-tg3}, and \ref{tab:replay-tg1} present a survey of the main features of such tools, such as support for operating systems, protocols, stochastic functions available for traffic generation, and traffic-generator class. Some free, but not open-source, traffic generators are listed as well.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% PACKET-LEVEL TGS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[ht!]
\centering
\caption{Summary of packet-level traffic generators.}
\scalebox{0.7}{
\begin{tabular}{ccccc}
\hline
\rowcolor[HTML]{9B9B9B}
\multicolumn{5}{c}{\cellcolor[HTML]{9B9B9B}\textbf{Packet-level Traffic Generators}} \\
\rowcolor[HTML]{9B9B9B}
\textbf{\begin{tabular}[c]{@{}c@{}}Traffic\\ Generator\end{tabular}} & \textbf{Operating System} & \textbf{\begin{tabular}[c]{@{}c@{}}Network \\ Protocols\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}Available \\ stochastic distributions\end{tabular}} & \textbf{Interface} \\ \hline
\textbf{D-ITG} & \begin{tabular}[c]{@{}c@{}}Linux, Windows, \\ Linux Familiar, \\ Montavista, Snapgear\end{tabular} & \begin{tabular}[c]{@{}c@{}}IPv4-6, ICMP, TCP\\ UDP, DCCP, SCTP\end{tabular} & \begin{tabular}[c]{@{}c@{}}constant, uniform, \\ exponential, pareto, \\ cauchy, normal, \\ poisson, gamma\end{tabular} & \begin{tabular}[c]{@{}c@{}}CLI, \\ Script, \\ API\end{tabular} \\
\rowcolor[HTML]{C0C0C0}
{\color[HTML]{000000} Ostinato} & {\color[HTML]{000000} \begin{tabular}[c]{@{}c@{}}Linux, Windows, \\ FreeBDS\end{tabular}} & {\color[HTML]{000000} \begin{tabular}[c]{@{}c@{}}Ethernet/802.3/LLC, \\ SNAP; VLAN, (with QinQ); \\ ARP, IPv4-6-Tunnelling;\\ TCP, UDP, ICMPv4, \\ ICMPv6, IGMP, MLD; \\ HTTP, SIP, RTSP, NNTP, \\ custom protocol, etc...\end{tabular}} & {\color[HTML]{000000} constant} & {\color[HTML]{000000} \begin{tabular}[c]{@{}c@{}}GUI,\\ CLI,\\ script,\\ API\end{tabular}} \\
PackETH & \begin{tabular}[c]{@{}c@{}}Linux, MacOS,\\ Windows\end{tabular} & \begin{tabular}[c]{@{}c@{}}Ehernet II, ethernet 802.3, \\ 802.1q, QinQ, ARP,\\ IPv4-6, UDP, TCP, ICMP, \\ ICMPv6, IGMP\end{tabular} & constant & CLI, GUI \\
\rowcolor[HTML]{C0C0C0}
{\color[HTML]{000000} Seagull} & {\color[HTML]{000000} Linux, Windows} & {\color[HTML]{000000} \begin{tabular}[c]{@{}c@{}}IPv4-6, UDP, TCP, SCTP, \\ SSL/TLS and SS7/TCAP. \\ custom protocol\end{tabular}} & {\color[HTML]{000000} \begin{tabular}[c]{@{}c@{}}constant, \\ poisson\end{tabular}} & {\color[HTML]{000000} CLI, GUI} \\
Iperf & \begin{tabular}[c]{@{}c@{}}Windows, Linux, \\ Android, MacOS X,\\ FreeBSD, OpenBSD,\\ NetBSD, VxWorks, \\ Solaris\end{tabular} & IPv4-6, UDP, TCP, SCTP & constant & CLI, API \\
\rowcolor[HTML]{C0C0C0}
{\color[HTML]{000000} BRUTE} & {\color[HTML]{000000} Linux} & {\color[HTML]{000000} IPv4-6, UDP, TCP} & {\color[HTML]{000000} \begin{tabular}[c]{@{}c@{}}constant, poisson, \\ trimodal, exponential\end{tabular}} & {\color[HTML]{000000} CLI, script} \\
SourcesOnOff & Linux & IPv4, TCP, UDP & \begin{tabular}[c]{@{}c@{}}weibull, pareto, \\ exponential, normal\end{tabular} & CLI \\
\rowcolor[HTML]{C0C0C0}
{\color[HTML]{000000} TG} & {\color[HTML]{000000} \begin{tabular}[c]{@{}c@{}}Linux, FreeBSD, \\ Solaris SunOS\end{tabular}} & {\color[HTML]{000000} IPv4, TCP, UDP} & {\color[HTML]{000000} \begin{tabular}[c]{@{}c@{}}constant, uniform, \\ exponential\end{tabular}} & {\color[HTML]{000000} CLI} \\
Mgen & Linux(Unix), Windows & IPv4-6, UDP, TCP, SINK & constant, exponential, & CLI, Script \\
\rowcolor[HTML]{C0C0C0}
KUTE & Linux 2.6 & UDP & constant & kernel module \\
\begin{tabular}[c]{@{}c@{}}RUDE \& \\ CRUDE\end{tabular} & \begin{tabular}[c]{@{}c@{}}Linux, Solaris SunOS, \\ FreeBSD\end{tabular} & IPv4, UDP & constant & CLI \\
\rowcolor[HTML]{C0C0C0}
NetSpec & Linux & IPv4,UDP, TCP & \begin{tabular}[c]{@{}c@{}}uniform, normal, log-normal, \\ exponential, poisson, \\ geometric, pareto, gamma\end{tabular} & script \\
Nping & Windows, Linux, Mac OS X & \begin{tabular}[c]{@{}c@{}}TCP, UDP, ICMP, \\ IPv4-6, ARP\end{tabular} & constant & CLI \\
\rowcolor[HTML]{C0C0C0}
MoonGen & Linux & \begin{tabular}[c]{@{}c@{}}IPv4-6, IPsec,\\ ICMP, UDP, TCP\end{tabular} & constant, poisson & scipt API \\
\begin{tabular}[c]{@{}c@{}}Dpdk \\ Pktgen\end{tabular} & Linux & \begin{tabular}[c]{@{}c@{}}IPv4, IPv6, ARP, \\ ICMP, TCP, UDP\end{tabular} & constant & CLI, script API \\
\rowcolor[HTML]{C0C0C0}
LegoTG & Linux & (depend on underlying tool) & (depend on underlying tool) & CLI, script \\
\begin{tabular}[c]{@{}c@{}}gen\_send/\\ gen\_recv\end{tabular} & \begin{tabular}[c]{@{}c@{}}Solaris, FreeBSD, \\ AIX4.1, Linux\end{tabular} & UDP & constant & CLI \\
\rowcolor[HTML]{C0C0C0}
mxtraf & Linux & TCP, UDP, IPv4 & constant & GUI, script \\
\begin{tabular}[c]{@{}c@{}}Jigs Traffic \\ Generator (JTG)\end{tabular} & Linux & TCP, UDP, IPv4-6 & constant & CLI \\ \hline
\end{tabular}
}
\label{tab:packet-level-tg1}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% MULTI/FLOW-LEVEL TGS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[ht!]
\centering
\caption{Summary of multi-level and flow-level traffic generators.}
\scalebox{0.7}{
\begin{tabular}{ccccc}
\hline
\rowcolor[HTML]{9B9B9B}
\multicolumn{5}{c}{\cellcolor[HTML]{9B9B9B}\textbf{Flow and Multi-level Traffic Generators}} \\
\rowcolor[HTML]{9B9B9B}
\textbf{\begin{tabular}[c]{@{}c@{}}Traffic\\ Generator\end{tabular}} & \textbf{Operating System} & \textbf{\begin{tabular}[c]{@{}c@{}}Network\\ Protocols\end{tabular}} & \textbf{Model} & \textbf{Interface} \\ \hline
Swing & Linux & \begin{tabular}[c]{@{}c@{}}IPv4, TCP, UDP,\\ HTTP, NAPSTER,\\ NNTP and SMTP\end{tabular} & \begin{tabular}[c]{@{}c@{}}Multi-level \\ auto-configurable \\ Ethernet\end{tabular} & CLI \\
\rowcolor[HTML]{C0C0C0}
{\color[HTML]{000000} Harpoon} & {\color[HTML]{000000} \begin{tabular}[c]{@{}c@{}}FreeBSD, Linux, \\ MacOS X, Solaris\end{tabular}} & {\color[HTML]{000000} \begin{tabular}[c]{@{}c@{}}TCP, UDP, IPv4,\\ IPv6\end{tabular}} & {\color[HTML]{000000} \begin{tabular}[c]{@{}c@{}}Flow-level \\ auto-configurable \\ Ethernet\end{tabular}} & {\color[HTML]{000000} CLI} \\
LiTGen & - & - & Multi-level Wifi & - \\
\rowcolor[HTML]{C0C0C0}
EAR & Linux & \begin{tabular}[c]{@{}c@{}}IEEE 802.11, ICMP, UDP, \\ TCP, TFTP, Telnet\end{tabular} & \begin{tabular}[c]{@{}c@{}}"Event Reproduction Ratio" technique - \\ wireless IEEE 802.11\end{tabular} & - \\ \hline
\end{tabular}
}
\label{tab:multi-level-tg2}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% APPLICATION-LEVEL TGS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[ht!]
\centering
\caption{Summary of application-level traffic generators. }
\scalebox{0.7}{
\begin{tabular}{cccc}
\hline
\rowcolor[HTML]{9B9B9B}
\multicolumn{4}{c}{\cellcolor[HTML]{9B9B9B}Application-level Traffic Generators} \\ \hline
\rowcolor[HTML]{9B9B9B}
\begin{tabular}[c]{@{}c@{}}Traffic\\ Generator\end{tabular} & Operating System & Model & Interface \\ \hline
GenSyn & Java Virtual Machine & User-behavior emulation & GUI \\
\rowcolor[HTML]{C0C0C0}
D-ITG & \begin{tabular}[c]{@{}c@{}}Linux, Windows, Linux \\ Familiar, Montavista, Snapgear\end{tabular} & \begin{tabular}[c]{@{}c@{}}Telnet, DNS, Quake3,\\ CounterStrike (active and inactive), \\ VoIP (G.711, G.729, G.723)\end{tabular} & CLI \\
Surge & Linux & Client/Server & CLI \\
\rowcolor[HTML]{C0C0C0}
Httperf & Linux & HTTP/1.0, HTTP/1.1 & CLI \\
VoIP Traffic Generator & - & VoIP & CLI \\
\rowcolor[HTML]{C0C0C0}
ParaSynTG & - & HTTP workload properties & CLI \\
NetSpec & LInux & \begin{tabular}[c]{@{}c@{}}HTTP, FTP, Telnet, Mpev video, \\ voice and video teleconference\end{tabular} & CLI \\ \hline
\end{tabular}
}
\label{tab:app-level-tg3}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% REPLAY ENGINES
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[ht!]
\centering
\caption{Summary of replay-engines traffic generators.}
\scalebox{0.7}{
\begin{tabular}{ccc}
\hline
\rowcolor[HTML]{9B9B9B}
\multicolumn{3}{c}{\cellcolor[HTML]{9B9B9B}\textbf{Replay-Engines Traffic Generators}} \\
\rowcolor[HTML]{9B9B9B}
\textbf{\begin{tabular}[c]{@{}c@{}}Traffic\\ Generator\end{tabular}} & \textbf{Operating System} & \textbf{Implementation} \\ \hline
Ostinato & Linux, Windows, FreeBDS & Software-only \\
\rowcolor[HTML]{C0C0C0}
PackETH & \begin{tabular}[c]{@{}c@{}}Linux, MacOS,\\ Windows\end{tabular} & Software-only \\
BRUNO & Linux & hardware-dependent \\
\rowcolor[HTML]{C0C0C0}
TCPReplay & Linux & Software-only \\
TCPivo & Linux & Software-only \\
\rowcolor[HTML]{C0C0C0}
\begin{tabular}[c]{@{}c@{}}NetFPGA \\ PacketGenerator\end{tabular} & Linux & Hardware \\
\begin{tabular}[c]{@{}c@{}}NetFPGA \\ Caliper\end{tabular} & Linux & Hardware \\
\rowcolor[HTML]{C0C0C0}
\begin{tabular}[c]{@{}c@{}}NetFPGA \\ OSTN\end{tabular} & Linux & Hardware \\
\textbf{MoonGen} & Linux & Hardware-dependent \\
\rowcolor[HTML]{C0C0C0}
\textbf{DPDK Pktgen} & Linux & Hardware-dependent \\
\textbf{NFPA} & Linux & hardware-dependent \\ \hline
\end{tabular}
}
\label{tab:replay-tg1}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Packet-level traffic generators}
\textbf{D-ITG}\cite{ditg-paper}\cite{web-ditg}: D-ITG (Distributed Internet Traffic Generator) is a platform capable to produce IPv4 and IPv6 traffic defined by IDT and PS probabilistic distributions such as constant, uniform, Pareto, Cauch, Normal, Poisson, Gamma, Weibull, and On/Off; both configurable and pre-defined for many applications, from Telnet, through online games. It provides many flow-level options of customization, like duration, start delay and number of packets, support to many link-layer and transport-layer protocols, options, sources and destinations addresses/ports. It has support for \acrshort{NAT} traversal, so it is possible to make experiments between two different networks separated by the cloud. D-ITG can also be used to measure packet loss, jitter, and throughput. D-ITG may be used through a \acrshort{CLI}, scripts, or a API, that can be used to create applications and remotely control other hosts through a daemon.
\textbf{Ostinato}\cite{web-ostinato}: Ostinato is a packet crafter, network traffic generator and analyzer with a friendly \acrshort{GUI} (“Wireshark in reverse” as the documentation says) and a Python API. This tool permits craft and sends packets of different protocols at different rates. Support Server/Client communication and a vast variety of protocols, from the link layer (such as 802.3 and \acrshort{VLAN}) to the application layer (such HTTP and IP).
It is also possible to add any unimplemented protocols, through scripts defined by the user.
\textbf{Seagull}\cite{wp-seagull}\cite{web-seagull}: an Open Source Multi-protocol traffic generator 2009]: Seagull is a traffic generator and test open-source tool, released by HP.
It has support of many protocols, from link layer to application layer, and its support is easily extended, via XML dictionaries. As the documentation argues, the protocol extension flexibility is one of the main features. It supports high speeds, and is reliable, being tested through hundreds of hours.
It can also generate traffic using three statistical models: uniform(constant), best-effort and Poisson.
\textbf{BRUTE}\cite{web-brute}: Is a traffic generator that operates on the top of Linux 2.4-6 and 2.6.x, not currently being supported on newer versions. It also supports some stochastic functions (constant, poison, trimodal) for departure time burst, and can simulate VoIP traffic.
\textbf{PackETH} \cite{web-packeth}: PackETH is GUI and CLI stateless packet generator tool for ethernet. It supports many adjustments of parameters, and many protocols as well, and can set MAC addresses.
\textbf{Iperf}\cite{web-iperf}: Ipef is a network traffic generator tools, designed for the measure of the maximum achievable bandwidth on IP networks, for both TCP and UDP traffic, but can evaluate delay, windows size, and packet loss. It has a GUI interface, called Jperf\cite{web-jperf}. There is also a JavaAPI, for automating tests s\cite{jperf-git}. Support IPv4 and IPv6.
\textbf{NetPerf}\cite{web-netperf}: Netperf is a benchmark tool that can be used to measure the performance of many types of networks, providing tests for both unidirectional throughput and end-to-end latency. It has support for TCP, UDP, and \acrshort{SCTP}, both over IPv4 and IPv6.
\textbf{sourcesOnOff}\cite{sourcesonoff-paper} \cite{web-sourcesonoff}: sourcesOnOff is a new traffic generator released on 2014, that aims to generate realistic synthetic traffic using probabilistic models to control on and off time of traffic flows. As shown on the paper, it is able to guarantee self-similarity, an has support to many probabilistic distributions for the on/off times: Weibull, Pareto, Exponential and Gaussian. Supports TCP and UDP over IPv4.
\textbf{TG}\cite{web-tg}: TG is a traffic generator that can generate and receive one-way packet streams transmitted from the UNIX user level process between source and traffic sink nodes. A simple specification language controls it, that enables the craft of different lengths and interarrival times distributions, such as Constant, uniform, exponential and on/off(markov2).
\footnote{not open-source}\textbf{MGEN}\cite{web-mgen}: MGEN (Multi-Generator) is a traffic generator developed by the Naval Research Laboratory (NRL) PROTocol Engineering Advanced Networking (PROTEAN) Research Group. It can be used to emulate the traffic patterns of unicast and/or multicast UDP and TCP IP applications. It supports many different types of stochastic functions, nominated periodic, Poisson, burst jitter and clone which can control inter-departure times and packet size.
\textbf{KUTE}\cite{web-kute}: KUTE is a kernel level packet generator, designed to have a maximum performance traffic generator and receiver mainly for use with Gigabit Ethernet. It works in the kernel level, sending packets as fast as possible, direct to the hardware driver, bypassing the stack. However, KUTE works only on Linux 2.6, and has only be tested on Ethernet Hardware. Also, it only supports constant UDP traffic.
\textbf{RUDE \& CRUDE}\cite{web-rude-crude}: RUDE (Real-time UDP Data Emitter) and CRUDE (Collector for RUDE), are small and flexible programs which run on user-level. It has a GUI called GRUDE. It works only with UDP protocol.
\footnote{not open-source}\textbf{NetSpec}\cite{web-netspec}: NetSpec is a tool designed to do network tests, as opposed to doing point to point testing. NetSpec provides a framework that allows a user to control multiple processes across multiple hosts from a central point of control, using daemons that implement traffic sources and sinks, along with measurement tools. Also, it can model many different traffic patterns and applications, such as maximum host rate, Constant Bit Rate (CBR), WWW, World Wide Web, FTP, File Transfer Protocol, telnet, MPEG video, voice, and video teleconference.
\textbf{Nping}\cite{web-nping}: Nping is a tool that can be used to detect active hosts, and as a traffic generator for network stack stress testing, ARP poisoning, Denial of Service attacks, route tracing, etc. The Nping CLI gives users control over protocol headers.
\textbf{TCPreplay}\cite{web-tcpreplay}: TCPreplay is a user-level replay engine, that can use pcap files as input, and then forward, packets in a network interface. It can modify some header parameters as well.
\textbf{TCPivo}\cite{tcpivo-paper}\cite{web-tcpivo}: TCPivo is a high-speed traffic replay engine that is able to read traffic traces, and replay packets on a network interface, working at the kernel level. It does not currently support kernel versions greater than 2.6.
\textbf{NetFPGA PacketGenerator}\cite{web-netfpgapacketgenerator}: NetFPGA Packet Generator is a hardware-based traffic generator and capture tool, build over the NetFPGA 1G, and open \acrshort{FPGA} platform with 4 ethernet interfaces of 1 Gigabit of bandwidth each. It is a replay engine tool which uses as input pcap files. It is able to accurately control the delay between the frames, with the default delay being the same in the pcap file. It is also able to capture packets and report statistics of the traffic.
\textbf{NetFPGA Caliper}\cite{web-caliper}: is a hardware-based traffic generator, build on NetFPGA 1G, built over NetThreads platform, an FPGA microprocessor which support threads programming. Different form NetFPGA Packet Generator, Caliper can produce live packets. It is written in C.
\textbf{NetFPGA OSNT}\cite{web-osnt}: OSNT (Open Source Network Tester) is hardware based network traffic generator built over the NetFPGA 10G. As NetFPGA 1G, NetFPGA 10G is an FPGA platform with 4 ethernet interfaces, but with 10 Gigabits of bandwidth. OSNT is a replay engine and is loaded with pcap traces.
\textbf{Dpdk Pktgen}\cite{web-dpdk-pktgen}: Pktgen is a traffic generator measurer built over DPDK. DPDK is a development kit, a set of libraries and drivers for fast packet processing. DPDK was designed to run on any processor but has some limitation in terms of supported NICs, that can be found on its website.
\textbf{MoonGen}\cite{moongen-paper}\cite{web-moongen}: MoonGen is a scriptable highspeed packet generator built over DPDK and LuaJIT. It can send packets at 10 Gbit/s, even with 64 bytes packets on a single CPU core. MoonGen can achieve this rate even if each packet is modified by a Lua script. Also, it provides accurate timestamping and rate control. It is able to generate traffic using several protocols ( IPv4, IPv6, IPsec, ARP, ICMP, UDP, and TCP), and can generate different inter-departure times, like a Poisson process and burst traffic.
\textbf{gen\_send/gen\_recv}\cite{web-gensend-genrecv}: gen\_send and gen\_recv are simple UDP traffic generator applications. It uses UDP sockets. gen\_send can control features like desired data rate, packet size and inter-packet time.
\textbf{mxtraf}\cite{web-mxtraf}: mxtraf enables a small number of hosts to saturate a network, with a tunable mixture of TCP and UDP traffic.
\textbf{Jigs Traffic Generator (JTG)}\cite{web-jtg}: is a simple, accurate traffic generator. JTG process only sends one stream of traffic, and stream characteristics are defined only by command line arguments. It also supports IPv6.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Application-level/Special-scenarios traffic generators}
\footnote{not open-source}\textbf{ParaSynTG}\cite{parasyntg-paper}: application-level traffic generator configurable by input parameters, which considers most of the observes www traffic workload properties.
\footnote{not open-source}\textbf{GenSyn}\cite{web-gensyn}: network traffic generator implemented in Java that mimics TCP and UDP connections, based on user behavior.
\textbf{Surge}\cite{surge-paper}: Surge is an application level workload generator which emulates a set of real users accessing a web server. It matches many empirical measurements of real traffic, like server file distribution, request size distribution, relative file popularity, idle periods of users and other characteristics.
\textbf{Httperf}\cite{web-httperf}: Is an application lever traffic generator to measure web server performance. It uses the protocol HTTP (HTTP/1.0 and HTTP/1.1), and offer many types of workloads while keeping track of statistics related to the generated traffic.
Its most basic operation is to generate a set of HTTP GET requests and measure the number of replies and response rate.
\textbf{VoIP Traffic Generator}: it is a traffic generator written in Perl that creates multiple streams of traffic, aiming to simulate VoIP traffic.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Flow-level and multi-level traffic generators}
\textbf{Harpoon}\cite{harpoon-paper}: Harpoon is a flow-based traffic generator, that can automatically extract form Netflow traces parameters, in order to generate flows that exhibit the same statistical characteristics measured before, including temporal and spatial characteristics.
\textbf{Swing}\cite{swing-paper} \cite{web-swing}: Swing is a closed-loop multi-layer and network responsive generator. It can read capture traces and captures the packet interactions of many applications, being able to models distributions for the user, application, and network behavior, stochastic and responsively. Swing can model user behavior, REEs, connection, packets, and network.
\footnote{not open-source}\textbf{LiTGen}(Lightweight Traffic Generator)\cite{litgen-paper} is an open-loop, multilevel traffic generator. It can model wireless network traffic in a peer user and application basis. This tool model the traffic in three different levels: packet level, object level (smaller parts of an application session), and session level.
\footnote{not open-source}\textbf{EAR}\cite{ear-paper}: traffic generator that uses a technique called“EventReproduction Ratio” to mimic wireless IEEE 802.11 protocol behavior.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Others traffic generation tools}
\textbf{NFPA}\cite{nfpa-paper}: NFPA is a benchmark tool based on DPDK Pkgen, specialized in executing and automatize performance measurements over network functions. It works being directly connected to a specific device under tests. It uses built-in, and user-defined traffic traces and Lua scripts control and collect information of DPDK Pktgen. It has a command line and Web interface, and automatically plot the results.
\textbf{LegoTG}\cite{legotg-paper}: LegoTG is a modular framework for composing custom traffic generation. It aims to simplify the combination on the use of different traffic generators and modulators on different testbeds, automatizing the process of installation, execution, resource allocation, and synchronization via a centralized orchestrator, which uses a software repository. It already has support to many tools, and to add support to new tools is necessary to add and edit two files, called TG block, and ExFile.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Traffic Generators -- Repository Survey}
\sloppy
\begin{table}[ht!]
\centering
\sloppy
\caption{Links for the traffic generators repositories}
\label{tab:traffic-gen-links}
%\begin{tabular}{@{}ll@{}}
\begin{tabularx}{\textwidth}{@{}ll@{} p{10.0cm}}
\toprule
\sloppy
Traffic Generator & Repository \\ \midrule
D-ITG & \href{http://traffic.comics.unina.it/software/ITG/}{http://traffic.comics.unina.it/software/ITG/} \\
Ostinato & \href{http://ostinato.org/}{http://ostinato.org/} \\
Seagull & \href{http://gull.sourceforge.net/}{http://gull.sourceforge.net/} \\
BRUTE & \href{http://wwwtlc.iet.unipi.it/software/brute/ }{http://wwwtlc.iet.unipi.it/software/brute/ } \\
PackETH & \href{http://packeth.sourceforge.net/packeth/Home.html}{http://packeth.sourceforge.net/packeth/Home.html} \\
Iperf & \href{https://iperf.fr/}{https://iperf.fr/} \\
NetPerf & \href{http://www.netperf.org/netperf/}{http://www.netperf.org/netperf/} \\
sourcesOnOff & \href{http://www.recherche.enac.fr/~avaret/sourcesonoff}{http://www.recherche.enac.fr/~avaret/sourcesonoff} \\
TG & \href{http://www.postel.org/tg/}{http://www.postel.org/tg/} \\
MGEN* & \href{http://www.nrl.navy.mil/itd/ncs/products/mgen }{http://www.nrl.navy.mil/itd/ncs/products/mgen } \\
KUTE & \href{http://caia.swin.edu.au/genius/tools/kute/}{http://caia.swin.edu.au/genius/tools/kute/} \\
RUDE \& CRUDE & \href{http://rude.sourceforge.net/}{ http://rude.sourceforge.net/} \\
Pktgen & \href{http://www.linuxfoundation.org/collaborate/workgroups/networking/pktgen}{http://www.linuxfoundation.org/collaborate/workgroups/networking/pktgen} \\
NetSpec & \href{http://www.ittc.ku.edu/netspec/}{http://www.ittc.ku.edu/netspec/} \\
Nping & \href{https://nmap.org/nping/ }{https://nmap.org/nping/} \\
TCPreplay & \href{http://tcpreplay.appneta.com/}{http://tcpreplay.appneta.com/} \\
TCPivo & \href{http://www.thefengs.com/wuchang/work/tcpivo/}{http://www.thefengs.com/wuchang/work/tcpivo/} \\
NetFPGA PacketGenerator & \href{https://github.com/NetFPGA/netfpga/wiki/PacketGenerator}{https://github.com/NetFPGA/netfpga/wiki/PacketGenerator} \\
NetFPGA Caliper & \href{https://github.com/NetFPGA/netfpga/wiki/PreciseTrafGen}{https://github.com/NetFPGA/netfpga/wiki/PreciseTrafGen} \\
NetFPGA OSNT & \href{https://github.com/NetFPGA/OSNT-Public/wiki/OSNT-Traffic-Generator}{https://github.com/NetFPGA/OSNT-Public/wiki/OSNT-Traffic-Generator} \\
DPDK Pktgen & \href{http://pktgen.readthedocs.io/en/latest/getting_started.html}{http://pktgen.readthedocs.io/en/latest/getting\_started.html} \\
MoonGen & \href{https://github.com/emmericp/MoonGen}{https://github.com/emmericp/MoonGen} \\
gen\_send/gen\_recv & \href{http://www.citi.umich.edu/projects/qbone/generator.html}{http://www.citi.umich.edu/projects/qbone/generator.html} \\
mxtraf & \href{http://mxtraf.sourceforge.net/}{http://mxtraf.sourceforge.net/} \\
JTG & \begin{tabular}[c]{@{}l@{}}\href{https://sourceforge.net/projects/iperf/files/}{https://sourceforge.net/projects/iperf/files/}\\ \href{https://github.com/AgilData/jperf}{https://github.com/AgilData/jperf}\end{tabular} \\
GenSyn & \href{http://www.item.ntnu.no/people/personalpages/fac/poulh/gensyn}{http://www.item.ntnu.no/people/personalpages/fac/poulh/gensyn} \\
SURGE & \href{http://cs-www.bu.edu/faculty/crovella/surge_1.00a.tar.gz}{http://cs-www.bu.edu/faculty/crovella/surge\_1.00a.tar.gz} \\
Httperf & \href{https://linux.die.net/man/1/httperf}{https://linux.die.net/man/1/httperf} \\
VoIP Traffic Generator & \href{https://sourceforge.net/projects/voiptg/}{https://sourceforge.net/projects/voiptg/} \\
Harpoon & \href{http://cs.colgate.edu/~jsommers/harpoon/}{http://cs.colgate.edu/~jsommers/harpoon/} \\
Swing & \href{http://cseweb.ucsd.edu/~kvishwanath/Swing/}{http://cseweb.ucsd.edu/~kvishwanath/Swing/} \\
NFPA & \\
LegoTG & \\ \bottomrule
\end{tabularx}
%\end{tabular}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Validation of Ethernet traffic generators: some use cases}
In this section, we list some use cases of validation of Ethernet traffic generators. Our validation methods used in chapter 4 and 5 were based on them. We are going to present seven different study cases on validation of related traffic generators. They are Swing, Harpoon [Sommers et al. 2004], D-ITG [Botta et al. 2012], sourcesOnOff [Varet2014], MoonGen [Emmerich et al.2015], LegoTG [Bartlett e Mirkovic2015] and NFPA [Csikor et al. 2015].
\subsection{Swing}
Swing\cite{swing-paper} is at present, one of the primary references of realistic traffic generation. The authors extracted bidirectional metrics from a network link of synthetic traces. Their goals were to get realism, responsiveness, and randomness. They define realism as a trace that reflects the following characteristics of the original:
Packet inter-arrival rate and burstiness across many time scales;
\begin{itemize}
\item Packet size distributions;
\item Flow characteristics as arrival rate and length distributions;
\item Destination IPs and port distributions.
\end{itemize}
The traffic generator uses a structural model that accounts for interactions between many layers of the network stack. Each layer has many control variables, which are randomly generated by a stochastic process. They begin the parameterization by classifying \textit{pcap}\cite{web-libpcap} files; from this data, they can estimate the parameters.
They validate the results using public available traffic traces, from \cite{web-mawi} and CAIDA\cite{web-caida}. On the paper, the authors focuses on the validation metrics below:
\begin{itemize}
\item Comparison of estimated parameters of the original and swing generated traces;
\item Comparison of aggregate and per-application bandwidth and packets per seconds;
\item QoS metrics such as two-way delay and loss rates;
\item Scaling analysis, via Energy multiresolution energy analysis.
\end{itemize}
To the vast majority of the results, both original and swing traces results were close to each other. Thus, Swing was able to match aggregate and burstiness metrics, per byte and per packet, across many timescales.
\subsection{Harpoon}
Harpoon\cite{harpoon-validation}\cite{harpoon-paper} is a traffic generator able to generate representative traffic at IP flow level. It can generate TCP and IP with the same byte, packet, temporal and spatial characteristics measured at routers. Also, Harpoon is a self-configurable tool, since it automatically extracts parameters from network traces. It estimates some parameters from original traffic trace: file sizes, inter-connection times, source and destination IP addresses, and the number of active sessions.
As proof of concept \cite{harpoon-validation}, the authors compared statistics from the original, and harpoon’s generated traces. The two main types of comparisons: diurnal throughput, and stochastic variable CDF and frequency distributions. Diurnal throughput refers to the average bandwidth variation within a day period. In a usual network, during the day the bandwidth consumption is larger than the night. Also, they compared:
\begin{itemize}
\item CDF of bytes transferred per 10 minutes
\item CDF of packets transferred per 10 minutes
\item CDF of inter-connection time
\item CDF of file size
\item CDF of flow arrivals per 1 hour
\item Destination IP address frequency
\end{itemize}
In the end, they showed the differences in throughput evaluation of a Cisco 6509 switch/router using Harpoon and a constant rate traffic generator. Harpoon was proven able to give close CDFs, frequency and diurnal throughput plots compared to the original traces. Also, the results demonstrated that Harpoon provides a more variable load on routers, compared to constant rate traffic. It indicates the importance of using realistic traffic traces on the evaluation of equipment and technologies.
\subsection{D-ITG}
D-ITG\cite{ditg-paper} is a network traffic generator, with many configurable features. The tool provides a platform that meets many emerging requirements for a realistic traffic generation. For example, multi-platform, support of many protocols, distributed operation, sending/receiving flow scalability, generation models, and analytical model based generation high bit/packet rate. You can see different analytical and models and protocols supported by D-ITG at table~\ref{tab:packet-level-tg1}.
To the evaluation of realism on analytical model parameterization of D-ITG, the authors used a synthetic replication of a eight players's LAN party of Age of Mythology\footnote{\href{https://www.ageofempires.com/games/aom/}{https://www.ageofempires.com/games/aom/}}. They have captured traffic flows during the party, then, they modeled its packet size and inter-packet time distributions. They show that the synthetic traffic and the analytical model have similar curves of packet size and inter-packet time; thus it can approximate the empirical data. Also, the bit rate's mean and the standard deviation of the empirical and synthetic data are similar.
\subsection{sourcesOnOff}
Varet et al.\cite{sourcesonoff-paper} create an application implemented in C, called SourcesOnOff. It models the activity interval of packet trains using probabilistic distributions. To choose the best stochastic model, the authors had captured traffic traces using TCPdump. Then the developed tool that could configure out what distribution (Weibull, Pareto, Exponential, Gaussian, etc.) is better to the original traffic traces. They used the Bayesian Information Criterion (BIC) for distance assessment and tested the smaller BIC for each distribution, insuring a good correlation between the original and generated traces and self-similarity.
The validation methods used on sourcesOnOff are:
\begin{itemize}
\item A visual comparison between On time and Off time of the original trace and the stochastic fitting;
\item QQplots, which aim to evaluate the correlation between inter-trains duration of real and generated traffic;
\item Measurement of the measured throughput's autocorrelation~\ref{ap:revision-probability} of the real and synthetic traffic;
\item Hurst exponent computation of the real and the synthetic trace;
\end{itemize}
The results pointed to an excellent stochastic fitting, better for On-time values. On the other hand, the correlation value of the QQplot was more significant for the Off-time values (99.8\% versus 97.9\%). In the real and synthetic traces, the throughput's autocorrelation remained within an upper limit of 5\%. Finally, the ratio between the evaluated Hurst exponents always remained smaller than 12\%.
\subsection{MoonGen}
MoonGen\cite{moongen-paper} is a high-speed scriptable paper capable of saturating 10 GnE link with 64 bytes packets, using a single CPU core. The authors built it over DPDK and LuaJit, enabling the user to have high flexibility on the crafting of each packet, through Lua scripts. It has multi-core support and runs on commodity server hardware. MoonGen also can test latency with sub-microsecond precision and accuracy, using hardware timestamping of modern NICs cards. The Lua scripting API enable the implementation and high customization along with high-speed. This includes the controlling of packet sizes and inter-departure times.
The authors evaluated this traffic generator focused on throughput metrics, rather than others. Also, they used small packet sizes (64 to 256 bytes), since the per-packet costs dominate. In their work, they were able to surpass 15 Gbit/s with an XL710 40GbE NIC. Also, they achieved throughput values close to the line rate with packets of 128 bytes, and 2 CPU cores.
\subsection{LegoTG}
Bartlett et al.\cite{legotg-paper} implemented a modular framework for composing custom traffic generation, called LegoTG. As argued by the authors (and by our present work), automation of many aspects of traffic generation is a desirable feature. The process of how to generate proper background traffic may become research by itself. Traffic generators available today offer a single model and a restricted set of features into a single code base, makes customization difficult.
The primary purpose of their experiment was to show how LegoTG could generate background traffic, only. Also, they showed how realistic background traffic could influence research conclusions. The test chosen is one of the use cases proposed for Swing\cite{background-traffic-matter}, and evaluated the error on bandwidth estimation of different measurement tools. It showed that LegoTG could provide a secure and custom traffic generation.
|
{"hexsha": "1c34ea9993e4a6502004166525f93fcc7a0f4c71", "size": 56459, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/appendix/appendixC.tex", "max_stars_repo_name": "AndersonPaschoalon/MasterDegreeThesis-ptBr", "max_stars_repo_head_hexsha": "e4cbf752deecf124aa0adfc630d510645d8c4ea6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/appendix/appendixC.tex", "max_issues_repo_name": "AndersonPaschoalon/MasterDegreeThesis-ptBr", "max_issues_repo_head_hexsha": "e4cbf752deecf124aa0adfc630d510645d8c4ea6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/appendix/appendixC.tex", "max_forks_repo_name": "AndersonPaschoalon/MasterDegreeThesis-ptBr", "max_forks_repo_head_hexsha": "e4cbf752deecf124aa0adfc630d510645d8c4ea6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 138.3799019608, "max_line_length": 955, "alphanum_fraction": 0.486158097, "num_tokens": 10226}
|
[STATEMENT]
lemma sqrt_nat_ceiling[simp]: "sqrt_nat_ceiling x = \<lceil> sqrt (real x) \<rceil>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sqrt_nat_ceiling x = \<lceil>sqrt (real x)\<rceil>
[PROOF STEP]
unfolding sqrt_nat_ceiling_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. root_nat_ceiling 2 x = \<lceil>sqrt (real x)\<rceil>
[PROOF STEP]
by (simp add: sqrt_def)
|
{"llama_tokens": 172, "file": "Sqrt_Babylonian_Sqrt_Babylonian", "length": 2}
|
[STATEMENT]
theorem three_divides_nat:
shows "(3 dvd n) = (3 dvd sumdig n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (3 dvd n) = (3 dvd sumdig n)
[PROOF STEP]
proof (unfold sumdig_def)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (3 dvd n) = (3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10))
[PROOF STEP]
have "n = (\<Sum>x<nlen n. (n div (10::nat)^x mod 10) * 10^x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. n = (\<Sum>x<nlen n. n div 10 ^ x mod 10 * 10 ^ x)
[PROOF STEP]
by (rule exp_exists)
[PROOF STATE]
proof (state)
this:
n = (\<Sum>x<nlen n. n div 10 ^ x mod 10 * 10 ^ x)
goal (1 subgoal):
1. (3 dvd n) = (3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
n = (\<Sum>x<nlen n. n div 10 ^ x mod 10 * 10 ^ x)
goal (1 subgoal):
1. (3 dvd n) = (3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10))
[PROOF STEP]
have "3 dvd (\<Sum>x<nlen n. (n div (10::nat)^x mod 10) * 10^x) =
(3 dvd (\<Sum>x<nlen n. n div 10^x mod 10))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10 * 10 ^ x)) = (3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10))
[PROOF STEP]
by (rule three_div_general)
[PROOF STATE]
proof (state)
this:
(3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10 * 10 ^ x)) = (3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10))
goal (1 subgoal):
1. (3 dvd n) = (3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10))
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
n = (\<Sum>x<nlen n. n div 10 ^ x mod 10 * 10 ^ x)
(3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10 * 10 ^ x)) = (3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10))
[PROOF STEP]
show "3 dvd n = (3 dvd (\<Sum>x<nlen n. n div 10^x mod 10))"
[PROOF STATE]
proof (prove)
using this:
n = (\<Sum>x<nlen n. n div 10 ^ x mod 10 * 10 ^ x)
(3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10 * 10 ^ x)) = (3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10))
goal (1 subgoal):
1. (3 dvd n) = (3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(3 dvd n) = (3 dvd (\<Sum>x<nlen n. n div 10 ^ x mod 10))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1122, "file": null, "length": 10}
|
# Reply to a remark the way Bob, the lackadaisical teenager, would.
# The stimulus is trimmed first; the checks are ordered so that silence
# wins over shouting, which wins over questioning.
function bob(stimulus::AbstractString)
    trimmed = strip(stimulus)
    issilence(trimmed)  && return "Fine. Be that way!"
    isshouting(trimmed) && return "Whoa, chill out!"
    isquestion(trimmed) && return "Sure."
    return "Whatever."
end
# A stimulus counts as silence when nothing at all was said.
issilence(s::AbstractString) = length(s) == 0
# A question is any non-empty stimulus whose final character is '?'.
isquestion(s::AbstractString) = !isempty(s) && last(s) == '?'
# Shouting means every letter present is uppercase.
# NOTE: an all-uppercase (or empty) stimulus short-circuits to true, and a
# stimulus containing no letters at all is never shouting — this mirrors
# the original decision order exactly.
function isshouting(stimulus::AbstractString)
    all(isupper, stimulus) && return true
    letters = filter(isalpha, stimulus)
    isempty(letters) && return false
    return all(isupper, letters)
end
|
{"hexsha": "a38470e120712a2797ac60410a35a98b61dae1f2", "size": 720, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "exercises/bob/example.jl", "max_stars_repo_name": "miguelraz/julia-1", "max_stars_repo_head_hexsha": "ef059b088e967251b3180820ac4d2768f1b0ac85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "exercises/bob/example.jl", "max_issues_repo_name": "miguelraz/julia-1", "max_issues_repo_head_hexsha": "ef059b088e967251b3180820ac4d2768f1b0ac85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exercises/bob/example.jl", "max_forks_repo_name": "miguelraz/julia-1", "max_forks_repo_head_hexsha": "ef059b088e967251b3180820ac4d2768f1b0ac85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.2258064516, "max_line_length": 62, "alphanum_fraction": 0.65, "num_tokens": 189}
|
import math
import re
import urllib
import urllib.request

import bs4
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from astropy import units as u
from astropy.table import QTable, Table, Column
from astroquery.simbad import Simbad
def plot(star_name):
    """Fetch Hipparcos epoch photometry for a star and plot its light curve.

    Parameters
    ----------
    star_name : str
        Either an explicit ``'HIP<number>'`` identifier or any name that
        SIMBAD can resolve (the HIP number is then looked up among its
        aliases).

    Side effects
    ------------
    Prints the median / 5th-percentile / 95th-percentile magnitudes parsed
    from the catalogue page, and draws a scatter plot with error bars on
    the current matplotlib figure.  Prints a message instead if the page
    cannot be parsed for this star.
    """
    # Resolve the star name to a Hipparcos catalogue (HIP) number.
    if star_name.startswith('HIP'):
        hip = int(star_name.split('HIP')[1])
    else:
        result_table = Simbad.query_objectids(star_name)
        line = list(filter(lambda x: 'HIP' in str(x), result_table))
        hip = int(line[0][0].split('HIP')[1])
    try:
        url = f'https://hipparcos-tools.cosmos.esa.int/cgi-bin/HIPcatalogueSearch.pl?hipepId={hip}'
        webpage = str(urllib.request.urlopen(url).read())
        soup = bs4.BeautifulSoup(webpage, 'html.parser')
        # The photometry table is served inside a single <pre> element.
        # NOTE: the page bytes were stringified, so escape sequences appear
        # literally as '\\n' / '\\r\\n' — that is why the splits below use them.
        text = soup.find(name='pre').get_text().lstrip("\\n").rstrip("\\r\\n'")
        text = text.split('\\n', 17)
        # Header lines 6, 8 and 9 carry 'label: value ...' pairs for the
        # median, 5th-percentile and 95th-percentile magnitudes.
        label = [text[6], text[8], text[9]]
        label_list = [float(x.split(':', 1)[1].strip().split(' ')[0])
                      for x in label]
        print('Median Magnitude(red line) (mag):', label_list[0])
        print('5th percentile (max) (mag):', label_list[1])
        print('95th percentile (min) (mag):', label_list[2])
        # Remainder of the page: one '|'-separated row per observation.
        # Transpose rows -> columns; columns 0..2 are epoch, Hp mag, error.
        # (The original computed this whole section twice; once is enough.)
        text_list = text[17].split('\\r\\n')
        data_list = [x.split('|') for x in text_list]
        data_list_t = list(map(list, zip(*data_list)))
        for i in (0, 1, 2):
            data_list_t[i] = [float(x) for x in data_list_t[i]]
        plt.title(f'Epoch photometry data of HIP {hip}')
        plt.xlabel('Observation epoch (BJD - 2440000)')
        plt.ylabel('Calibrated Hp (mag)')
        # Brighter = smaller magnitude, so flip the y axis.
        plt.gca().invert_yaxis()
        plt.axhline(y=label_list[0], color='red')
        plt.scatter(data_list_t[0], data_list_t[1], s=10, color='blue')
        plt.errorbar(data_list_t[0], data_list_t[1], yerr=data_list_t[2],
                     fmt='none', ecolor='cyan')
    except IndexError:
        # Raised when the <pre> content lacks the expected header/table rows.
        print(f'The data of HIP {hip} cannot be found.')
|
{"hexsha": "e1d1d763bfc8803ba66a124f5c135582b64948ac", "size": 2362, "ext": "py", "lang": "Python", "max_stars_repo_path": "hipy/plot_lightcurve.py", "max_stars_repo_name": "Yifan-Xuan/hipy", "max_stars_repo_head_hexsha": "dd569b89d6d246ea8444182a7e0f8dc7d3381c4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hipy/plot_lightcurve.py", "max_issues_repo_name": "Yifan-Xuan/hipy", "max_issues_repo_head_hexsha": "dd569b89d6d246ea8444182a7e0f8dc7d3381c4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hipy/plot_lightcurve.py", "max_forks_repo_name": "Yifan-Xuan/hipy", "max_forks_repo_head_hexsha": "dd569b89d6d246ea8444182a7e0f8dc7d3381c4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.724137931, "max_line_length": 99, "alphanum_fraction": 0.6185436071, "include": true, "reason": "import numpy,from astropy", "num_tokens": 657}
|
from load_grid import LoadSudokuGrid
from visualize_results import Visualize
from sudoku_solver import SudokuSolver
import numpy as np
if __name__ == '__main__':
    # Grid entered by the user; None when loading was aborted or failed.
    user_grid = LoadSudokuGrid().grid
    if user_grid is not None:
        # Solve on a deep copy so the original grid survives for display.
        working_copy = np.array([list(row) for row in user_grid])
        solution = SudokuSolver(working_copy).solve_grid()
        if solution is not None:
            Visualize(solution, user_grid).show_solution()
|
{"hexsha": "7a9b676afb466a994b873f991ea30dfafe40b10f", "size": 433, "ext": "py", "lang": "Python", "max_stars_repo_path": "RUN.py", "max_stars_repo_name": "MAXIORBOY/SudokuSolver", "max_stars_repo_head_hexsha": "9046feda159ff32613e9c02f5f6f21a57f09ef23", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "RUN.py", "max_issues_repo_name": "MAXIORBOY/SudokuSolver", "max_issues_repo_head_hexsha": "9046feda159ff32613e9c02f5f6f21a57f09ef23", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-27T18:09:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-27T18:16:58.000Z", "max_forks_repo_path": "RUN.py", "max_forks_repo_name": "MAXIORBOY/SudokuSolver", "max_forks_repo_head_hexsha": "9046feda159ff32613e9c02f5f6f21a57f09ef23", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3076923077, "max_line_length": 110, "alphanum_fraction": 0.7551963048, "include": true, "reason": "import numpy", "num_tokens": 100}
|
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2004-2011. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2001-2003
// William E. Kempf
//
// Permission to use, copy, modify, distribute and sell this software
// and its documentation for any purpose is hereby granted without fee,
// provided that the above copyright notice appear in all copies and
// that both that copyright notice and this permission notice appear
// in supporting documentation. William E. Kempf makes no representations
// about the suitability of this software for any purpose.
// It is provided "as is" without express or implied warranty.
#ifndef BOOST_INTERPROCESS_TEST_MUTEX_TEST_TEMPLATE_HEADER
#define BOOST_INTERPROCESS_TEST_MUTEX_TEST_TEMPLATE_HEADER
#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/exceptions.hpp>
#include "boost_interprocess_check.hpp"
#include "util.hpp"
#include <boost/thread/thread.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <iostream>
namespace boost { namespace interprocess { namespace test {
// Single-threaded exercise of scoped_lock construction, lock() and
// unlock() against a freshly default-constructed mutex of type M.
template <typename M>
struct test_lock
{
   typedef M mutex_type;
   typedef boost::interprocess::scoped_lock<mutex_type> lock_type;

   void operator()()
   {
      mutex_type interprocess_mutex;

      // Test the lock's constructors.
      {
         // defer_lock: the lock object is created without owning the mutex.
         lock_type lock(interprocess_mutex, boost::interprocess::defer_lock);
         BOOST_INTERPROCES_CHECK(!lock);
      }
      // Default construction acquires the mutex immediately.
      lock_type lock(interprocess_mutex);
      BOOST_INTERPROCES_CHECK(lock ? true : false);

      // Test the lock and unlock methods.
      lock.unlock();
      BOOST_INTERPROCES_CHECK(!lock);
      lock.lock();
      BOOST_INTERPROCES_CHECK(lock ? true : false);
   }
};
// Single-threaded exercise of try_to_lock construction and try_lock()
// on mutex type M; every attempt here is uncontended and must succeed.
template <typename M>
struct test_trylock
{
   typedef M mutex_type;
   typedef boost::interprocess::scoped_lock<mutex_type> try_to_lock_type;

   void operator()()
   {
      mutex_type interprocess_mutex;

      // Test the lock's constructors.
      {
         // try_to_lock on an uncontended mutex must acquire it.
         try_to_lock_type lock(interprocess_mutex, boost::interprocess::try_to_lock);
         BOOST_INTERPROCES_CHECK(lock ? true : false);
      }
      {
         try_to_lock_type lock(interprocess_mutex, boost::interprocess::defer_lock);
         BOOST_INTERPROCES_CHECK(!lock);
      }
      try_to_lock_type lock(interprocess_mutex);
      BOOST_INTERPROCES_CHECK(lock ? true : false);

      // Test the lock, unlock and trylock methods.
      lock.unlock();
      BOOST_INTERPROCES_CHECK(!lock);
      lock.lock();
      BOOST_INTERPROCES_CHECK(lock ? true : false);
      lock.unlock();
      BOOST_INTERPROCES_CHECK(!lock);
      // try_lock on the mutex this thread just released must succeed.
      BOOST_INTERPROCES_CHECK(lock.try_lock());
      BOOST_INTERPROCES_CHECK(lock ? true : false);
   }
};
// Single-threaded exercise of timed acquisition: construction with an
// absolute deadline and the timed_lock() method, for mutex type M.
template <typename M>
struct test_timedlock
{
   typedef M mutex_type;
   typedef boost::interprocess::scoped_lock<mutex_type> timed_lock_type;

   void operator()()
   {
      mutex_type interprocess_mutex;

      // Test the lock's constructors.
      {
         // Construct and initialize an ptime for a fast time out.
         boost::posix_time::ptime pt = delay(1*BaseSeconds, 0);
         // Uncontended, so acquisition must succeed well before the deadline.
         timed_lock_type lock(interprocess_mutex, pt);
         BOOST_INTERPROCES_CHECK(lock ? true : false);
      }
      {
         timed_lock_type lock(interprocess_mutex, boost::interprocess::defer_lock);
         BOOST_INTERPROCES_CHECK(!lock);
      }
      timed_lock_type lock(interprocess_mutex);
      BOOST_INTERPROCES_CHECK(lock ? true : false);

      // Test the lock, unlock and timedlock methods.
      lock.unlock();
      BOOST_INTERPROCES_CHECK(!lock);
      lock.lock();
      BOOST_INTERPROCES_CHECK(lock ? true : false);
      lock.unlock();
      BOOST_INTERPROCES_CHECK(!lock);
      // The mutex is free, so timed_lock must succeed within the deadline.
      boost::posix_time::ptime pt = delay(3*BaseSeconds, 0);
      BOOST_INTERPROCES_CHECK(lock.timed_lock(pt));
      BOOST_INTERPROCES_CHECK(lock ? true : false);
   }
};
template <typename M>
struct test_recursive_lock
{
typedef M mutex_type;
typedef boost::interprocess::scoped_lock<mutex_type> lock_type;
void operator()()
{
mutex_type mx;
{
lock_type lock1(mx);
lock_type lock2(mx);
}
{
lock_type lock1(mx, defer_lock);
lock_type lock2(mx, defer_lock);
}
{
lock_type lock1(mx, try_to_lock);
lock_type lock2(mx, try_to_lock);
}
{
//This should always lock
boost::posix_time::ptime pt = delay(2*BaseSeconds);
lock_type lock1(mx, pt);
lock_type lock2(mx, pt);
}
}
};
// plain_exclusive exercises the "infinite" lock for each
// read_write_mutex type.
template<typename M>
void lock_and_sleep(void *arg, M &sm)
{
data<M> *pdata = static_cast<data<M>*>(arg);
boost::interprocess::scoped_lock<M> l(sm);
if(pdata->m_secs){
boost::thread::sleep(xsecs(pdata->m_secs));
}
else{
boost::thread::sleep(xsecs(2*BaseSeconds));
}
++shared_val;
pdata->m_value = shared_val;
}
template<typename M>
void lock_and_catch_errors(void *arg, M &sm)
{
data<M> *pdata = static_cast<data<M>*>(arg);
try
{
boost::interprocess::scoped_lock<M> l(sm);
if(pdata->m_secs){
boost::thread::sleep(xsecs(pdata->m_secs));
}
else{
boost::thread::sleep(xsecs(2*BaseSeconds));
}
++shared_val;
pdata->m_value = shared_val;
}
catch(interprocess_exception const & e)
{
pdata->m_error = e.get_error_code();
}
}
template<typename M>
void try_lock_and_sleep(void *arg, M &sm)
{
data<M> *pdata = static_cast<data<M>*>(arg);
boost::interprocess::scoped_lock<M> l(sm, boost::interprocess::defer_lock);
if (l.try_lock()){
boost::thread::sleep(xsecs(2*BaseSeconds));
++shared_val;
pdata->m_value = shared_val;
}
}
template<typename M>
void timed_lock_and_sleep(void *arg, M &sm)
{
data<M> *pdata = static_cast<data<M>*>(arg);
boost::posix_time::ptime pt(delay(pdata->m_secs));
boost::interprocess::scoped_lock<M>
l (sm, boost::interprocess::defer_lock);
if (l.timed_lock(pt)){
boost::thread::sleep(xsecs(2*BaseSeconds));
++shared_val;
pdata->m_value = shared_val;
}
}
// Two threads serialize on the same mutex (SameObject=true) or run on two
// independent mutexes. Each sleeps 2*BaseSeconds inside the lock and then
// records the post-increment value of the global shared_val, so thread one
// must observe 1 and thread two must observe 2.
template<bool SameObject, typename M>
void test_mutex_lock()
{
   shared_val = 0;

   M m1, m2;
   M *pm1, *pm2;

   if(SameObject){
      pm1 = pm2 = &m1;
   }
   else{
      pm1 = &m1;
      pm2 = &m2;
   }

   data<M> d1(1);
   data<M> d2(2);

   // Locker one launches, holds the lock for 2*BaseSeconds seconds.
   boost::thread tm1(thread_adapter<M>(&lock_and_sleep, &d1, *pm1));
   //Wait 1*BaseSeconds so thread one is guaranteed to lock first.
   boost::thread::sleep(xsecs(1*BaseSeconds));
   // Locker two launches, but it won't hold the lock for 2*BaseSeconds seconds.
   boost::thread tm2(thread_adapter<M>(&lock_and_sleep, &d2, *pm2));
   //Wait completion
   tm1.join();
   boost::thread::sleep(xsecs(1*BaseSeconds));
   tm2.join();

   // Thread one must have entered the critical section first.
   BOOST_INTERPROCES_CHECK(d1.m_value == 1);
   BOOST_INTERPROCES_CHECK(d2.m_value == 2);
}
// Verifies the library's internal lock timeout: thread one holds the lock
// longer than the timeout, so thread two (using lock_and_catch_errors)
// must fail with timeout_when_locking_error and leave its value untouched.
template<bool SameObject, typename M>
void test_mutex_lock_timeout()
{
   shared_val = 0;

   M m1, m2;
   M *pm1, *pm2;

   if(SameObject){
      pm1 = pm2 = &m1;
   }
   else{
      pm1 = &m1;
      pm2 = &m2;
   }

   // Derive sleep times from the configured lock timeout so the second
   // locker is guaranteed to exceed it.
   int wait_time_s = BOOST_INTERPROCESS_TIMEOUT_WHEN_LOCKING_DURATION_MS / 1000;
   if (wait_time_s == 0 )
      wait_time_s = 1;

   data<M> d1(1, wait_time_s * 3);
   data<M> d2(2, wait_time_s * 2);

   // Locker one launches, and holds the lock for wait_time_s * 3 seconds.
   boost::thread tm1(thread_adapter<M>(&lock_and_sleep, &d1, *pm1));
   //Wait wait_time_s so thread one is guaranteed to own the lock.
   boost::thread::sleep(xsecs(wait_time_s));
   // Locker two launches, and attempts to hold the lock for wait_time_s * 2 seconds.
   boost::thread tm2(thread_adapter<M>(&lock_and_catch_errors, &d2, *pm2));
   //Wait completion
   tm1.join();
   boost::thread::sleep(xsecs(1*BaseSeconds));
   tm2.join();

   // Thread one succeeds; thread two must time out with m_value still at
   // its initial value (-1, presumably set by data's ctor in util.hpp —
   // confirm) and the timeout error code recorded.
   BOOST_INTERPROCES_CHECK(d1.m_value == 1);
   BOOST_INTERPROCES_CHECK(d2.m_value == -1);
   BOOST_INTERPROCES_CHECK(d1.m_error == no_error);
   BOOST_INTERPROCES_CHECK(d2.m_error == boost::interprocess::timeout_when_locking_error);
}
// First thread grabs the lock with try_lock and sleeps inside it; the
// second thread's try_lock must then fail immediately, leaving its value
// untouched. NOTE(review): the final checks only hold when both threads
// contend on the same mutex (SameObject=true) — confirm intended usage.
template<bool SameObject, typename M>
void test_mutex_try_lock()
{
   shared_val = 0;

   M m1, m2;
   M *pm1, *pm2;

   if(SameObject){
      pm1 = pm2 = &m1;
   }
   else{
      pm1 = &m1;
      pm2 = &m2;
   }

   data<M> d1(1);
   data<M> d2(2);

   // Locker one launches, holds the lock for 2*BaseSeconds seconds.
   boost::thread tm1(thread_adapter<M>(&try_lock_and_sleep, &d1, *pm1));
   //Wait 1*BaseSeconds
   boost::thread::sleep(xsecs(1*BaseSeconds));
   // Locker two launches, but it should fail acquiring the lock
   boost::thread tm2(thread_adapter<M>(&try_lock_and_sleep, &d2, *pm2));
   //Wait completion
   tm1.join();
   tm2.join();

   //Only the first should succeed locking
   BOOST_INTERPROCES_CHECK(d1.m_value == 1);
   BOOST_INTERPROCES_CHECK(d2.m_value == -1);
}
// Both threads use timed_lock with a 2*BaseSeconds deadline. The second
// thread arrives while the first holds the lock, but the deadline is long
// enough for it to eventually acquire, so both must succeed in order.
template<bool SameObject, typename M>
void test_mutex_timed_lock()
{
   shared_val = 0;

   M m1, m2;
   M *pm1, *pm2;

   if(SameObject){
      pm1 = pm2 = &m1;
   }
   else{
      pm1 = &m1;
      pm2 = &m2;
   }

   data<M> d1(1, 2*BaseSeconds);
   data<M> d2(2, 2*BaseSeconds);

   // Locker one launches, holds the lock for 2*BaseSeconds seconds.
   boost::thread tm1(thread_adapter<M>(&timed_lock_and_sleep, &d1, *pm1));
   //Wait 1*BaseSeconds
   boost::thread::sleep(xsecs(1*BaseSeconds));
   // Locker two launches, holds the lock for 2*BaseSeconds seconds.
   boost::thread tm2(thread_adapter<M>(&timed_lock_and_sleep, &d2, *pm2));
   //Wait completion
   tm1.join();
   tm2.join();

   //Both should succeed locking
   BOOST_INTERPROCES_CHECK(d1.m_value == 1);
   BOOST_INTERPROCES_CHECK(d2.m_value == 2);
}
// Runs every single-threaded lock exercise (plain, try, timed) for M,
// announcing each on stdout with the mutex's mangled type name.
template <typename M>
inline void test_all_lock()
{
   //Now generic interprocess_mutex tests
   std::cout << "test_lock<" << typeid(M).name() << ">" << std::endl;
   test_lock<M>()();
   std::cout << "test_trylock<" << typeid(M).name() << ">" << std::endl;
   test_trylock<M>()();
   std::cout << "test_timedlock<" << typeid(M).name() << ">" << std::endl;
   test_timedlock<M>()();
}
// Runs the recursive-acquisition exercise for a recursive mutex type M.
template <typename M>
inline void test_all_recursive_lock()
{
   //Now generic interprocess_mutex tests
   std::cout << "test_recursive_lock<" << typeid(M).name() << ">" << std::endl;
   test_recursive_lock<M>()();
}
// Runs the multi-threaded contention tests for M. SameObject selects
// whether both threads contend on one mutex or use two independent ones.
// NOTE(review): test_mutex_lock_timeout is defined above but never
// invoked here — confirm whether that is intentional.
template<bool SameObject, typename M>
void test_all_mutex()
{
   std::cout << "test_mutex_lock<" << typeid(M).name() << ">" << std::endl;
   test_mutex_lock<SameObject, M>();
   std::cout << "test_mutex_try_lock<" << typeid(M).name() << ">" << std::endl;
   test_mutex_try_lock<SameObject, M>();
   std::cout << "test_mutex_timed_lock<" << typeid(M).name() << ">" << std::endl;
   test_mutex_timed_lock<SameObject, M>();
}
}}} //namespace boost { namespace interprocess { namespace test {
#include <boost/interprocess/detail/config_end.hpp>
#endif //BOOST_INTERPROCESS_TEST_MUTEX_TEST_TEMPLATE_HEADER
|
{"hexsha": "2870c4d55062767457b34e26787df7770573f157", "size": 11355, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "libs/interprocess/test/mutex_test_template.hpp", "max_stars_repo_name": "AishwaryaDoosa/Boost1.49", "max_stars_repo_head_hexsha": "67bdb3b36d72dec7414a62f3b050162e608ea266", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libs/interprocess/test/mutex_test_template.hpp", "max_issues_repo_name": "AishwaryaDoosa/Boost1.49", "max_issues_repo_head_hexsha": "67bdb3b36d72dec7414a62f3b050162e608ea266", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/interprocess/test/mutex_test_template.hpp", "max_forks_repo_name": "AishwaryaDoosa/Boost1.49", "max_forks_repo_head_hexsha": "67bdb3b36d72dec7414a62f3b050162e608ea266", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9714964371, "max_line_length": 90, "alphanum_fraction": 0.6511668868, "num_tokens": 3009}
|
#' Get document revisions.
#'
#' @export
#' @template all
#' @template return
#' @param dbname Database name
#' @param docid Document ID
#' @param simplify (logical) Simplify to character vector of revision ids.
#' If \code{FALSE}, gives back availability info too. Default: \code{TRUE}
#' @examples \dontrun{
#' (x <- Cushion$new())
#'
#' if ("sofa" %in% db_list(x)) {
#' db_delete(x, dbname = "sofadb")
#' }
#' db_create(x, dbname = "sofadb")
#'
#' doc1 <- '{"name": "drink", "beer": "IPA", "score": 5}'
#' doc_create(x, dbname="sofadb", doc1, docid="abeer")
#' doc_create(x, dbname="sofadb", doc1, docid="morebeer", as='json')
#'
#' db_revisions(x, dbname="sofadb", docid="abeer")
#' db_revisions(x, dbname="sofadb", docid="abeer", simplify=FALSE)
#' db_revisions(x, dbname="sofadb", docid="abeer", as='json')
#' db_revisions(x, dbname="sofadb", docid="abeer", simplify=FALSE, as='json')
#' }
db_revisions <- function(cushion, dbname, docid, simplify = TRUE, as = 'list', ...) {
  # Validate the connection object before touching the network.
  check_cushion(cushion)
  # Document endpoint: <server>/<db>/<docid>?revs_info=true
  url <- paste(cushion$make_url(), dbname, docid, sep = "/")
  res <- sofa_GET(url, as = "list", query = list(revs_info = 'true'),
                  cushion$get_headers(), ...)
  info <- res$`_revs_info`
  # Either just the revision ids, or the full per-revision info entries.
  out <- if (simplify) vapply(info, "[[", "", "rev") else info
  if (as == 'json') jsonlite::toJSON(out) else out
}
|
{"hexsha": "de194b573b268f512512d6efc99983e410603067", "size": 1330, "ext": "r", "lang": "R", "max_stars_repo_path": "R/revisions.r", "max_stars_repo_name": "FTwex/sofa-cloudant", "max_stars_repo_head_hexsha": "097577be2446865e17d41bcb015141eed19c7139", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R/revisions.r", "max_issues_repo_name": "FTwex/sofa-cloudant", "max_issues_repo_head_hexsha": "097577be2446865e17d41bcb015141eed19c7139", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/revisions.r", "max_forks_repo_name": "FTwex/sofa-cloudant", "max_forks_repo_head_hexsha": "097577be2446865e17d41bcb015141eed19c7139", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0, "max_line_length": 87, "alphanum_fraction": 0.6345864662, "num_tokens": 431}
|
# -*- coding: utf-8 -*-
from jplephem.spk import SPK
import jdcal
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# --- Lambert's problem via bisection on the semi-major axis -----------------
# Fixes over the original: `error` and `maxItt` were used before being
# defined (NameError on the while condition), `u2=` and `uc=` were left
# incomplete (SyntaxError), `v2` was undefined, and the final
# `deltaVMat[c,d]` referenced names that do not exist.
mu = 398600.4418          # Earth gravitational parameter [km^3 s^-2]
tof = 30. * 60.           # desired time of flight [s]

x0 = np.array([6045, 3490, 0])               # initial position vector [km]
vC = np.array([-2.457, 6.618, 2.533])        # chaser velocity (unused by the solve)
x1 = np.array([12214.839, 10249.467, 2000])  # final position vector [km]
v1 = np.array([-3.448, .924, 0])             # target velocity (unused by the solve)
vf = [.52, .8414, .1451]                     # overwritten below on success
r2 = 12282.               # magnitude of the final radius [km]
# Author-supplied chord length [km] — note it does not equal |x1 - x0|
# for the vectors above; kept as given. TODO confirm the input data.
C = 7080.

# Semi-perimeter of the space triangle and minimum-energy bounds.
S = (C + np.linalg.norm(x0[0:3]) + (r2)) / 2
tMin = np.sqrt(2) * np.sqrt((S**3.) / mu) * (1 - (((S - C) / S)**(3. / 2.))) / 3.
aMin = S / 2.0
aMax = S * 2.0
alphaMax = 2. * np.arcsin(np.sqrt(S / (2.0 * aMin)))
betaMax = 2. * np.arcsin(np.sqrt((S - C) / (2.0 * aMin)))
tMax = np.sqrt((aMin**3) / mu) * (alphaMax - betaMax - (np.sin(alphaMax) - np.sin(betaMax)))

# Bisection until the Lagrange time-of-flight equation matches tof.
tol = 60            # time-of-flight tolerance [s]
maxItt = 100        # iteration cap (was undefined in the original)
error = np.inf      # must start above tol (was undefined in the original)
p = 0
while(error > tol and p < maxItt):
    a = (aMin + aMax) / 2.0
    alpha = 2. * np.arcsin(np.sqrt(S / (2. * a)))
    beta = 2. * np.arcsin(np.sqrt((S - C) / (2. * a)))
    deltaT = np.sqrt((a**3) / mu) * (alpha - beta - (np.sin(alpha) - np.sin(beta)))
    error = tof - deltaT
    if(error < 0):
        # Transfer takes too long -> raise the lower bound on a.
        aMin = a
    else:
        aMax = a
    p += 1
    error = np.abs(error)

if(p >= maxItt):
    print('Stuck')
    z = 1
else:
    # Battin/Lagrange terminal-velocity construction for the converged orbit.
    u1 = x0 / np.linalg.norm(x0)   # unit vector along initial radius
    u2 = x1 / np.linalg.norm(x1)   # unit vector along final radius (was blank)
    uc = (x1 - x0) / C             # unit vector along the chord (was blank)
    A = np.sqrt(mu / (4.0 * a)) * (1 / np.tan(alpha / 2))
    B = np.sqrt(mu / (4.0 * a)) * (1 / np.tan(beta / 2))
    v0 = ((B + A) * uc) + ((B - A) * u1)
    vf = ((B + A) * uc) - ((B - A) * u2)
    deltaVec = v0 - vf
    deltaV = np.linalg.norm(deltaVec)
|
{"hexsha": "f5619256691888f18e8190bd8416e381aa81a12d", "size": 1521, "ext": "py", "lang": "Python", "max_stars_repo_path": "Porkchop Plot/lambertTest.py", "max_stars_repo_name": "gereshes/Blog-Companion", "max_stars_repo_head_hexsha": "2598da9cbd5de1ac4aadee90a165c9b833496b6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-07-05T04:06:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-07T21:09:27.000Z", "max_issues_repo_path": "Porkchop Plot/lambertTest.py", "max_issues_repo_name": "gereshes/Blog-Companion", "max_issues_repo_head_hexsha": "2598da9cbd5de1ac4aadee90a165c9b833496b6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Porkchop Plot/lambertTest.py", "max_forks_repo_name": "gereshes/Blog-Companion", "max_forks_repo_head_hexsha": "2598da9cbd5de1ac4aadee90a165c9b833496b6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-12-10T15:01:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-09T16:14:37.000Z", "avg_line_length": 26.224137931, "max_line_length": 80, "alphanum_fraction": 0.5641025641, "include": true, "reason": "import numpy", "num_tokens": 621}
|
\documentclass{article}
\usepackage[margin=0.75cm]{geometry}
\usepackage{hyperref}
\usepackage{graphicx}
\usepackage{float}
\usepackage{caption}
\usepackage{textcomp}
\usepackage{xcolor}
\usepackage[lofdepth,lotdepth]{subfig}
\title{OrpailleCC: a Library for Data Stream Analysis on Embedded Systems}
\author{Martin Khannouz, Bo Li, Tristan Glatard}
\begin{document}
\maketitle
\section*{Summary}
The Internet of Things could benefit in several ways from mining data
streams on connected objects rather than in the cloud. In particular,
limiting network communication with cloud services would improve user
privacy and reduce energy consumption in connected devices. Besides,
applications could leverage the computing power of connected objects
for improved scalability.
OrpailleCC provides a consistent collection of data stream algorithms
developed to be deployed on embedded devices. Its main objective is to
support research on data stream mining for connected objects,
by facilitating the comparison and benchmarking of algorithms in a
consistent framework. It also enables programmers of embedded systems to use
out-of-the-box algorithms with an efficient implementation.
To the best of our knowledge, existing libraries of stream mining
algorithms cannot be used on connected objects due to their resource consumption or
assumptions about the target system (e.g., existence of a \texttt{malloc} function).
Nevertheless, for more powerful devices such as desktop computers, Java
frameworks such as Massive Online Analysis~\cite{moa} and WEKA~\cite{weka} achieve
similar goals as OrpailleCC.
OrpailleCC targets the classes of problems discussed in~\cite{kejariwal2015},
in particular Sampling and
Filtering. Sampling covers algorithms that
build a representative sample of a
data stream. OrpailleCC implements the reservoir
sampling~\cite{reservoir_sampling} and one variant, the chained reservoir
sampling~\cite{chained_reservoir_sampling}. Filtering algorithms
remove the stream elements that do not belong to a specific set.
OrpailleCC implements the Bloom Filter~\cite{bloom} and the Cuckoo
Filter~\cite{cuckoo_filter}, two well-tested algorithms that address this
problem.
In addition to Sampling and Filtering, OrpailleCC
provides algorithms for stream Classification and for stream Compression. The
Micro-Cluster Nearest Neighbour algorithm~\cite{mc-nn} is based on the
k-nearest neighbor to classify a data stream while detecting concept
drifts. The Lightweight Temporal Compression~\cite{ltc} and a
multi-dimensional variant~\cite{ltcd} are two methods to compress data
streams.
All implementations rely as little as possible on functions provided by the
operating system, for instance \texttt{malloc}, since such functions are typically
not available on embedded systems. When algorithms cannot be
implemented without such functions, the library uses template parameters to
request the required functions from the user. All algorithms are
developed for FreeRTOS~\cite{freertos}, a free real-time operating
system used in embedded systems, but they should work on any
micro-controller with a C++11 compiler. The C++11 programming language
was chosen for its performance as well as its popularity in the
field. All methods are tested and tests are run through Travis-CI.
In the future, we plan to extend the library with other reliable
algorithms to widely cover as many common problems as possible. We also plan to
use it as a basis to design new stream classification methods.
External contributions are, of course, most welcome.
\bibliographystyle{plain}
\bibliography{paper}
\end{document}
|
{"hexsha": "4b7230b7087c978fa878d83bbd4a44f381ea3970", "size": 3629, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "paper/paper.tex", "max_stars_repo_name": "codacy-badger/OrpailleCC", "max_stars_repo_head_hexsha": "1b822d39160c359ee9576b8a2d0d8abc2e2785eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "paper/paper.tex", "max_issues_repo_name": "codacy-badger/OrpailleCC", "max_issues_repo_head_hexsha": "1b822d39160c359ee9576b8a2d0d8abc2e2785eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paper/paper.tex", "max_forks_repo_name": "codacy-badger/OrpailleCC", "max_forks_repo_head_hexsha": "1b822d39160c359ee9576b8a2d0d8abc2e2785eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.1298701299, "max_line_length": 83, "alphanum_fraction": 0.8159272527, "num_tokens": 800}
|
#include "geometrycentral/arap.h"

#include <cfloat>

#include <Eigen/Dense>
#include <Eigen/SVD>
#include <Eigen/SparseCholesky>
using namespace geometrycentral;
// Stores the mesh/geometry pointers, allocates the per-vertex and
// per-halfedge containers over the mesh, and caches the vertex indexing
// used to map mesh vertices to linear-system rows.
ARAP::ARAP(HalfedgeMesh* m, Geometry<Euclidean>* g) : mesh(m), geom(g), vertexIndices(mesh), isoTriangleParam(mesh), uvCoords(mesh) {
    vertexIndices = mesh->getVertexIndices();
}
// Assembles the cotangent Laplace matrix of the mesh as a complex-valued
// sparse matrix (2D vectors are handled as complex numbers elsewhere).
// Off-diagonal (i,j) holds -w_ij with w_ij = (cot a + cot b)/2 over the
// two angles opposite the edge; the diagonal holds the sum of incident
// weights plus a tiny bias so the factorization never sees an exact zero.
Eigen::SparseMatrix<std::complex<double>> ARAP::createLaplaceMatrix() {
    size_t n = mesh->nVertices();
    Eigen::SparseMatrix<std::complex<double>> A(n,n);
    std::vector<Eigen::Triplet<std::complex<double>>> triplets;

    for (VertexPtr v1 : mesh->vertices()) {
        int index1 = vertexIndices[v1];
        // DBL_EPSILON (from <cfloat>) replaces the GCC-only __DBL_EPSILON__
        // predefined macro; same value, portable across compilers.
        double sum = DBL_EPSILON;

        // add neighbor weights
        for (HalfedgePtr heOut : v1.outgoingHalfedges()) {
            VertexPtr v2 = heOut.twin().vertex();
            int index2 = vertexIndices[v2];
            // Cotangent weight averaged over the two faces sharing the edge.
            double weight = (geom->cotan(heOut) + geom->cotan(heOut.twin())) / 2.0;
            sum += weight;
            triplets.push_back(Eigen::Triplet<std::complex<double>>(index1, index2, std::complex<double>(-weight,0)));
        }
        // add diagonal weight
        triplets.push_back(Eigen::Triplet<std::complex<double>>(index1, index1, std::complex<double>(sum,0)));
    }
    A.setFromTriplets(triplets.begin(), triplets.end());
    return A;
}
// Flattens each face in isolation into its own 2D frame, preserving the
// two edge lengths and the included angle: vertex a at the origin, b on
// the positive x-axis, c rotated by the interior angle at a.
void ARAP::computeIsoTriangleParam() {
    for (FacePtr f : mesh->faces()) {
        HalfedgePtr ha = f.halfedge();
        double ab = geom->length(ha.edge());         // |a -> b|
        double ac = geom->length(ha.prev().edge());  // |a -> c|
        double alpha = geom->angle(ha.next());       // interior angle at a, radians
        isoTriangleParam[ha] = Vector2{0, 0};
        isoTriangleParam[ha.next()] = Vector2{ab, 0};
        isoTriangleParam[ha.prev()] = Vector2{ac * cos(alpha), ac * sin(alpha)};
    }
}
// Local step of the ARAP parameterization: for each face, fit the best
// 2x2 rotation L_t between the current uv mapping `u` and the face's
// isometric reference frame, via SVD of the cotan-weighted covariance.
FaceData<Eigen::Matrix2d> ARAP::computeLRotations(VertexData<Vector2> const &u) {
    FaceData<Eigen::Matrix2d> L(mesh);
    for (FacePtr f : mesh->faces()) {
        // Gather elements: the three corners' uv positions, reference
        // positions, and cotan weights.
        HalfedgePtr he1 = f.halfedge();
        HalfedgePtr he2 = he1.next();
        HalfedgePtr he3 = he1.prev();
        std::vector<Vector2> ut = { u[he1.vertex()], u[he2.vertex()], u[he3.vertex()] };
        std::vector<Vector2> xt = { isoTriangleParam[he1], isoTriangleParam[he2], isoTriangleParam[he3] };
        std::vector<double> thetat = { geom->cotan(he1), geom->cotan(he2), geom->cotan(he3) };

        // Compute St matrix: weighted sum over edges of (uv edge) x
        // (reference edge)^T — the cross-covariance of the fit.
        Eigen::Matrix2d St = Eigen::Matrix2d::Zero();
        for (int i = 0; i < 3; i++) {
            Vector2 ui = ut[i] - ut[(i+1) % 3];
            Vector2 xi = xt[i] - xt[(i+1) % 3];
            St(0,0) += thetat[i] * ui.x * xi.x;
            St(0,1) += thetat[i] * ui.x * xi.y;
            St(1,0) += thetat[i] * ui.y * xi.x;
            St(1,1) += thetat[i] * ui.y * xi.y;
        }

        // Perform SVD decomposition, where L_t = UV^T
        Eigen::JacobiSVD<Eigen::Matrix2d> svd( St, Eigen::ComputeFullU | Eigen::ComputeFullV );
        Eigen::Matrix2d U = svd.matrixU();
        Eigen::Matrix2d V = svd.matrixV();
        Eigen::Matrix2d UVT = U * V.transpose();
        // Flip the least-significant singular direction to force
        // det(L_t) = +1: a proper rotation, never a reflection.
        if (UVT.determinant() < 0) {
            V.col(1) *= -1;
            UVT = U * V.transpose();
        }
        L[f] = UVT;
    }
    return L;
}
// Assembles the right-hand side b (n x 1, complex) of the global step
// A u = b. 2D vectors are packed as complex numbers x + i*y; each face's
// fitted rotation enters as the complex factor L_t(0,0) + i*L_t(1,0)
// (its first column). Each directed edge (i,j) contributes
// cot * L_t * (x_i - x_j) / 2 to row i, once per incident real face.
Eigen::MatrixXcd ARAP::computebVector(FaceData<Eigen::Matrix2d> const &L) {
    Eigen::MatrixXcd b = Eigen::MatrixXcd::Zero(mesh->nVertices(),1);
    for (VertexPtr v : mesh->vertices()) {
        size_t index = vertexIndices[v];
        for (HalfedgePtr he_ij : v.outgoingHalfedges()) {
            HalfedgePtr he_ji = he_ij.twin();
            // first triangle term (face on this side of the edge)
            if (he_ij.isReal()) {
                Vector2 xi = isoTriangleParam[he_ij];
                Vector2 xj = isoTriangleParam[he_ij.next()];
                double cotan_ij = geom->cotan(he_ij);
                Eigen::Matrix2d Lt_ij = L[he_ij.face()];
                std::complex<double> sub((xi-xj).x, (xi-xj).y);
                std::complex<double> rot(Lt_ij(0,0), Lt_ij(1,0));
                b(index,0) += cotan_ij * rot * sub / 2.0;
            }
            // second triangle term (mirror contribution from the twin's face)
            if (he_ji.isReal()) {
                Vector2 xi = isoTriangleParam[he_ji.next()];
                Vector2 xj = isoTriangleParam[he_ji];
                double cotan_ji = geom->cotan(he_ji);
                Eigen::Matrix2d Lt_ji = L[he_ji.face()];
                std::complex<double> sub((xi-xj).x, (xi-xj).y);
                std::complex<double> rot(Lt_ji(0,0), Lt_ji(1,0));
                b(index,0) += cotan_ji * rot * sub / 2.0;
            }
        }
    }
    return b;
}
// Full ARAP parameterization pipeline: factorize the cotan Laplacian
// once, seed u with a spectral-conformal map, alternate the local
// rotation fit and global Poisson solve for a fixed 10 iterations (no
// convergence test is performed), then normalize the uvs into the unit
// disk and write them to "ARAP.obj".
void ARAP::computeARAP() {
    // Build Laplace Matrix A (n x n) and factorize once; it is reused
    // for every global solve below.
    Eigen::SparseMatrix<std::complex<double>> A = createLaplaceMatrix();
    Eigen::SimplicialLDLT<Eigen::SparseMatrix<std::complex<double>>> solver;
    solver.compute(A);

    // Compute isometric parameterization for each triangle t
    computeIsoTriangleParam();

    // Initial parameterization u (using SCP)
    SpectralConformal s = SpectralConformal(mesh,geom);
    VertexData<Vector2> u = s.computeSpectralConformal();

    // Local/global alternation, fixed iteration count.
    for (int i = 0; i < 10; i++) {
        // Fix the mapping u (n x 1) and solve for L_t (2x2) for each triangle t
        FaceData<Eigen::Matrix2d> L = computeLRotations(u);
        // Compute b (n x 1) using L
        Eigen::MatrixXcd b = computebVector(L);
        // Solve Au = b
        Eigen::MatrixXcd u_new = solver.solve(b);
        // Update u: unpack the complex solution back into 2D coordinates.
        for (VertexPtr v : mesh->vertices()) {
            std::complex<double> uv = u_new(vertexIndices[v],0);
            u[v] = Vector2{uv.real(), uv.imag()};
        }
        std::cout << "finished iteration: " << i << std::endl;
    }

    // normalize
    uvCoords = u;
    normalize();

    // write output obj file
    std::ofstream outfile ("ARAP.obj");
    writeToFile(outfile);
    outfile.close();
    std::cout<<"Done ARAP!"<<std::endl;
}
void ARAP::writeToFile(std::ofstream &outfile) {
// write vertices
for (VertexPtr v : mesh->vertices()) {
outfile << "v " << geom->position(v).x << " " << geom->position(v).y << " " << geom->position(v).z << std::endl;
}
// write uvs
for (VertexPtr v : mesh->vertices()) {
outfile << "vt " << uvCoords[v].x << " " << uvCoords[v].y << std::endl;
}
// write indices
VertexData<size_t> index = mesh->getVertexIndices();
for (FacePtr f : mesh->faces()) {
HalfedgePtr he = f.halfedge();
outfile << "f";
do {
VertexPtr v = he.vertex();
outfile << " " << index[v] + 1 << "/" << index[v] + 1;
he = he.next();
} while (he != f.halfedge());
outfile << std::endl;
}
outfile.close();
}
void ARAP::normalize() {
// compute center of mass
Vector2 cm = {0,0};
for (VertexPtr v : mesh->vertices()) {
Vector2 uv = uvCoords[v];
cm += uv;
}
cm /= mesh->nVertices();
double r = 0;
for (VertexPtr v : mesh->vertices()) {
Vector2 &uv = uvCoords[v];
uv -= cm;
r = std::max(r, norm(uv));
}
for (VertexPtr v : mesh->vertices()) {
Vector2 &uv = uvCoords[v];
uv /= r;
}
}
|
{"hexsha": "1b8f3e544e71a595a1c9784891a9bffb5f94b8d6", "size": 7499, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/arap.cpp", "max_stars_repo_name": "connorzl/geometry-central", "max_stars_repo_head_hexsha": "99114ffaf3efb58c912f94402dd0426cbb17d3f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/arap.cpp", "max_issues_repo_name": "connorzl/geometry-central", "max_issues_repo_head_hexsha": "99114ffaf3efb58c912f94402dd0426cbb17d3f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/arap.cpp", "max_forks_repo_name": "connorzl/geometry-central", "max_forks_repo_head_hexsha": "99114ffaf3efb58c912f94402dd0426cbb17d3f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4776785714, "max_line_length": 133, "alphanum_fraction": 0.5519402587, "num_tokens": 2148}
|
using .SnakeGames

# Build a `SnakeGameEnv` wrapping a `SnakeGame`. `action_style` selects
# MINIMAL_ACTION_SET (turn-backs are replaced by the previous action) or
# FULL_ACTION_SET; all remaining keyword args go to `SnakeGame`.
function SnakeGameEnv(; action_style = MINIMAL_ACTION_SET, kw...)
    game = SnakeGame(; kw...)
    n_snakes = length(game.snakes)
    # One snake -> single-agent env; otherwise a MultiAgent{N} env.
    num_agent_style = n_snakes == 1 ? SINGLE_AGENT : MultiAgent{n_snakes}()
    SnakeGameEnv{action_style,num_agent_style,typeof(game)}(
        game,
        map(length, game.snakes),  # snapshot of each snake's length (for rewards)
        Vector{CartesianIndex{2}}(undef, length(game.snakes)),  # latest actions buffer
        false,  # not terminated at construction
    )
end
# RLBase trait declarations, all read off the env's type parameters:
# single-agent envs step sequentially, multi-snake games step all agents
# simultaneously.
RLBase.ActionStyle(env::SnakeGameEnv{A}) where {A} = A
RLBase.NumAgentStyle(env::SnakeGameEnv{<:Any,N}) where {N} = N
RLBase.DynamicStyle(env::SnakeGameEnv{<:Any,SINGLE_AGENT}) = SEQUENTIAL
RLBase.DynamicStyle(env::SnakeGameEnv{<:Any,<:MultiAgent}) = SIMULTANEOUS
# Board-coordinate offsets for the four moves, indexed 1:4 by the integer
# action id (rows first: -1/+1 row, then +1/-1 column).
const SNAKE_GAME_ACTIONS = (
    CartesianIndex(-1, 0),
    CartesianIndex(1, 0),
    CartesianIndex(0, 1),
    CartesianIndex(0, -1),
)
# Step the game with one action (board-coordinate offset) per snake.
# With MINIMAL_ACTION_SET, a 180-degree turn-back is replaced by the
# snake's previous action before stepping.
function (env::SnakeGameEnv{A})(actions::Vector{CartesianIndex{2}}) where {A}
    if A === MINIMAL_ACTION_SET
        # avoid turn back
        actions = [
            a_new == -a_old ? a_old : a_new for
            (a_new, a_old) in zip(actions, env.latest_actions)
        ]
    end
    env.latest_actions .= actions
    # Record each snake's pre-step length so reward can measure growth.
    map!(length, env.latest_snakes_length, env.game.snakes)
    # `game(actions)` returns `false` when the game is over.
    env.is_terminated = !env.game(actions)
end

# Convenience overloads: translate integer action ids (1:4) into offsets.
(env::SnakeGameEnv)(action::Int) = env([SNAKE_GAME_ACTIONS[action]])
(env::SnakeGameEnv)(actions::Vector{Int}) = env(map(a -> SNAKE_GAME_ACTIONS[a], actions))
# Observation/action interface. The raw boolean board tensor is the state.
RLBase.action_space(env::SnakeGameEnv) = 1:4
RLBase.state(env::SnakeGameEnv) = env.game.board
RLBase.state_space(env::SnakeGameEnv) = Space(fill(false..true, size(env.game.board)))

# Reward is each snake's growth in length since the last step.
RLBase.reward(env::SnakeGameEnv{<:Any,SINGLE_AGENT}) =
    length(env.game.snakes[]) - env.latest_snakes_length[]
RLBase.reward(env::SnakeGameEnv) = length.(env.game.snakes) .- env.latest_snakes_length

RLBase.is_terminated(env::SnakeGameEnv) = env.is_terminated

# With FULL_ACTION_SET, every move except the direct turn-back is legal.
RLBase.legal_action_space(env::SnakeGameEnv{FULL_ACTION_SET,SINGLE_AGENT}) =
    findall(!=(-env.latest_actions[]), SNAKE_GAME_ACTIONS)
RLBase.legal_action_space(env::SnakeGameEnv{FULL_ACTION_SET}) =
    [findall(!=(-a), SNAKE_GAME_ACTIONS) for a in env.latest_actions]
RLBase.legal_action_space_mask(env::SnakeGameEnv{FULL_ACTION_SET,SINGLE_AGENT}) =
    [a != -env.latest_actions[] for a in SNAKE_GAME_ACTIONS]
RLBase.legal_action_space_mask(env::SnakeGameEnv{FULL_ACTION_SET}) =
    [[x != -a for x in SNAKE_GAME_ACTIONS] for a in env.latest_actions]
# Start a fresh episode: reset the underlying game, clear the termination
# flag, zero the cached actions, and re-capture the per-snake lengths that
# the reward delta is computed against.
function RLBase.reset!(env::SnakeGameEnv)
    SnakeGames.reset!(env.game)
    env.is_terminated = false
    for i in eachindex(env.latest_actions)
        env.latest_actions[i] = CartesianIndex(0, 0)
    end
    env.latest_snakes_length .= length.(env.game.snakes)
end
# Delegate rendering to the wrapped game.
# NOTE(review): `Base.display`'s three-argument form conventionally takes an
# `AbstractDisplay`, not an `IO`, as its first argument; this method looks
# like it was meant to be `Base.show(io::IO, m::MIME, env)` — confirm how it
# is invoked before relying on it.
Base.display(io::IO, m::MIME, env::SnakeGameEnv) = display(io, m, env.game)
|
{"hexsha": "df3a9e87f98266945bb87064756db04df377043a", "size": 2712, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ReinforcementLearningEnvironments/src/environments/3rd_party/snake.jl", "max_stars_repo_name": "LaarsOman/ReinforcementLearning.jl", "max_stars_repo_head_hexsha": "b04e3f192e71418dbca496331ada44f65b2822d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 352, "max_stars_repo_stars_event_min_datetime": "2018-08-30T18:41:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:41:22.000Z", "max_issues_repo_path": "src/ReinforcementLearningEnvironments/src/environments/3rd_party/snake.jl", "max_issues_repo_name": "LaarsOman/ReinforcementLearning.jl", "max_issues_repo_head_hexsha": "b04e3f192e71418dbca496331ada44f65b2822d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 325, "max_issues_repo_issues_event_min_datetime": "2018-08-24T12:41:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:29:23.000Z", "max_forks_repo_path": "src/ReinforcementLearningEnvironments/src/environments/3rd_party/snake.jl", "max_forks_repo_name": "LaarsOman/ReinforcementLearning.jl", "max_forks_repo_head_hexsha": "b04e3f192e71418dbca496331ada44f65b2822d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 62, "max_forks_repo_forks_event_min_datetime": "2018-09-02T03:40:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T12:35:19.000Z", "avg_line_length": 38.7428571429, "max_line_length": 89, "alphanum_fraction": 0.7149705015, "num_tokens": 769}
|
\subsubsection{Scalable Nonparametric Directed Graphical Model Inference
and Learning}
Author: Kai Londenberg (Kai.Londenberg@gmail.com), June 2014
\paragraph{Abstract}
This short article tries to give an overview over complementary
techniques over MCMC for general inference in arbitrary directed
probabilistic graphical models. The focus lies on techniques and
algorithms for creating hybrid models which can be scaled to high
dimensional problems, problems with huge data sets and distributed among
multiple machines.
\paragraph{Motivation}
Two papers got me thinking:
\href{http://www.dauwels.com/Papers/Particle.pdf}{Dauwels et al:
Particle Methods as Message Passing}, which gives a nice overview of how
to generalize Message Passing methods, by mixing sampling based methods
freely with exact or fast approximate inference algorithms.
The second is \href{http://arxiv.org/pdf/1311.4780v1.pdf}{Neiswanger et
al: Embarrassingly Parallel MCMC}, where an algorithm is described which
can be used to scale MCMC to problems with huge data sets.
Both algorithms actually suffer from the same problem, namely the
\textbf{Message Fusion} problem (described way down). A problem which
has luckily been successfully solved before. I add to that list by
proposing a new approach to efficient density estimation from MCMC
models, called \textbf{Density Mapping}
What I hope is, that these ideas can lead to a practical implementation
of a general, flexible inference system with semantics similar to those
found in common MCMC packages for Bayesian Inference, but with the
capability to outperform these for problems with huge data sets or a
large number of dimensions if the distribution can be factorized into
smaller problems somehow.
I hope to be able to extend the existing Bayesian Modeling Toolkit for
Python \href{https://github.com/pymc-devs/pymc}{PyMC3} via my
side-project \href{https://github.com/kadeng/pypgmc}{PyPGMc} to support
the algorithms mentioned in this article.
So this article is both an overview, and sort of a collection of ideas
and roadmap items.
\paragraph{Introduction to Directed Graphical Models}
Directed Probabilistic Graphical Models (DGMs) provide a flexible and
powerful framework for probabilistic reasoning. In all generality, they
are a way to efficiently represent complex probability distributions in
high dimensional spaces by factorizing the joint distribution into
conditional distributions.
Given a set of random variables \$ x\_i \sim X\_i \$ and their joint
random vector \$ x \sim X \$ with \$ x = \{ x\_1, \ldots, x\_N \} \$ we
represent their joint distribution as the product of a set of
conditional distributions
\[
P(X) = \prod_{i=1}^{N} P_i(X_i \mid \mathrm{pa}(X_i))
\]
Where \$ pa(X\_i) \$ is the set of parents of variable \$ X\_i \$ in a
directed graph \$ G \$, where each vertex in the graph represents one of
the random variables.
Each vertex in this graph could assign an arbitrary probability
distribution to it's random variables. But this probability distribution
may only depend on the parents of the variable in the graph.
Such a representation has many advantages, even to list them completely
would be out of the scope of this article. An excellent overview is
present in the book
\href{http://mitpress.mit.edu/books/probabilistic-\%20graphical-models}{Probabilistic
Graphical Models} by Daphne Koller, who also offers a
\href{https://www.coursera.org/course/pgm}{Coursera course} by the same
name, which is highly recommended.
\paragraph{D-Separation / Conditional Independence}
Most importantly, the graph encodes information about (conditional)
independencies among the variables. By a property called D-Separation
which can be determined using a few simple rules, we can safely
determine whether the probability distribution of a given set of
variables in the graph can be affected by changes in the probability of
another set of variables \textbf{given} another set of variables which
are held fixed.
If the conditional dependencies that hold over a joint probability
distribution are a subset of the conditional independence assumptions
made by the graph, this distribution is compatible to the graph in the
sense that the distribution can be faithfully represented by a
factorization of the distribution along that graph.
\paragraph{Causal Models}
A very common form of these models is to restrict parent / child
relationships to cause/effect pairs. Furthermore, the network must be
complete in the sense that no common causes of any two variables are
missing from the model.
While it is not necessary for the machinery of DGMs to work that they
are causal models, this can, under certain circumstances, be used to
perform so called causal inference or causal reasoning using DGMs. More
on that can be found in Judea Pearl's excellent book on
\href{http://bayes.cs.ucla.edu\%20/BOOK-2K/}{Causality}.
One important rule to note is that, in order to perform causal inference
in such a causal network, i.e.~to estimate the impact of an explicit
\textbf{action} where the value of a variable is forced to have a
certain value (in contrast to observing it having that value), it is
necessary to sever all ties from the parents of said variable to it
(since they are no longer causally connected). The resulting
``mutilated'' network then represents the post-intervention distribution.
In Pearl's see/do calculus, he discerns between $P(x \mid y)$ (the
probability of $x$ given that I \emph{see} $y$) and $P(x \mid do(y))$
(the probability of $x$ given that I \emph{do} $y$).
While not of further concern here, Pearl provides a great deal of
informal insights and formal rules into when and how observations can be
converted into causal claims, how to transfer the results of studies
from one setting to another.
\paragraph{Common types of DGMs}
Some common types of DGMs that you might have heard about include:
\begin{itemize}
\item
Bayesian Networks (BNs)
\item
Hierarchical Bayesian Models
\item
(Gaussian) Mixture Models (GMMs)
\end{itemize}
Also a lot of common models for time-series can be thought of as DGMs,
among them:
\begin{itemize}
\item
Vector Auto-Regressive Models (VAR)
\item
Hidden Markov Models (HMMs)
\item
State Space Models (SSM)
\item
Dynamic Bayesian Networks (DBNs)
\end{itemize}
Many of these models have their own set of specialized inference and
learning algorithms, their own set of advantages and disadvantages.
\paragraph{Inference / Reasoning in Graphical Models}
Generally speaking, we can use these graphical models to reason about
the marginal probability distributions, most likely configurations (MLE
/ MAP configurations) etc. of variables of interest \textbf{given
evidence}. This in turn can be used in many applications, from decision
support (making decisions under uncertainty) and as a key component for
supervised, unsupervised and semi- supervised learning.
\paragraph{Parametric VS Nonparametric representations}
If we can restrict our probability distributions to come from specific
families of distributions, inference can be made very efficient in some
cases. But if you want to have a general model which can capture any
kind of weird multi-modal and non-continuous distributions, you are
limited to very slow inference using MCMC methods. Also, the scalability
in these cases is very limited, because most MCMC algorithms are not
made to be distributed.
\begin{tabular}{|p{4.2cm}|p{2.6cm}|p{7.2cm}|}
\hline
Family & Advantages & Disadvantages \\
\hline
Conditional Linear Gaussian (CLG) & Very fast & Only applicable if your distributions are linear combinations of Gaussians. \\
\hline
Discrete (Categorical) & Fast & High number of parameters if the number of parents of any variable, or the number of discrete ``bins'' per variable, becomes too large; quickly becomes intractable in these cases. \\
\hline
Generic (arbitrary functions of parents) & Most flexible & Very slow inference (MCMC) or strongly biased approximate inference; hard to determine convergence / mixing; usually intractable in high dimensions. \\
\hline
\end{tabular}
\paragraph{Hierarchical Bayesian Models and MCMC}
In Bayesian Hierarchical Modeling, we are usually either interested in
estimating marginals of certain model parameters in order to gain
insights into specific problems, or we are interested in evaluating the
expected value of some (utility- or loss-) function over the posterior
of a set of random variables.
Given that in the Bayesian view, the unknown parameters of a model are
random variables like any other, so we can use the machinery of DGM
inference. Since these models can have almost arbitrary functional
relations between variables, it is common to perform Markov Chain Monte
Carlo simulation to sample from the posterior distribution.
What is problematic about these methods is that inference is usually
slow, and it is hard to determine whether the model has converged
(mixed) to a stable posterior. Generally, MCMC does not scale well to
high-dimensional problems using established methods (yet), despite the
fact that there have been some special areas where MCMC methods could be
applied to solve high dimensional problems such as large scale matrix
factorization for recommender systems.
MCMC, while an approximate approach, is asymptotically exact if applied
correctly.
\paragraph{Message Passing Algorithms}
Among the most efficient algorithms for exact and approximate inference
in discrete and conditional linear gaussian DGMs are so called Message
Passing (MP) or Belief Propagation Algorithms. These algorithms operate
on a so-called factor graph, which is very similar to a DGM, except that
vertices (factors) may represent joint distributions of multiple
variables. If factors share variables, they have to be connected (at
least indirectly) using a chain of factors where each factor contains
that variable.
Correspondingly, edges (along which messages are passed) need to be able
to convey information about joint distributions of several variables at
once.
By collapsing a DGM into a factor graph tree (so called Clique Tree or
Junction Tree), it is possible to perform efficient exact inference on
discrete and conditional linear gaussian networks using message passing
inference. That is, unless the resulting tree has at some point a too
large tree-width (loosely, the result of a large but too dense graph),
which can make exact inference intractable.
Even in those cases where exact inference is intractable, the Loopy
Belief Propagation algorithm can provide very fast approximate
(asymptotically biased) inference, providing good solutions (empirical
results) in cases where other algorithms fail or are too slow.
It is important to note that the core algorithm (message passing) of
approximate Loopy Belief Propagation and exact Clique Tree Inference are
the same.
Again, I refer to the book
\href{http://mitpress.mit.edu/books/probabilistic-graphical-models}{Probabilistic
Graphical Models} by Daphne Koller and her
\href{https://www.coursera.org/course/pgm}{Coursera course} for details.
\paragraph{Nonparametric and Particle Belief Propagation / Message
Passing}
As Dauwels et al. have pointed out in the Paper
\href{http://www.dauwels.com/Papers/Particle.pdf}{Particle Methods as
Message Passing}, it is actually possible to combine parametric exact
inference and nonparametric approximate inference by using
\textbf{Particle Lists} as messages in the message passing algorithm.
They used this approach to show that it is possible to view common MCMC
procedures such as Gibbs Sampling, Metropolis Hastings and Importance
Sampling as special cases of \textbf{Particle Message Passing}.
What is also important here: Each factor \textbf{could be an independent
MCMC sampler working on a subset of the variables and / or evidence}. Or
a faster parametric inference algorithm, if the problem allows.
Other fast inference algorithms such as \textbf{Expectation Propagation}
and other Forms of Variational Inference such as \textbf{Mean-Field}
based method can be also be cast as variants of Message Passing
procedures.
There are several key papers which describe important aspects and
approaches that might be taken:
\begin{itemize}
\item
\href{http://www.dauwels.com/Papers/Particle.pdf}{Dauwels et Al:
Particle Methods as Message Passing}
\item
\href{http://ssg.mit.edu/nbp/papers/nips03.pdf}{Ihler, Sudderth et al:
Nonparametric Belief Propagation}
\item
\href{http://ssg.mit.edu/nbp/papers/nips03.pdf}{Ihler, Sudderth et al:
Efficient Multiscale Sampling from Products of Gaussian Mixtures}
\item
\href{http://machinelearning.wus\%20tl.edu/mlpapers/paper\_files/AISTATS09\_IhlerM.pdf}{Ihler,
Mc. Allister: Particle Belief Propagation}
\item
\href{http://robotics.stanford.edu/~koller/Papers/Koller+al:UAI99.pdf}{Koller
et al: A General Algorithm for Approximate Inference and Its
Application to Hybrid Bayes Nets}
\end{itemize}
\subparagraph{The Message Fusion Problem}
All of the above papers identify a single performance bottleneck in
these algorithms.
If we have two factors which share at least one continuous variable \$ x
\$ with a smooth pdf, the probability that two factors will independently
choose the same value for that variable is essentially zero.
So if we have two factors represented using discrete particle lists \$
\phi(x) \$ and \$ \theta(x) \$, their product will be zero with near
certainty everywhere.
What we need to do is to perform so called \textbf{Message Fusion}, for
which several approaches have been proposed.
The standard approach is to use some form of \textbf{Kernel Density
Estimation} (KDE), which effectively represents each particle/sample not
as a discrete probability spike, but smoothes the density using an
appropriate kernel. Usually, Gaussian Kernels are used. Given an
efficient sampling procedure from products of Gaussian Mixtures, such as
\href{http://ssg.mit.edu/nbp/papers/nips03.pdf}{Ihler, Sudderth et al:
Efficient Multiscale Sampling from Products of Gaussian Mixtures}, we
can efficiently sample from these. More on that approach is found in
\href{http://ssg.mit.edu/nbp/papers/nips03.pdf}{Ihler, Sudderth et al:
Nonparametric Belief Propagation}
But a problem remains: It's computationally expensive to evaluate the
probability density and curvature (Jacobian and Hessian) of these
messages. And that might be important if we would like to sample from a
product of such a mixture density message with the probability density
function of a factor.
\paragraph{Particle Belief Propagation Approach}
In
\href{http://machinelearning.wus\%20tl.edu/mlpapers/paper\_files/AISTATS09\_IhlerM.pdf}{Ihler,
Mc. Allister: Particle Belief Propagation} it has been proposed to
sample from the particles in the messages themselves. This is similar to
what happens in \textbf{Particle Filters}. While potentially a good
idea, it (like Particle Filters) suffers from the \emph{thinning}
problem: If the message and the factor do not agree, the number of
useable particles gets very low and the procedure produces unreliable
and/or inaccurate results.
Like with Particle Filters, one approach to fix this problem is to use
\textbf{resampling}. That is, loosely speaking, we tell the original
factor where we got the message from, that we would like to have samples
of a finer resolution in certain regions. Then the original factor
replies with a new (importance sampled) message list, where it provides
new samples, with more (but downweighted) samples in the corresponding
regions of interest.
This procedure can already provide accurate distributed inference. It
just has one problem: It is probably pretty slow (all this re-sampling)
and requires lots of communication.
If we let the numbers of particles in each list become low, it can be
seen as a form of Gibbs Sampling where we exchange not just one particle
(the sample), but multiple of them.
This procedure, as inefficient as it might seem, might have a distinct
advantage over most MCMC Algorithms: \textbf{It allows for much easier
convergence diagnostics}. By measuring the convergence on a per-message
level, we can probably automatically determine when the algorithm has
converged to a final solution, given that we can determine this for each
factor individually.
While this sounds not so much of a great deal, actually it is: For MCMC
you usually need a human expert who decides if the numbers of samples
have been sufficient, if all relevant states have been visited etc. But
even then, that person can never be sure. Much the less, if the problem
gets high-dimensional. Having a clear convergence diagnostic opens the
door for novel applications in large scale risk analysis.
\paragraph{Compact Message Density Estimation for Message Fusion}
Another approach, which has also been taken or at least proposed by
several researchers ( see
\href{http://mach\%20inelearning.wustl.edu/mlpapers/paper\_files/AISTATS09\_IhlerM.pdf}{Ihler,
Mc. Allister: Particle Belief Propagation} for an overview) is to try to
estimate message densities using some form of nonparametric density
estimation technique which both smoothes the distribution, and
compresses the amount of data required to transfer the message. See
\href{http://robotics.stanford.edu/~koller/Papers/Koller+al:UAI99.pdf}{Koller
et al: A General Algorithm for Approximate Inference and Its Application
to Hybrid Bayes Nets} for a more thorough discussion of this.
In that paper, Density Estimation Trees (DETs) with GMMs at the leaves
have been used with success by Koller et. al. as density estimators, so
that might be a good choice to make as well. They iteratively refined
these density estimates using an iterative approach, similar to the
resampling mentioned above.
Generally, Multivariate Gaussian Mixture Models (GMMs) trained with
Regularized Expectation Maximization (EM) might be a another good
choice. See \href{http://www.cs.ubc.ca/~murphyk/MLbook/}{Kevin Murphy:
Machine Learning: A Probabilistic Perspective, Chapter 11}. These would
lend themselves to the fast methods in
\href{http://ssg.mit.edu/nbp/papers/nips03.pdf}{Ihler, Sudderth et al:
Efficient Multiscale Sampling from Products of Gaussian Mixtures}
But how do we determine the optimal number of mixture components ? Maybe
we can make the algorithm automatically choose the number of components
based on the data ?
One obvious but very slow approach would be to use cross-validation to
select an optimal number of components. But this gets prohibitively
slow. A different approach would be to use a
\href{http://www.gatsby.ucl.ac.uk/~edward/pub/inf.mix.nips.99.pdf}{Dirichlet
Process Clustering}, also called \textbf{Infinite Gaussian Mixture
Model} to choose a data-dependent number of components. Alternatively, a
possibly better alternative is not to use the Dirichlet Process Prior
for the number of components, but rather a
\href{http://en.wikipedia.org/wiki/Pitman\%E2\%80\%93Yor\_process}{Pitman-Yor
Process}, a more flexible two-parameter generalization of the Dirichlet
Process which allows for Power-Law (fat) tails.
There are a lot of ready-made implementations of these (except for the
Pitman- Yor Process Clustering), see
\href{http\%20://scikit-learn.org/stable/modules/mixture.html}{Scikit-Learn
Documentation: Mixtures}
Another (novel) approach is the following, which might be more efficient
if the Particle Lists are created using MCMC Sampling.
\paragraph{Embarrassingly Parallel MCMC}
In a recent paper (
\href{http://arxiv.org/pdf/1311.4780v1.pdf}{Neiswanger et al:
Embarrassingly Parallel MCMC}) an asymptotically exact algorithm for
performing embarassingly parallel distributed MCMC was presented.
Interestingly, the main problem solved in that paper is almost exactly
the Message Fusion problem stated above. So by solving one problem, we
get to solve inference for both high-dimensional and big-data problems.
\paragraph{Density Mapping (DRAFT)}
The core idea for Density Mapping is to combine MCMC, Gradient Ascent or
EM and Kernel Density Estimation (KDE) into a single, more efficient
algorithm for Kernel Density Estimation which can be used in the context
of large scale distributed Nonparametric Message Passing inference
engines.
We sample from a density function, and then modify the sampling density
function by subtracting from it density estimates around local modes.
This way, the probability density gets simultaneously \emph{mapped out},
ensuring that the MCMC chain spends it's computational time efficiently
by mapping so far uncovered regions of the probability space.
Let $f^* : \mathbb{R}^D \mapsto \mathbb{R}$ be our
unnormalized posterior density function, which we can evaluate at any
point. The corresponding normalized density is $P^*$ with
$P^*(x) = \frac{1}{Z} f^*(x)$ and $x \in \mathbb{R}^D$, with $Z$ being
the normalization constant, i.e. $Z = \int f^*(x) \, dx$.
We assume that we can consistently estimate the density $P^*$
(usually a posterior) using a suitable Markov Chain Monte Carlo
algorithm, such as a Metropolis--Hastings sampler, given the
unnormalized density function $f^*(x)$.
Now let us assume we have a kernel probability density estimate which
has been estimated step by step from N point probability masses:
\[
P^{E}(x) = \frac{1}{H} \sum_{i=1}^{N} \gamma_i \cdot K_i(x)
\]
Where $H = \sum_i \gamma_i$ and each $K_i$ is itself a properly
normalized density function (kernel) with a single mode
$k_i = \operatorname{argmax}_x K_i(x)$, $k_i \in \mathbb{R}^D$. We
define the unnormalized kernel density at time step $N$ as
\[
f_N^E(x) = \sum_{i=1}^{N} \frac{f_i^E(k_i)}{K_i(k_i)} \cdot K_i(x)
\]
with $f_0^E(x) = f^*(x)$. Correspondingly, we define
$\gamma_i = \frac{f^*(k_i)}{f_i^E(k_i)}$, which ensures that
$f_N^E(x) = P^{E}(x) = f^*(x)$ for all points
$x \in \{ k_1, \ldots, k_N \}$.
We define the \textbf{Unnormalized Sampling Function} as:
\[
f^F(x) = min(max(f^*(x)-f^E(x), f^*(x)^\frac{1}{s}), f^*(x))
\]
With $s > 1$ being a cooling factor which flattens the
original distribution. Plausible initial values for s might be in the
range from 2 to 100 depending on how flat we would like the distribution
to become.
If we chose the density estimation Kernels \$ K\_i \$ such that they (at
least approximately) have limited support around their mode or mean \$
x\_i \$, this Sampling Function can be calculated (or approximated)
quickly, even if N (the number of kernels in the density estimate) is
large, by just taking the nearest kernels to a given point into account.
Such a lookup can be made efficient using \textbf{KD-Trees} or
\textbf{Cover-Trees} to be performed in \$ O(D \cdot \log(N)) \$ time,
with \$ N \$ being the number of components, and \$ D \$ being the
dimensionality of the points.
During or after MCMC sampling, we should check for each sample whether
$f^*(x) - f^E(x)$ becomes negative at that point. This
would indicate regions where we over-estimate the density. In such a
case, it should be possible to shrink the variance of the responsible
component of $f^E(x)$ in that direction. Since this is computationally
intensive (we have to re-compute all mixture components), this should be
prevented by scaling down the variance of the kernel components in
directions of high variance.
\paragraph{Density Mapping Algorithm (DRAFT)}
Now, after initializing $s$ to a sensible value, and starting with
$f^E = 0$ (constant), the density estimation procedure works like this:
\begin{enumerate}[1.]
\item
Run MCMC to collect a number of samples from \$ f\^{}F \$ . Discard
burn-in samples.
\item
Check all sampled points for values with negative
$f^*(x) - f^E(x)$. Shrink the variance of the responsible
components of $f^E$.
\item
Pick a random sample, and perform gradient ascent or EM to find a
local optimum/mode of \$ f\^{}F \$: called \$ k\_i \$
\item
Check if this is a new local optimum. If not: Increase \$ s \$ and
continue at 1.)
\item
Create a Kernel density estimate \$ K\_i \$ around local optimum \$
k\_i \$ (use the Hamiltonian of \$ f\^{}* \$ at \$ k\_i \$ as a
scale/precision matrix, apply sensible regularization)
\item
(Optional): Add \$ k\_i \$ to a KD-Tree index to speed up
nearest-neighbour lookups
\item
Update \$ f\^{}E \$ and \$ f\^{}F \$ using the new kernel estimate \$
K\_i \$
\item
Stopping criterion: Has the density been flattened enough ? Then stop.
\item
otherwise go to 3.) or 1.)
\end{enumerate}
The result (so I hope) is a rather good density estimate. This algorithm
has yet to be tried in practice.
\subparagraph{Conclusions}
It seems like everything is ready to build a generic framework for large
scale inference in directed probabilistic graphical models. Someone just
has to do it.
|
{"hexsha": "a5f43929ab5a409092aebe44a49cd6c4737e6151", "size": 25160, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/Large Scale PGM Inference.tex", "max_stars_repo_name": "kadeng/pypgmc", "max_stars_repo_head_hexsha": "909445fa3a426b07b39b65d2cb8979b1db8cdfca", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2015-03-29T14:57:53.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-22T11:40:34.000Z", "max_issues_repo_path": "docs/Large Scale PGM Inference.tex", "max_issues_repo_name": "kadeng/pypgmc", "max_issues_repo_head_hexsha": "909445fa3a426b07b39b65d2cb8979b1db8cdfca", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/Large Scale PGM Inference.tex", "max_forks_repo_name": "kadeng/pypgmc", "max_forks_repo_head_hexsha": "909445fa3a426b07b39b65d2cb8979b1db8cdfca", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.8287795993, "max_line_length": 107, "alphanum_fraction": 0.7757154213, "num_tokens": 6090}
|
import numpy as np
import pandas as pd
import tensorflow as tf

import keras as K
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
# NOTE: the original import list ended with a dangling comma after
# AveragePooling2D, which is a SyntaxError; fixed by parenthesizing.
from keras.layers import (
    Activation,
    AveragePooling2D,
    BatchNormalization,
    Conv2D,
    Dense,
    Dropout,
    Flatten,
    MaxPooling2D,
    ZeroPadding2D,
)
from keras.models import Sequential
from keras.optimizers import RMSprop, Adam, SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical

from sklearn import metrics
from sklearn import model_selection
from sklearn.model_selection import train_test_split
def SELU(x):
    """Scaled Exponential Linear Unit activation.

    Uses the fixed-point constants that make SELU self-normalizing
    (outputs tend toward mean 0, variance 1).

    Args:
        x: a Keras tensor.

    Returns:
        ``scale * elu(x, alpha)`` as a Keras tensor.
    """
    # fixed point mean, var (0, 1)
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    # Bug fix: the file does `import keras as K`, and the keras package has
    # no top-level `elu`; the backend function is `keras.backend.elu`.
    return scale * K.backend.elu(x, alpha)
# reproducibility
seed = 777
tf.set_random_seed(seed)
np.random.seed(seed)
# Kaggle MNIST CSVs: `train.csv` has a `label` column plus 784 pixel
# columns; `test.csv` has only the pixels.
train = pd.read_csv("./train.csv")
test = pd.read_csv("./test.csv")
# One-hot encode the 10 digit classes.
y_train = train['label']
y_train = to_categorical(y_train, num_classes=10)
x_train = train.drop(labels=['label'], axis=1)
del train  # free the raw frame; only x_train / y_train are used below
# normalize
x_train /= 255.
test /= 255.
# reshape
# Flat 784-value rows -> (n, 28, 28, 1) image tensors for the CNN.
x_train = x_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
# split train data into (train, valid) data
# instead of this, Using k_fold
# x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=seed)
def vgg():
    """Build and compile a VGG-style CNN for 28x28x1 digit images.

    Layout: four blocks of (conv -> batch-norm -> ReLU) units with 64,
    128, 256 and 512 filters (two units in the first two blocks, three in
    the last two), each block closed by a 2x2 max pool; then a 512-unit
    dense layer and a 10-way softmax head.

    Returns:
        A compiled ``Sequential`` model (SGD lr=1e-3, categorical
        cross-entropy, accuracy metric).  Also prints the model summary.
    """

    def _conv_bn_relu(model, filters, layer_name, first=False):
        # One conv -> batch-norm -> ReLU unit; the very first conv also
        # declares the network's input shape.
        extra = {'input_shape': (28, 28, 1)} if first else {}
        model.add(Conv2D(filters, (3, 3), activation=None, padding='same',
                         name=layer_name, **extra))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    net = Sequential()
    # (block name, filters per conv unit) in network order.
    plan = [
        ("block1", (64, 64)),
        ("block2", (128, 128)),
        ("block3", (256, 256, 256)),
        ("block4", (512, 512, 512)),
    ]
    is_first_layer = True
    for block_name, filter_sizes in plan:
        for idx, filters in enumerate(filter_sizes, start=1):
            _conv_bn_relu(net, filters, '{}_conv{}'.format(block_name, idx),
                          first=is_first_layer)
            is_first_layer = False
        # Halve the spatial resolution at the end of each block.
        net.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                             name='{}_pool'.format(block_name)))

    net.add(Flatten())
    net.add(Dense(512, activation='relu', name='fc'))
    net.add(Dense(10, activation='softmax', name='predictions'))

    net.compile(optimizer=SGD(lr=1e-3), loss="categorical_crossentropy", metrics=["accuracy"])
    net.summary()
    return net
# Training hyper-parameters and the 5-fold cross-validation splitter.
epochs = 600
batch_size = 64
n_fold = 5
kf = model_selection.KFold(n_splits=n_fold, shuffle=True)
eval_fun = metrics.roc_auc_score  # NOTE(review): assigned but never used below
def run_oof(tr_x, tr_y, te_x, kf):
    """Out-of-fold training: train one model per k-fold split, predict the
    test set with each fold's best checkpoint, and combine the per-fold
    predictions by majority vote.

    Args:
        tr_x: training images, shape (n_train, 28, 28, 1).
        tr_y: one-hot training labels.
        te_x: test images to predict, shape (n_test, 28, 28, 1).
        kf: a KFold splitter over the training data.

    Returns:
        np.ndarray of shape (n_test,) holding the majority-voted digit for
        each test sample (float dtype, matching the original implementation).
    """
    preds_test = []
    for train_index, test_index in kf.split(tr_x):
        x_tr, x_te = tr_x[train_index], tr_x[test_index]
        y_tr, y_te = tr_y[train_index], tr_y[test_index]

        model = vgg()
        # Warm-start each fold from the weights of the first training stage.
        model.load_weights('./mnist-cnn-1.h5')
        # tensor_board = TensorBoard(log_dir='./logs/', histogram_freq=5)
        early_stopping = EarlyStopping(monitor='val_loss', patience=15, verbose=1, mode='auto')
        learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=5, verbose=1, factor=0.5, min_lr=1e-5)

        # Light geometric augmentation only; digits must not be flipped.
        data_generate = ImageDataGenerator(
            featurewise_center=False,
            samplewise_center=False,
            featurewise_std_normalization=False,
            samplewise_std_normalization=False,
            zca_whitening=False,
            horizontal_flip=False,
            vertical_flip=False,
            rotation_range=15,
            shear_range=0.1,
            zoom_range=0.1,
            width_shift_range=0.2,
            height_shift_range=0.2,
        )
        data_generate.fit(x_tr)
        model.fit_generator(data_generate.flow(x_tr, y_tr, batch_size=batch_size), epochs=epochs,
                            validation_data=(x_te, y_te), verbose=2,
                            # Bug fix: this previously read the *global*
                            # x_train.shape[0]; steps per epoch must cover this
                            # fold's training subset instead.
                            steps_per_epoch=x_tr.shape[0] // batch_size,
                            callbacks=[learning_rate_reduction, early_stopping,
                                       ModelCheckpoint('./mnist-cnn-2.h5', monitor='val_acc', save_best_only=True)]
                            )
        # Restore this fold's best checkpoint before predicting.
        model.load_weights('./mnist-cnn-2.h5')
        preds_test.append(np.argmax(model.predict(te_x), axis=1))

    # Majority vote across folds, generalized from the hard-coded 28000 to
    # the actual number of test samples.
    votes = np.stack(preds_test)  # shape: (n_fold, n_test)
    preds = np.array([np.bincount(votes[:, col]).argmax()
                      for col in range(votes.shape[1])], dtype=float)
    return preds
# Run the out-of-fold pipeline and write a Kaggle-style submission file.
results = run_oof(x_train, y_train, test, kf)
submission = pd.read_csv('./sample_submission.csv')
submission['Label'] = results
# The voted labels come back as floats; the submission format needs ints.
submission['Label'] = submission['Label'].astype(int)
submission.to_csv("submit-2.csv", index=False)
|
{"hexsha": "fc5df4941c5996d48efc11be8dc72df5589a1135", "size": 5661, "ext": "py", "lang": "Python", "max_stars_repo_path": "99_743/mnist-2.py", "max_stars_repo_name": "kozistr/MNIST-Competition", "max_stars_repo_head_hexsha": "dbe899774c52f82454034f9e372960c0daeb4b7c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "99_743/mnist-2.py", "max_issues_repo_name": "kozistr/MNIST-Competition", "max_issues_repo_head_hexsha": "dbe899774c52f82454034f9e372960c0daeb4b7c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "99_743/mnist-2.py", "max_forks_repo_name": "kozistr/MNIST-Competition", "max_forks_repo_head_hexsha": "dbe899774c52f82454034f9e372960c0daeb4b7c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2762430939, "max_line_length": 136, "alphanum_fraction": 0.6398162869, "include": true, "reason": "import numpy", "num_tokens": 1491}
|
[STATEMENT]
lemma happens_before_new_actionD:
assumes hb: "P,E \<turnstile> a \<le>hb a'"
and new: "is_new_action (action_obs E a')"
shows "is_new_action (action_obs E a)" "action_tid E a = action_tid E a'" "a \<le> a'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_new_action (action_obs E a) &&& action_tid E a = action_tid E a' &&& a \<le> a'
[PROOF STEP]
using hb
[PROOF STATE]
proof (prove)
using this:
P,E \<turnstile> a \<le>hb a'
goal (1 subgoal):
1. is_new_action (action_obs E a) &&& action_tid E a = action_tid E a' &&& a \<le> a'
[PROOF STEP]
proof(induct rule: converse_tranclp_induct)
[PROOF STATE]
proof (state)
goal (6 subgoals):
1. \<And>y. po_sw P E y a' \<Longrightarrow> is_new_action (action_obs E y)
2. \<And>y. po_sw P E y a' \<Longrightarrow> action_tid E y = action_tid E a'
3. \<And>y. po_sw P E y a' \<Longrightarrow> y \<le> a'
4. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> is_new_action (action_obs E y)
5. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
6. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
case (base a)
[PROOF STATE]
proof (state)
this:
po_sw P E a a'
goal (6 subgoals):
1. \<And>y. po_sw P E y a' \<Longrightarrow> is_new_action (action_obs E y)
2. \<And>y. po_sw P E y a' \<Longrightarrow> action_tid E y = action_tid E a'
3. \<And>y. po_sw P E y a' \<Longrightarrow> y \<le> a'
4. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> is_new_action (action_obs E y)
5. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
6. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
case 1
[PROOF STATE]
proof (state)
this:
goal (6 subgoals):
1. \<And>y. po_sw P E y a' \<Longrightarrow> is_new_action (action_obs E y)
2. \<And>y. po_sw P E y a' \<Longrightarrow> action_tid E y = action_tid E a'
3. \<And>y. po_sw P E y a' \<Longrightarrow> y \<le> a'
4. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> is_new_action (action_obs E y)
5. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
6. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
from new base
[PROOF STATE]
proof (chain)
picking this:
is_new_action (action_obs E a')
po_sw P E a a'
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
is_new_action (action_obs E a')
po_sw P E a a'
goal (1 subgoal):
1. is_new_action (action_obs E a)
[PROOF STEP]
by(auto dest: po_sw_into_action_order elim: action_orderE)
[PROOF STATE]
proof (state)
this:
is_new_action (action_obs E a)
goal (5 subgoals):
1. \<And>y. po_sw P E y a' \<Longrightarrow> action_tid E y = action_tid E a'
2. \<And>y. po_sw P E y a' \<Longrightarrow> y \<le> a'
3. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> is_new_action (action_obs E y)
4. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
5. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
case 2
[PROOF STATE]
proof (state)
this:
goal (5 subgoals):
1. \<And>y. po_sw P E y a' \<Longrightarrow> action_tid E y = action_tid E a'
2. \<And>y. po_sw P E y a' \<Longrightarrow> y \<le> a'
3. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> is_new_action (action_obs E y)
4. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
5. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
from new base
[PROOF STATE]
proof (chain)
picking this:
is_new_action (action_obs E a')
po_sw P E a a'
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
is_new_action (action_obs E a')
po_sw P E a a'
goal (1 subgoal):
1. action_tid E a = action_tid E a'
[PROOF STEP]
by(auto simp add: po_sw_def elim!: sync_withE elim: program_orderE synchronizes_with.cases)
[PROOF STATE]
proof (state)
this:
action_tid E a = action_tid E a'
goal (4 subgoals):
1. \<And>y. po_sw P E y a' \<Longrightarrow> y \<le> a'
2. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> is_new_action (action_obs E y)
3. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
4. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
case 3
[PROOF STATE]
proof (state)
this:
goal (4 subgoals):
1. \<And>y. po_sw P E y a' \<Longrightarrow> y \<le> a'
2. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> is_new_action (action_obs E y)
3. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
4. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
from new base
[PROOF STATE]
proof (chain)
picking this:
is_new_action (action_obs E a')
po_sw P E a a'
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
is_new_action (action_obs E a')
po_sw P E a a'
goal (1 subgoal):
1. a \<le> a'
[PROOF STEP]
by(auto dest: po_sw_into_action_order elim: action_orderE)
[PROOF STATE]
proof (state)
this:
a \<le> a'
goal (3 subgoals):
1. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> is_new_action (action_obs E y)
2. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
3. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> is_new_action (action_obs E y)
2. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
3. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
case (step a a'')
[PROOF STATE]
proof (state)
this:
po_sw P E a a''
P,E \<turnstile> a'' \<le>hb a'
is_new_action (action_obs E a'')
action_tid E a'' = action_tid E a'
a'' \<le> a'
goal (3 subgoals):
1. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> is_new_action (action_obs E y)
2. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
3. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
note po_sw = \<open>po_sw P E a a''\<close>
and new = \<open>is_new_action (action_obs E a'')\<close>
and tid = \<open>action_tid E a'' = action_tid E a'\<close>
[PROOF STATE]
proof (state)
this:
po_sw P E a a''
is_new_action (action_obs E a'')
action_tid E a'' = action_tid E a'
goal (3 subgoals):
1. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> is_new_action (action_obs E y)
2. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
3. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
case 1
[PROOF STATE]
proof (state)
this:
goal (3 subgoals):
1. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> is_new_action (action_obs E y)
2. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
3. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
from new po_sw
[PROOF STATE]
proof (chain)
picking this:
is_new_action (action_obs E a'')
po_sw P E a a''
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
is_new_action (action_obs E a'')
po_sw P E a a''
goal (1 subgoal):
1. is_new_action (action_obs E a)
[PROOF STEP]
by(auto dest: po_sw_into_action_order elim: action_orderE)
[PROOF STATE]
proof (state)
this:
is_new_action (action_obs E a)
goal (2 subgoals):
1. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
2. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
case 2
[PROOF STATE]
proof (state)
this:
goal (2 subgoals):
1. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> action_tid E y = action_tid E a'
2. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
from new po_sw tid
[PROOF STATE]
proof (chain)
picking this:
is_new_action (action_obs E a'')
po_sw P E a a''
action_tid E a'' = action_tid E a'
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
is_new_action (action_obs E a'')
po_sw P E a a''
action_tid E a'' = action_tid E a'
goal (1 subgoal):
1. action_tid E a = action_tid E a'
[PROOF STEP]
by(auto simp add: po_sw_def elim!: sync_withE elim: program_orderE synchronizes_with.cases)
[PROOF STATE]
proof (state)
this:
action_tid E a = action_tid E a'
goal (1 subgoal):
1. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
case 3
[PROOF STATE]
proof (state)
this:
goal (1 subgoal):
1. \<And>y z. \<lbrakk>po_sw P E y z; P,E \<turnstile> z \<le>hb a'; is_new_action (action_obs E z); action_tid E z = action_tid E a'; z \<le> a'\<rbrakk> \<Longrightarrow> y \<le> a'
[PROOF STEP]
from new po_sw \<open>a'' \<le> a'\<close>
[PROOF STATE]
proof (chain)
picking this:
is_new_action (action_obs E a'')
po_sw P E a a''
a'' \<le> a'
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
is_new_action (action_obs E a'')
po_sw P E a a''
a'' \<le> a'
goal (1 subgoal):
1. a \<le> a'
[PROOF STEP]
by(auto dest!: po_sw_into_action_order elim!: action_orderE)
[PROOF STATE]
proof (state)
this:
a \<le> a'
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 6144, "file": "JinjaThreads_MM_JMM_Spec", "length": 31}
|
#!/usr/bin/env python
import click
import numpy as np
import gym
def chakra_get_action(theta, ob, rng=np.random):
    """Sample an action from a unit-variance Gaussian whose mean is a
    linear function of the bias-augmented observation."""
    # NOTE(review): include_bias is not imported in this file's visible
    # header — presumably supplied elsewhere (e.g. by the rlpa2 package);
    # confirm.
    augmented_ob = include_bias(ob)
    action_mean = theta.dot(augmented_ob)
    return rng.normal(loc=action_mean, scale=1.)
@click.command()
@click.argument("env_id", type=str, default="chakra")
def main(env_id):
    """Roll out episodes in the requested gym environment using a fixed
    random linear-Gaussian policy and print each episode's total reward."""
    # Register the environment
    rng = np.random.RandomState(42)

    if env_id == 'chakra':
        # Importing the module registers the 'chakra' env with gym.
        from rlpa2 import chakra
        env = gym.make('chakra')
        get_action = chakra_get_action
        obs_dim = env.observation_space.shape[0]
        action_dim = env.action_space.shape[0]
    else:
        raise ValueError(
            "Unsupported environment: must be 'chakra' ")

    env.seed(42)

    # Initialize parameters
    # Linear policy weights: one row per action dimension, one column per
    # observation dimension plus a bias column.
    theta = rng.normal(scale=0.01, size=(action_dim, obs_dim + 1))

    # NOTE(review): this outer loop never terminates — it rolls out
    # episodes forever; confirm that is intended for this demo script.
    while True:
        ob = env.reset()
        done = False
        # Only render the first trajectory
        # Collect a new trajectory
        rewards = []
        while not done:
            action = get_action(theta, ob, rng=rng)
            next_ob, rew, done, _ = env.step(action)
            ob = next_ob
            env.render()
            rewards.append(rew)

        print("Episode reward: %.2f" % np.sum(rewards))

if __name__ == "__main__":
    main()
|
{"hexsha": "228e03502d209d7b6bf7ff67f1d34321cafa824a", "size": 1280, "ext": "py", "lang": "Python", "max_stars_repo_path": "Assignment_2/Ques/rollout.py", "max_stars_repo_name": "pragneshrana/Reinforcement-Learning", "max_stars_repo_head_hexsha": "6d891ff42437eec828e97cbbd2ad53fb112aad9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Assignment_2/Ques/rollout.py", "max_issues_repo_name": "pragneshrana/Reinforcement-Learning", "max_issues_repo_head_hexsha": "6d891ff42437eec828e97cbbd2ad53fb112aad9e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Assignment_2/Ques/rollout.py", "max_forks_repo_name": "pragneshrana/Reinforcement-Learning", "max_forks_repo_head_hexsha": "6d891ff42437eec828e97cbbd2ad53fb112aad9e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0980392157, "max_line_length": 66, "alphanum_fraction": 0.60078125, "include": true, "reason": "import numpy", "num_tokens": 320}
|
#!/usr/bin/env python3
"""Generate a test model for frugally-deep.
"""
import numbers
import sys
import numpy as np
import keras
from keras.models import Model, load_model, Sequential
from keras.layers import Input, Dense, Dropout, Flatten, Activation
from keras.layers import Conv1D, ZeroPadding1D, Cropping1D
from keras.layers import Conv2D, ZeroPadding2D, Cropping2D
from keras.layers import MaxPooling1D, AveragePooling1D, UpSampling1D
from keras.layers import MaxPooling2D, AveragePooling2D, UpSampling2D
from keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D
from keras.layers import SeparableConv2D, Conv2DTranspose
from keras.layers.advanced_activations import LeakyReLU, ELU
from keras.layers.normalization import BatchNormalization
from keras import backend as K
__author__ = "Tobias Hermann"
__copyright__ = "Copyright 2017, Tobias Hermann"
__license__ = "MIT"
__maintainer__ = "Tobias Hermann, https://github.com/Dobiasd/frugally-deep"
__email__ = "editgym@gmail.com"
def remove_sample_axis_from_shape(shape):
    """Drop the leading (sample/batch) axis from a shape tuple.

    A 4-entry shape must carry a non-numeric first entry (the symbolic
    batch dimension) and always loses it; shorter shapes lose their first
    entry only when it is not a concrete number.
    """
    leading_is_fixed = isinstance(shape[0], numbers.Number)
    if len(shape) == 4:
        # 4D shapes always start with the symbolic sample axis.
        assert not leading_is_fixed
        return shape[1:]
    return shape if leading_is_fixed else shape[1:]
def get_shape_for_random_data(data_size, shape):
    """Prepend the number of samples to a 1-, 2-, or 3-entry shape tuple.

    Any other rank is a programming error and trips the assertion.
    """
    if len(shape) in (1, 2, 3):
        return (data_size,) + tuple(shape)
    assert False
def generate_random_data(data_size, shape):
    """Draw uniform random training data of the requested sample count,
    after stripping any symbolic sample axis from `shape`."""
    core_shape = remove_sample_axis_from_shape(shape)
    full_shape = get_shape_for_random_data(data_size, core_shape)
    return np.random.random(size=full_shape)
def generate_input_data(data_size, input_shapes):
    """Random input data for training: one random array per model input."""
    arrays = []
    for input_shape in input_shapes:
        arrays.append(generate_random_data(data_size, input_shape))
    return arrays
def generate_output_data(data_size, outputs):
    """Random target data for training: one random array per model output."""
    # using ._keras_shape instead of .shape because:
    # https://github.com/fchollet/keras/issues/6777
    arrays = []
    for output in outputs:
        arrays.append(generate_random_data(data_size, output._keras_shape))
    return arrays
def get_test_model_small():
    """Returns a minimalistic test model.

    Builds a small multi-input/multi-output functional model, fits it on
    one random sample, and returns it.
    """
    input_shapes = [
        (17, 4),
        (16, 18, 3),
        (8,),
    ]
    inputs = [Input(shape=s) for s in input_shapes]

    # Two convolutions, then BatchNormalization and softmax over every
    # input in input order — order matters for the output list.
    outputs = [
        Conv1D(2, 3, padding='valid')(inputs[0]),
        Conv2D(2, (5, 7), padding='valid')(inputs[1]),
    ]
    for inp in inputs:
        outputs.append(BatchNormalization()(inp))
    for inp in inputs:
        outputs.append(Activation('softmax')(inp))
    #outputs.append(Conv2DTranspose(2, (3, 3), padding='valid')(inputs[1]))

    model = Model(inputs=inputs, outputs=outputs, name='test_model_small')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 1
    data_in = generate_input_data(training_data_size, input_shapes)
    data_out = generate_output_data(training_data_size, outputs)
    model.fit(data_in, data_out, epochs=10)
    return model
def get_test_model_sequential():
    """Returns a typical (VGG-like) sequential test model.

    Stacks the layers in a fixed order, fits on one random sample, and
    returns the model.
    """
    model = Sequential()
    for layer in (
            Conv2D(8, (3, 3), activation='relu', input_shape=(32, 32, 3)),
            Conv2D(8, (3, 3), activation='relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.25),
            Conv2D(16, (3, 3), activation='elu'),
            Conv2D(16, (3, 3)),
            ELU(),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.25),
            Flatten(),
            Dense(64, activation='sigmoid'),
            Dropout(0.5),
            Dense(10, activation='softmax')):
        model.add(layer)

    model.compile(loss='categorical_crossentropy', optimizer='sgd')

    # fit to dummy data
    training_data_size = 1
    data_in = [np.random.random(size=(training_data_size, 32, 32, 3))]
    data_out = [np.random.random(size=(training_data_size, 10))]
    model.fit(data_in, data_out, epochs=10)
    return model
def get_test_model_full():
    """Returns a maximally complex test model,
    using all supported layer types with different parameter combination.

    NOTE: the order and count of entries appended to `outputs` is
    load-bearing — it determines the model's output signature.
    """
    input_shapes = [
        (26, 28, 3),
        (4, 4, 3),
        (4, 4, 3),
        (4,),
        (2, 3),
        (27, 29, 1),
        (17, 1),
        (17, 4),
    ]
    inputs = [Input(shape=s) for s in input_shapes]
    outputs = []

    # --- 1D layers over the two rank-2 inputs (17, 1) and (17, 4) ---
    for inp in inputs[6:8]:
        for padding in ['valid', 'same']:
            for s in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv1D(out_channels, s, padding=padding,
                                   dilation_rate=d)(inp))
        for padding_size in range(0, 5):
            outputs.append(ZeroPadding1D(padding_size)(inp))
        for crop_left in range(0, 2):
            for crop_right in range(0, 2):
                outputs.append(Cropping1D((crop_left, crop_right))(inp))
        for upsampling_factor in range(1, 5):
            outputs.append(UpSampling1D(upsampling_factor)(inp))
        for padding in ['valid', 'same']:
            for pool_factor in range(1, 6):
                for s in range(1, 4):
                    outputs.append(
                        MaxPooling1D(pool_factor, strides=s,
                                     padding=padding)(inp))
                    outputs.append(
                        AveragePooling1D(pool_factor, strides=s,
                                         padding=padding)(inp))
        outputs.append(GlobalMaxPooling1D()(inp))
        outputs.append(GlobalAveragePooling1D()(inp))

    # --- 2D conv/pool layers with asymmetric kernels over two image inputs ---
    for inp in [inputs[0], inputs[5]]:
        for padding in ['valid', 'same']:
            # (h, 1) kernels.
            for h in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1), padding=padding,
                                   dilation_rate=(d, 1))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1), padding=padding,
                                            dilation_rate=(d, 1))(inp))
                    for sy in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1), strides=(1, sy),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            strides=(sy, sy),
                                            padding=padding)(inp))
                for sy in range(1, 4):
                    outputs.append(
                        MaxPooling2D((h, 1), strides=(1, sy),
                                     padding=padding)(inp))
            # (1, w) kernels.
            for w in range(1, 6):
                for out_channels in [1, 2]:
                    # NOTE(review): `sy` here is the leftover value of the
                    # completed loop above (3 after `for sy in range(1, 4)`),
                    # so the condition is always False and only d == 1 is
                    # used — likely unintended asymmetry with the `h` branch,
                    # but fixing it would change the number of model outputs;
                    # confirm before changing.
                    for d in range(1, 4) if sy == 1 else [1]:
                        outputs.append(
                            Conv2D(out_channels, (1, w), padding=padding,
                                   dilation_rate=(1, d))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w), padding=padding,
                                            dilation_rate=(1, d))(inp))
                    for sx in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (1, w), strides=(sx, 1),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            strides=(sx, sx),
                                            padding=padding)(inp))
                for sx in range(1, 4):
                    outputs.append(
                        MaxPooling2D((1, w), strides=(1, sx),
                                     padding=padding)(inp))

    # --- padding / cropping / resampling variants on the first input ---
    outputs.append(ZeroPadding2D(2)(inputs[0]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[0]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[0]))
    outputs.append(Cropping2D(2)(inputs[0]))
    outputs.append(Cropping2D((2, 3))(inputs[0]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[0]))
    for y in range(1, 3):
        for x in range(1, 3):
            outputs.append(UpSampling2D(size=(y, x))(inputs[0]))
    outputs.append(GlobalAveragePooling2D()(inputs[0]))
    outputs.append(GlobalMaxPooling2D()(inputs[0]))
    outputs.append(AveragePooling2D((2, 2))(inputs[0]))
    outputs.append(MaxPooling2D((2, 2))(inputs[0]))
    outputs.append(UpSampling2D((2, 2))(inputs[0]))
    outputs.append(keras.layers.concatenate([inputs[0], inputs[0]]))
    outputs.append(Dropout(0.5)(inputs[0]))

    # --- normalization / bias-flag / dense variants ---
    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(center=False)(inputs[0]))
    outputs.append(BatchNormalization(scale=False)(inputs[0]))
    outputs.append(Conv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(Conv2D(2, (3, 3), use_bias=False)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=False)(inputs[0]))
    outputs.append(Dense(2, use_bias=True)(inputs[3]))
    outputs.append(Dense(2, use_bias=False)(inputs[3]))

    # --- shared layers: the same conv applied to two different inputs ---
    shared_conv = Conv2D(1, (1, 1),
                         padding='valid', name='shared_conv', activation='relu')
    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[1]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[2]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1), padding='valid')(up_scale_2(inputs[2]))  # (1, 8, 8)
    x = keras.layers.concatenate([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)
    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = keras.layers.concatenate([
        MaxPooling2D((2, 2))(x),
        AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)
    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    # --- nested models: a functional model and a Sequential model used as
    # layers inside the outer model ---
    intermediate_input_shape = (3,)
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5)(intermediate_x)
    intermediate_model = Model(
        inputs=[intermediate_in], outputs=[intermediate_x],
        name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5,)))
    intermediate_model_2.add(Dense(5))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    x = Dense(3)(x)  # (1, 1, 3)

    # --- activations (including one shared Activation layer) and
    # pass-through outputs ---
    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[3]),
        Activation('hard_sigmoid')(inputs[3]),
        Activation('selu')(inputs[3]),
        Activation('sigmoid')(inputs[3]),
        Activation('softplus')(inputs[3]),
        Activation('softmax')(inputs[3]),
        Activation('relu')(inputs[3]),
        LeakyReLU()(inputs[3]),
        ELU()(inputs[3]),
        shared_activation(inputs[3]),
        inputs[4],
        inputs[1],
        x,
        shared_activation(x),
    ]

    print('Model has {} outputs.'.format(len(outputs)))

    model = Model(inputs=inputs, outputs=outputs, name='test_model_full')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 1
    batch_size = 1
    epochs = 10
    data_in = generate_input_data(training_data_size, input_shapes)
    data_out = generate_output_data(training_data_size, outputs)
    model.fit(data_in, data_out, epochs=epochs, batch_size=batch_size)
    return model
def main():
    """Generate different test models and save them to the given directory.

    Usage: generate_test_models.py [model name] [destination file path]
    Exits with status 1 on wrong argument count, 2 on an unknown model name.
    """
    if len(sys.argv) != 3:
        print('usage: [model name] [destination file path]')
        sys.exit(1)
    else:
        model_name = sys.argv[1]
        dest_path = sys.argv[2]

        # Registry mapping CLI model names to their generator functions.
        get_model_functions = {
            'small': get_test_model_small,
            'sequential': get_test_model_sequential,
            'full': get_test_model_full
        }

        # Idiomatic membership test (was `not model_name in ...`).
        if model_name not in get_model_functions:
            print('unknown model name: ', model_name)
            sys.exit(2)

        # frugally-deep only supports this backend configuration.
        assert K.backend() == "tensorflow"
        assert K.floatx() == "float32"
        assert K.image_data_format() == 'channels_last'

        # Fixed seed so generated models/weights are reproducible.
        np.random.seed(0)

        model_func = get_model_functions[model_name]
        model = model_func()
        model.save(dest_path, include_optimizer=False)

        # Make sure models can be loaded again,
        # see https://github.com/fchollet/keras/issues/7682
        model = load_model(dest_path)
        print(model.summary())


if __name__ == "__main__":
    main()
|
{"hexsha": "3153385f3db6bb1da050dbcee82d232bfe6e1f03", "size": 13741, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/libfdeep/keras_export/generate_test_models.py", "max_stars_repo_name": "Telecommunication-Telemedia-Assessment/V-BMS360", "max_stars_repo_head_hexsha": "0c974451cfb9e75e20de292dcdeae11104260b3a", "max_stars_repo_licenses": ["Unlicense", "MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-07-28T17:01:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-19T16:32:58.000Z", "max_issues_repo_path": "lib/libfdeep/keras_export/generate_test_models.py", "max_issues_repo_name": "Telecommunication-Telemedia-Assessment/V-BMS360", "max_issues_repo_head_hexsha": "0c974451cfb9e75e20de292dcdeae11104260b3a", "max_issues_repo_licenses": ["Unlicense", "MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-09-28T13:34:37.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-31T23:53:22.000Z", "max_forks_repo_path": "lib/libfdeep/keras_export/generate_test_models.py", "max_forks_repo_name": "Telecommunication-Telemedia-Assessment/V-BMS360", "max_forks_repo_head_hexsha": "0c974451cfb9e75e20de292dcdeae11104260b3a", "max_forks_repo_licenses": ["Unlicense", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.938172043, "max_line_length": 82, "alphanum_fraction": 0.5862746525, "include": true, "reason": "import numpy", "num_tokens": 3462}
|
[STATEMENT]
lemma valofn_le_imp_exponent_le:
fixes x y :: "('e, 'f) float"
assumes "valofn x \<le> valofn y"
shows "exponent x \<le> exponent y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. IEEE.exponent x \<le> IEEE.exponent y
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> IEEE.exponent x \<le> IEEE.exponent y \<Longrightarrow> False
[PROOF STEP]
assume "\<not> exponent x \<le> exponent y"
[PROOF STATE]
proof (state)
this:
\<not> IEEE.exponent x \<le> IEEE.exponent y
goal (1 subgoal):
1. \<not> IEEE.exponent x \<le> IEEE.exponent y \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> IEEE.exponent x \<le> IEEE.exponent y
[PROOF STEP]
have "valofn y < 2^exponent x / 2^bias TYPE(('e, 'f) float)"
[PROOF STATE]
proof (prove)
using this:
\<not> IEEE.exponent x \<le> IEEE.exponent y
goal (1 subgoal):
1. valofn y < 2 ^ IEEE.exponent x / 2 ^ bias TYPE(('e, 'f) IEEE.float)
[PROOF STEP]
using valofn_less_2p[of y]
[PROOF STATE]
proof (prove)
using this:
\<not> IEEE.exponent x \<le> IEEE.exponent y
IEEE.exponent y < ?e \<Longrightarrow> valofn y < 2 ^ ?e / 2 ^ bias TYPE(('e, 'f) IEEE.float)
goal (1 subgoal):
1. valofn y < 2 ^ IEEE.exponent x / 2 ^ bias TYPE(('e, 'f) IEEE.float)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
valofn y < 2 ^ IEEE.exponent x / 2 ^ bias TYPE(('e, 'f) IEEE.float)
goal (1 subgoal):
1. \<not> IEEE.exponent x \<le> IEEE.exponent y \<Longrightarrow> False
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
valofn y < 2 ^ IEEE.exponent x / 2 ^ bias TYPE(('e, 'f) IEEE.float)
goal (1 subgoal):
1. \<not> IEEE.exponent x \<le> IEEE.exponent y \<Longrightarrow> False
[PROOF STEP]
have "... \<le> valofn x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 2 ^ IEEE.exponent x / 2 ^ bias TYPE(('e, 'f) IEEE.float) \<le> valofn x
[PROOF STEP]
by (rule valofn_ge_2p)
[PROOF STATE]
proof (state)
this:
2 ^ IEEE.exponent x / 2 ^ bias TYPE(('e, 'f) IEEE.float) \<le> valofn x
goal (1 subgoal):
1. \<not> IEEE.exponent x \<le> IEEE.exponent y \<Longrightarrow> False
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
valofn y < valofn x
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
valofn y < valofn x
goal (1 subgoal):
1. False
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
valofn y < valofn x
valofn x \<le> valofn y
goal (1 subgoal):
1. False
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1135, "file": "MFODL_Monitor_Optimized_Code_Double", "length": 14}
|
%!TeX root = scan_converter
\documentclass[paper.tex]{subfiles}
\begin{document}
\section{Scan converter}
\begin{autem}
This is an example of how the course of action can be distorted by engineering. In a non-pandemic scenario such a diversion might have been a reasonable one. Indefensible here. I am consoled that it did not account for more than 2 to 3 weeks of work.
\end{autem}
\begin{autem}
This section is deliberately kept vague and without reference to construction details to avoid export restrictions. In any case, it is hard to imagine a technique fully developed and published in the 1940s would be of great secrecy now.
\end{autem}
Because of the small sizes of viruses, all interesting time-domain spectroscopy results require resolving signals with greater than 10 GHz bandwidth.
So-called "equivalent-time" or "sampling" oscilloscopes are less expensive; but it was hoped that it would be possible to resolve the breaking of the capsid - a transient, single-shot phenomenon. A digital oscilloscope capable of real-time sampling such a transient is a tall order, and it did not appear that an undergraduate would be allowed access.
Gating time. Photoconductive gating is a particularly interesting technique for this.
There is also the concern that, when measuring a system producing kilovolt pulses using an instrument with an extremely electrically delicate 1V maximum input, some harm might come to a machine worth more than the experimenter.
This turned out to be not unfounded - the kilovolt pulser used was observed to cause the RPi controlling the experiment to
However, transient recordings of sub-nanosecond phenomena were made as early as 1938 by Manfred Von Ardenne using a "micro-oscillograph". By 1946, such oscillographs - now with bandwidths in excess of 10 GHz - were readily available commercially\cite{3beam1946}, and by 1949 found extensive use for quantitative measurements of ultrafast phenomena by Fletcher\cite{Production1949} and others.
"Direct access" tubes to the logical conclusion. The design of Fletcher puts the cathode ray deflection plates essentially inside the.
The main practical limitations on oscilloscope sweep speed are discussed by Mackay\cite{New1948}, and are imposed by the Y axis deflection plate electron transit time and bandwidth (the X-axis speed is of less importance), and the beam current and sensitivity of the phosphor (faster sweeps reduce the electron flux and brightness of the dot).
one of the two-bore tubes from mcmaster, vertical - different path length - then deconvolve frequency response from the two
\begin{figure}[H]
% \makebox[\textwidth][c]{
\centering
\subfloat[Damage to the sensor, believed to be due to. ]{
\includegraphics[width=0.3\textwidth]{e_beam_damage_2}
}
\hfill
\subfloat[]{
\includegraphics[width=0.3\textwidth]{e_beam_damage}
}
\hfill
\subfloat[Direct electron beam detection]{
\includegraphics[width=0.3\textwidth]{direct_electron_beam}
}
\caption{Second.}
\hfill
\end{figure}
\begin{figure}[H]
\captionsetup{singlelinecheck = false, justification=justified}
\centering
\includegraphics[width=0.6\textwidth]{sensor_damage.jpg}
\caption{}
\end{figure}
\begin{figure}[H]
\captionsetup{singlelinecheck = false, justification=justified}
\centering
\caption{}
\end{figure}
The very high voltage actually makes this much easier
means that low-sensitivity diagnostics, like lithium niobate optical probes. Not having a suitable optical bench, an electron-beam probe, similar to a streak camera, itself similar in principle to scan-conversion digitizer tubes in Tek SCD and 7912 series transient digitizers.
If a low voltage sensitivity is required, and because modern high-pixel-density CMOS sensors allow the required deflection (and therefore the entire structure) to be minimized, these are simple to design and inexpensive (much of the complexity is in the slow-wave or travelling-wave structure which is usually required to match the signal velocity to the - slowing down light without affecting its properties or drawing power is an extremely difficult problem) and can offer sample rates in the terasamples/s and analog bandwidths of many dozens of GHz, albeit with very low vertical resolution.
original von Ardenne
\begin{fquote}[Livermore Operations][\cite{Scan2008}]
An internal CCD recorder would provide high sensitivity to the swept electron beam, but a CCD in a sealed vacuum tube has never been attempted or demonstrated in the phototube facility at Livermore Operations (LO). A prototype tube would most likely be built with a phosphor screen.
\end{fquote}
Possible routes:
make the plates super small like Von Ardenne (thin wires). see the papers on thin wires.
make the plates slow-wave structures matched to the speed of the electron; travelling-wave deflection plates.
unfortunately, travelling wave structures necessarily absorb power and affect the impedance of the circuit in question
multiple beams with different path lengths to have different phases?
put the x-axis at a 45 degree angle?
radiation damage to sensor
Possibly some optical method.
however, recording such transients is quite a solved problem, having been accomplished to the required degree
taking "direct access" tube to its logical conclusion.
since the sensor will inevitably outgas, a sealed tube is impractical.
Low vacuum, gas focused tubes are less predictable and more difficult to model. XOOPIC; the standard PIC code WARP has undocumented feature of collisional ionization but Tegerup has a guide to use. Simulation
PIC-LAS
The distortion due to transit-time phasing is
$$ \frac{\sin(\frac{2\pi F \tau}{2})}{(2\pi F \tau)/2} $$
where $\tau$ is the transit time of the electron through the deflection region
Image intensifier microchannel plates, in which angled holes cause secondary electron emission
stray light
old canal ray paper
the old WARP has a module for collisional ionization, though it's got a bug \cite{Simulation}.
use a cold barium carbonate emitter to prevent stray light
dissolve camera plastic with acetone or something
\end{document}
|
{"hexsha": "93e72205d351d4af5914a158db9abf0770d0468c", "size": 6134, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "documents/scan_converter.tex", "max_stars_repo_name": "0xDBFB7/covidinator", "max_stars_repo_head_hexsha": "e9c103e5e62bc128169400998df5f5cd13bd8949", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "documents/scan_converter.tex", "max_issues_repo_name": "0xDBFB7/covidinator", "max_issues_repo_head_hexsha": "e9c103e5e62bc128169400998df5f5cd13bd8949", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "documents/scan_converter.tex", "max_forks_repo_name": "0xDBFB7/covidinator", "max_forks_repo_head_hexsha": "e9c103e5e62bc128169400998df5f5cd13bd8949", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5972222222, "max_line_length": 596, "alphanum_fraction": 0.7939354418, "num_tokens": 1406}
|
[STATEMENT]
lemma Sup_real_close:
fixes e :: real
assumes "0 < e"
and S: "bdd_above S" "S \<noteq> {}"
shows "\<exists>x\<in>S. Sup S - e < x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>x\<in>S. Sup S - e < x
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>x\<in>S. Sup S - e < x
[PROOF STEP]
have \<open>Sup (ereal ` S) \<noteq> \<infinity>\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Sup (ereal ` S) \<noteq> \<infinity>
[PROOF STEP]
by (metis assms(2) bdd_above_def ereal_less_eq(3) less_SUP_iff less_ereal.simps(4) not_le)
[PROOF STATE]
proof (state)
this:
Sup (ereal ` S) \<noteq> \<infinity>
goal (1 subgoal):
1. \<exists>x\<in>S. Sup S - e < x
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Sup (ereal ` S) \<noteq> \<infinity>
goal (1 subgoal):
1. \<exists>x\<in>S. Sup S - e < x
[PROOF STEP]
have \<open>Sup (ereal ` S) \<noteq> -\<infinity>\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Sup (ereal ` S) \<noteq> - \<infinity>
[PROOF STEP]
by (simp add: SUP_eq_iff assms(3))
[PROOF STATE]
proof (state)
this:
Sup (ereal ` S) \<noteq> - \<infinity>
goal (1 subgoal):
1. \<exists>x\<in>S. Sup S - e < x
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
Sup (ereal ` S) \<noteq> \<infinity>
Sup (ereal ` S) \<noteq> - \<infinity>
[PROOF STEP]
have Sup_bdd: \<open>\<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity>\<close>
[PROOF STATE]
proof (prove)
using this:
Sup (ereal ` S) \<noteq> \<infinity>
Sup (ereal ` S) \<noteq> - \<infinity>
goal (1 subgoal):
1. \<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity>
goal (1 subgoal):
1. \<exists>x\<in>S. Sup S - e < x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity>
[PROOF STEP]
have \<open>\<exists>x'\<in>ereal ` S. Sup (ereal ` S) - ereal e < x'\<close>
[PROOF STATE]
proof (prove)
using this:
\<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity>
goal (1 subgoal):
1. \<exists>x'\<in>ereal ` S. Sup (ereal ` S) - ereal e < x'
[PROOF STEP]
apply (rule_tac Sup_ereal_close)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity> \<Longrightarrow> 0 < ereal e
2. \<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity> \<Longrightarrow> \<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity>
3. \<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity> \<Longrightarrow> ereal ` S \<noteq> {}
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
0 < e
bdd_above S
S \<noteq> {}
goal (3 subgoals):
1. \<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity> \<Longrightarrow> 0 < ereal e
2. \<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity> \<Longrightarrow> \<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity>
3. \<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity> \<Longrightarrow> ereal ` S \<noteq> {}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>x'\<in>ereal ` S. Sup (ereal ` S) - ereal e < x'
goal (1 subgoal):
1. \<exists>x\<in>S. Sup S - e < x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>x'\<in>ereal ` S. Sup (ereal ` S) - ereal e < x'
[PROOF STEP]
obtain x where \<open>x \<in> S\<close> and Sup_x: \<open>Sup (ereal ` S) - ereal e < ereal x\<close>
[PROOF STATE]
proof (prove)
using this:
\<exists>x'\<in>ereal ` S. Sup (ereal ` S) - ereal e < x'
goal (1 subgoal):
1. (\<And>x. \<lbrakk>x \<in> S; Sup (ereal ` S) - ereal e < ereal x\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> S
Sup (ereal ` S) - ereal e < ereal x
goal (1 subgoal):
1. \<exists>x\<in>S. Sup S - e < x
[PROOF STEP]
have \<open>Sup (ereal ` S) = ereal (Sup S)\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Sup (ereal ` S) = ereal (Sup S)
[PROOF STEP]
using Sup_bdd
[PROOF STATE]
proof (prove)
using this:
\<bar>Sup (ereal ` S)\<bar> \<noteq> \<infinity>
goal (1 subgoal):
1. Sup (ereal ` S) = ereal (Sup S)
[PROOF STEP]
by (rule ereal_Sup[symmetric])
[PROOF STATE]
proof (state)
this:
Sup (ereal ` S) = ereal (Sup S)
goal (1 subgoal):
1. \<exists>x\<in>S. Sup S - e < x
[PROOF STEP]
with Sup_x
[PROOF STATE]
proof (chain)
picking this:
Sup (ereal ` S) - ereal e < ereal x
Sup (ereal ` S) = ereal (Sup S)
[PROOF STEP]
have \<open>ereal (Sup S - e) < ereal x\<close>
[PROOF STATE]
proof (prove)
using this:
Sup (ereal ` S) - ereal e < ereal x
Sup (ereal ` S) = ereal (Sup S)
goal (1 subgoal):
1. ereal (Sup S - e) < ereal x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
ereal (Sup S - e) < ereal x
goal (1 subgoal):
1. \<exists>x\<in>S. Sup S - e < x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
ereal (Sup S - e) < ereal x
[PROOF STEP]
have \<open>Sup S - e < x\<close>
[PROOF STATE]
proof (prove)
using this:
ereal (Sup S - e) < ereal x
goal (1 subgoal):
1. Sup S - e < x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Sup S - e < x
goal (1 subgoal):
1. \<exists>x\<in>S. Sup S - e < x
[PROOF STEP]
with \<open>x \<in> S\<close>
[PROOF STATE]
proof (chain)
picking this:
x \<in> S
Sup S - e < x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
x \<in> S
Sup S - e < x
goal (1 subgoal):
1. \<exists>x\<in>S. Sup S - e < x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>x\<in>S. Sup S - e < x
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2563, "file": "Complex_Bounded_Operators_extra_Extra_General", "length": 30}
|
# O(n^3) matrix multiplication
import time
import numpy as np
def matrixmul(A, B):
    """Multiply two 2-D matrices with the naive O(n^3) triple loop.

    Parameters:
        A: 2-D numpy array of shape (I, K).
        B: 2-D numpy array of shape (K, J).

    Returns:
        numpy array C of shape (I, J) with the same dtype as A, where
        C[i, j] = sum_k A[i, k] * B[k, j].

    Raises:
        ValueError: if the inner dimensions of A and B do not match.
    """
    I, K = A.shape
    K_b, J = B.shape
    # The original silently indexed past mismatched shapes; fail fast instead.
    if K != K_b:
        raise ValueError(
            "incompatible shapes for matrix multiplication: %s x %s"
            % (A.shape, B.shape))
    C = np.zeros((I, J), dtype=A.dtype)
    for i in range(I):
        for j in range(J):
            for k in range(K):
                C[i, j] += A[i, k] * B[k, j]
    return C
# Fixed operands: a 4x3 integer matrix times a 3x2 integer matrix.
A = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
B = np.array([[1,2],[3,4],[5,6]])
# Number of repetitions per timing loop.
N = 100000
# Time NumPy's optimized matmul over N repetitions (wall-clock seconds).
s = time.time()
for i in range(N):
    C = np.matmul(A,B)
e = time.time()
print("np.matmul: %0.6f" % (e-s,))
# Time the pure-Python triple-loop implementation on the same workload,
# to contrast interpreted loops with the vectorized library call.
s = time.time()
for i in range(N):
    C = matrixmul(A,B)
e = time.time()
print("matrixmul: %0.6f" % (e-s,))
|
{"hexsha": "5d2a4e032f092beed5e48dd237c40aaf9fbec5b5", "size": 617, "ext": "py", "lang": "Python", "max_stars_repo_path": "chapter_05/matrixmul.py", "max_stars_repo_name": "rkneusel9/MathForDeepLearning", "max_stars_repo_head_hexsha": "8db1a85ce3cef4b48aab01ebe156e3fab2dfa271", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2021-10-12T19:53:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T12:41:23.000Z", "max_issues_repo_path": "chapter_05/matrixmul.py", "max_issues_repo_name": "mohit-n-rajput/MathForDeepLearning", "max_issues_repo_head_hexsha": "8db1a85ce3cef4b48aab01ebe156e3fab2dfa271", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapter_05/matrixmul.py", "max_forks_repo_name": "mohit-n-rajput/MathForDeepLearning", "max_forks_repo_head_hexsha": "8db1a85ce3cef4b48aab01ebe156e3fab2dfa271", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-06-16T17:21:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T09:22:50.000Z", "avg_line_length": 19.28125, "max_line_length": 50, "alphanum_fraction": 0.510534846, "include": true, "reason": "import numpy", "num_tokens": 225}
|
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
from simpa.utils import Tags, Settings
from simpa.utils.tissue_properties import TissueProperties
from simpa.io_handling import load_data_field, save_data_field
from simpa.core.processing_components import ProcessingComponent
from simpa.core.device_digital_twins import DigitalDeviceTwinBase, PhotoacousticDevice
import numpy as np
class FieldOfViewCropping(ProcessingComponent):
    """
    Processing component that crops simulation data fields to the field of
    view (FOV) of the given device.

    For every data field listed under Tags.DATA_FIELD, the array stored for
    the current wavelength is loaded, cropped to the device FOV (converted
    from mm to voxel indices via Tags.SPACING_MM), and saved back in place.

    Component Settings
       **Tags.DATA_FIELD required (list of data field names to crop)
    """
    # NOTE(review): the original class carried a copy-pasted docstring claiming
    # it "applies Gaussian noise"; this class only crops data fields.

    def __init__(self, global_settings, settings_key=None):
        # If no settings key is supplied, default to cropping all tissue
        # property fields plus the fluence and initial pressure fields.
        if settings_key is None:
            # TODO Extract from global settings all the fields that should be cropped
            global_settings["FieldOfViewCropping"] = Settings({
                Tags.DATA_FIELD: TissueProperties.property_tags +
                                 [Tags.DATA_FIELD_FLUENCE,
                                  Tags.DATA_FIELD_INITIAL_PRESSURE]})
        super(FieldOfViewCropping, self).__init__(global_settings, "FieldOfViewCropping")

    def run(self, device: DigitalDeviceTwinBase):
        """
        Crops all configured data fields for the current wavelength to the
        device's field of view.

        :param device: the digital device twin; for a PhotoacousticDevice the
            FOV of its detection geometry is used, otherwise the device's own.
        :raises KeyError: if Tags.DATA_FIELD is missing from the component settings.
        :raises TypeError: if Tags.DATA_FIELD is not a list.
        """
        self.logger.info("Cropping field of view...")
        if Tags.DATA_FIELD not in self.component_settings.keys():
            msg = f"The field {Tags.DATA_FIELD} must be set in order to use the fov cropping."
            self.logger.critical(msg)
            raise KeyError(msg)
        if not isinstance(self.component_settings[Tags.DATA_FIELD], list):
            msg = f"The field {Tags.DATA_FIELD} must be of type list."
            self.logger.critical(msg)
            raise TypeError(msg)
        data_fields = self.component_settings[Tags.DATA_FIELD]
        if isinstance(device, PhotoacousticDevice):
            field_of_view_mm = device.detection_geometry.get_field_of_view_mm()
        else:
            field_of_view_mm = device.get_field_of_view_mm()
        self.logger.debug(f"FOV (mm): {field_of_view_mm}")
        # Convert FOV extents from mm to voxel indices; layout is assumed to be
        # [x_start, x_end, y_start, y_end, z_start, z_end] based on the
        # index pairs used below.
        field_of_view_voxels = np.round(field_of_view_mm / self.global_settings[Tags.SPACING_MM]).astype(np.int32)
        self.logger.debug(f"FOV (voxels): {field_of_view_voxels}")
        # In case it should be cropped from A to A, then crop from A to A+1
        # so a zero-extent axis still yields one voxel instead of an empty slice.
        x_offset_correct = 1 if (field_of_view_voxels[1] - field_of_view_voxels[0]) < 1 else 0
        y_offset_correct = 1 if (field_of_view_voxels[3] - field_of_view_voxels[2]) < 1 else 0
        z_offset_correct = 1 if (field_of_view_voxels[5] - field_of_view_voxels[4]) < 1 else 0
        self.logger.debug(f"field of view to crop: {field_of_view_voxels}")
        for data_field in data_fields:
            self.logger.debug(f"Cropping data field {data_field}...")
            # load the stored array for the current wavelength
            wavelength = self.global_settings[Tags.WAVELENGTH]
            data_array = load_data_field(self.global_settings[Tags.SIMPA_OUTPUT_PATH], data_field, wavelength)
            self.logger.debug(f"data array shape before cropping: {np.shape(data_array)}")
            self.logger.debug(f"data array shape len: {len(np.shape(data_array))}")
            # input validation: non-array fields cannot be cropped
            if not isinstance(data_array, np.ndarray):
                self.logger.warning(f"The data field {data_field} was not of type np.ndarray. Skipping...")
                continue
            data_field_shape = np.shape(data_array)
            if len(data_field_shape) == 3:
                # Skip fields whose shape already equals the FOV extents.
                if ((np.array([field_of_view_voxels[1] - field_of_view_voxels[0],
                               field_of_view_voxels[3] - field_of_view_voxels[2],
                               field_of_view_voxels[5] - field_of_view_voxels[4]]) - data_field_shape) == 0).all():
                    self.logger.warning(f"The data field {data_field} is already cropped. Skipping...")
                    continue
                # crop all three axes; squeeze drops axes collapsed to size 1
                data_array = np.squeeze(data_array[field_of_view_voxels[0]:field_of_view_voxels[1] + x_offset_correct,
                                        field_of_view_voxels[2]:field_of_view_voxels[3] + y_offset_correct,
                                        field_of_view_voxels[4]:field_of_view_voxels[5] + z_offset_correct])
            elif len(data_field_shape) == 2:
                # Assumption that the data field is already in 2D shape in the y-plane
                if (np.array([field_of_view_voxels[1] - field_of_view_voxels[0],
                              field_of_view_voxels[5] - field_of_view_voxels[4]]) - data_field_shape == 0).all():
                    self.logger.warning(f"The data field {data_field} is already cropped. Skipping...")
                    continue
                # crop the x and z axes only (the y axis is already collapsed)
                data_array = np.squeeze(data_array[field_of_view_voxels[0]:field_of_view_voxels[1] + x_offset_correct,
                                        field_of_view_voxels[4]:field_of_view_voxels[5] + z_offset_correct])
            self.logger.debug(f"data array shape after cropping: {np.shape(data_array)}")
            # save the cropped array back to the same data field / wavelength
            save_data_field(data_array, self.global_settings[Tags.SIMPA_OUTPUT_PATH], data_field, wavelength)
        self.logger.info("Cropping field of view...[Done]")
|
{"hexsha": "bc24bef7364a70cd58b2d35a000d77241b14b207", "size": 5333, "ext": "py", "lang": "Python", "max_stars_repo_path": "simpa/core/processing_components/monospectral/field_of_view_cropping.py", "max_stars_repo_name": "IMSY-DKFZ/simpa", "max_stars_repo_head_hexsha": "b8bddcf43a4bff2564f0ec208dc511b82e49bfb4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-03-14T15:40:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T02:34:25.000Z", "max_issues_repo_path": "simpa/core/processing_components/monospectral/field_of_view_cropping.py", "max_issues_repo_name": "jgroehl/simpa", "max_issues_repo_head_hexsha": "e56f0802e5a8555ee8bb139dd4f776025e7e9267", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-03-18T07:19:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T12:15:19.000Z", "max_forks_repo_path": "simpa/core/processing_components/monospectral/field_of_view_cropping.py", "max_forks_repo_name": "IMSY-DKFZ/simpa", "max_forks_repo_head_hexsha": "b8bddcf43a4bff2564f0ec208dc511b82e49bfb4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.2843137255, "max_line_length": 118, "alphanum_fraction": 0.6476654791, "include": true, "reason": "import numpy", "num_tokens": 1203}
|
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
#include <boost/test/unit_test.hpp>
#include "Common/boost-taef.h"
#define ALLOC_TAG 'ttHC'
namespace UtilitiesTests
{
using namespace Common;
using namespace ktl;
using namespace Data;
using namespace Data::Utilities;
class ConcurrentHashTableStressTest
{
public:
typedef ConcurrentHashTable<LONG64, LONG64> LongLongHashTable;
enum OperationType
{
Read,
Update,
Add,
Remove
};
        // One scripted operation against the hash table under test.
        struct HashTableOperation
        {
            // Which hash-table API to invoke.
            OperationType Type;
            // Key the operation targets.
            LONG64 Key;
            // Value to set for Add/Update operations. Expected value for read/remove operations
            LONG64 Value;
            // Expected outcome; checked by ApplyOperationsAsync when verification is enabled.
            bool ShouldSucceed = true;
        };
static LONG32 Compare(__in KeyValuePair<LONG64, LONG64> const & itemOne, __in KeyValuePair<LONG64, LONG64> const & itemTwo)
{
return itemOne.Key > itemTwo.Key ? 1 : (itemOne.Key < itemTwo.Key ? -1 : 0);
}
        // Boots a private KTL system for the test and enables strict allocation
        // checks so leaked allocations fail the run.
        ConcurrentHashTableStressTest()
        {
            NTSTATUS status;
            status = KtlSystem::Initialize(FALSE, &ktlSystem_);
            CODING_ERROR_ASSERT(NT_SUCCESS(status));
            ktlSystem_->SetStrictAllocationChecks(TRUE);
        }
        // Shuts down the KTL system created by the constructor.
        ~ConcurrentHashTableStressTest()
        {
            ktlSystem_->Shutdown();
        }
        // Returns the non-paged allocator of the test's private KTL system.
        KAllocator& GetAllocator()
        {
            return ktlSystem_->NonPagedAllocator();
        }
        // Creates a Random seeded from the current tick count and prints the
        // seed so failing runs can be reproduced deterministically.
        Random GetRandom()
        {
            auto seed = Common::Stopwatch::Now().Ticks;
            Random random(static_cast<int>(seed));
            cout << "Random seed (use this seed to reproduce test failures): " << seed << endl;
            return random;
        }
        // Allocates an empty LONG64 -> LONG64 concurrent hash table with the
        // given concurrency level, using the default hash function.
        LongLongHashTable::SPtr CreateEmptyHashTable(__in ULONG concurrencyLevel=16)
        {
            LongLongHashTable::SPtr result = nullptr;
            LongLongHashTable::Create(K_DefaultHashFunction, concurrencyLevel, GetAllocator(), result);
            return result;
        }
        // Applies items[offset .. offset+count) against the hash table on a
        // thread-pool thread. When verifyValue/verifyContainsKey are set, each
        // operation's success flag (and, for Read/Remove, the returned value)
        // is asserted against the scripted expectations.
        Awaitable<void> ApplyOperationsAsync(
            __in LongLongHashTable & hashTable,
            __in KSharedArray<HashTableOperation> & items,
            __in ULONG offset,
            __in ULONG count,
            __in bool verifyContainsKey = true,
            __in bool verifyValue = true)
        {
            // Hop off the caller's context before doing the work.
            co_await CorHelper::ThreadPoolThread(GetAllocator().GetKtlSystem().DefaultThreadPool());
            for (ULONG i = offset; i < offset + count; i++)
            {
                bool successful = false;
                bool added = false;
                LONG64 outValue = -1;
                HashTableOperation item = items[i];
                switch (item.Type)
                {
                case OperationType::Add:
                    hashTable.AddOrUpdate(item.Key, item.Value, successful);
                    break;
                case OperationType::Update:
                    // AddOrUpdate reports whether the key was newly added;
                    // an Update succeeds only when the key already existed.
                    hashTable.AddOrUpdate(item.Key, item.Value, added);
                    successful = !added;
                    break;
                case OperationType::Remove:
                    successful = hashTable.TryRemove(item.Key, outValue);
                    break;
                case OperationType::Read:
                    successful = hashTable.TryGetValue(item.Key, outValue);
                    break;
                default:
                    CODING_ERROR_ASSERT(false);
                }
                if (verifyValue)
                {
                    CODING_ERROR_ASSERT(item.ShouldSucceed == successful);
                    if (item.ShouldSucceed && (item.Type == OperationType::Read || item.Type == OperationType::Remove))
                    {
                        CODING_ERROR_ASSERT(item.Value == outValue);
                    }
                }
                if (verifyContainsKey)
                {
                    CODING_ERROR_ASSERT(item.ShouldSucceed == successful);
                }
            }
        }
        // Splits the operation list into numTasks equal, contiguous slices and
        // applies them concurrently, awaiting completion of all slices.
        // NOTE(review): any remainder of items.Count() % numTasks is not applied.
        Awaitable<void> ApplyOperationsAsync(
            __in LongLongHashTable & hashTable,
            __in KSharedArray<HashTableOperation> & items,
            __in ULONG numTasks,
            __in bool verifyContainsKey = true,
            __in bool verifyValue = true)
        {
            KArray<Awaitable<void>> tasks = KArray<Awaitable<void>>(GetAllocator(), numTasks);
            const ULONG keysPerTask = items.Count() / numTasks;
            for (ULONG32 i = 0; i < numTasks; i++)
            {
                tasks.Append(ApplyOperationsAsync(hashTable, items, i * keysPerTask, keysPerTask, verifyContainsKey, verifyValue));
            }
            co_await TaskUtilities<void>::WhenAll(tasks);
        }
public:
        // Sequential sanity test: for each key, add -> read -> update -> read
        // updated value -> remove -> read-miss, verified in order with one task.
        Awaitable<void> ConcurrentHashTable_AddReadUpdateReadDeleteRead_Sequential_Test()
        {
            const LONG64 numKeys = 10'000'000;
            const ULONG numTasks = 1; // Sequential test
            KSharedArray<HashTableOperation>::SPtr operationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            // Phase 1: add every key with value == key.
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Add, key, key };
                operationsSPtr->Append(operation);
            }
            // Phase 2: read back the initial values.
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Read, key, key };
                operationsSPtr->Append(operation);
            }
            // Phase 3: update every key to key + 10.
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Update, key, key + 10 };
                operationsSPtr->Append(operation);
            }
            // Phase 4: read back the updated values.
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Read, key, key + 10 };
                operationsSPtr->Append(operation);
            }
            // Phase 5: remove every key, expecting the updated value back.
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Remove, key, key + 10 };
                operationsSPtr->Append(operation);
            }
            // Phase 6: reads must now miss (ShouldSucceed = false).
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Read, key, 0, false };
                operationsSPtr->Append(operation);
            }
            LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
            co_await ApplyOperationsAsync(*hashTableSPtr, *operationsSPtr, numTasks);
        }
        // Adds disjoint keys from 50 concurrent tasks, then verifies all
        // key/value pairs with a single sequential read pass.
        Awaitable<void> ConcurrentHashTable_ConcurrentAdd_Test()
        {
            const LONG64 numKeys = 10'000'000;
            const ULONG numAddTasks = 50;
            const ULONG numReadTasks = 1; // Verify sequentially
            KSharedArray<HashTableOperation>::SPtr addOperationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Add, key, key };
                addOperationsSPtr->Append(operation);
            }
            KSharedArray<HashTableOperation>::SPtr readOperationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Read, key, key };
                readOperationsSPtr->Append(operation);
            }
            LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
            co_await ApplyOperationsAsync(*hashTableSPtr, *addOperationsSPtr, numAddTasks);
            co_await ApplyOperationsAsync(*hashTableSPtr, *readOperationsSPtr, numReadTasks);
        }
        // Adds all keys and then updates them, each phase with 50 concurrent
        // tasks, and verifies the final values with one sequential read pass.
        Awaitable<void> ConcurrentHashTable_ConcurrentUpdate_Test()
        {
            const LONG64 numKeys = 10'000'000;
            const ULONG numWriteTasks = 50;
            const ULONG numReadTasks = 1; // Verify sequentially
            KSharedArray<HashTableOperation>::SPtr addOperationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Add, key, key };
                addOperationsSPtr->Append(operation);
            }
            KSharedArray<HashTableOperation>::SPtr updateOperationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Update, key, key };
                updateOperationsSPtr->Append(operation);
            }
            KSharedArray<HashTableOperation>::SPtr readOperationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Read, key, key };
                readOperationsSPtr->Append(operation);
            }
            LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
            co_await ApplyOperationsAsync(*hashTableSPtr, *addOperationsSPtr, numWriteTasks);
            co_await ApplyOperationsAsync(*hashTableSPtr, *updateOperationsSPtr, numWriteTasks);
            co_await ApplyOperationsAsync(*hashTableSPtr, *readOperationsSPtr, numReadTasks);
        }
        // Populates the table with 50 concurrent add tasks, then reads every
        // key back with 50 concurrent read tasks, verifying each value.
        Awaitable<void> ConcurrentHashTable_ConcurrentRead_Test()
        {
            const LONG64 numKeys = 10'000'000;
            const ULONG numTasks = 50;
            KSharedArray<HashTableOperation>::SPtr addOperationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Add, key, key };
                addOperationsSPtr->Append(operation);
            }
            KSharedArray<HashTableOperation>::SPtr readOperationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Read, key, key };
                readOperationsSPtr->Append(operation);
            }
            LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
            co_await ApplyOperationsAsync(*hashTableSPtr, *addOperationsSPtr, numTasks);
            co_await ApplyOperationsAsync(*hashTableSPtr, *readOperationsSPtr, numTasks);
        }
        // Adds and then removes all keys, each phase with 50 concurrent tasks,
        // and verifies sequentially that every subsequent read misses.
        Awaitable<void> ConcurrentHashTable_ConcurrentRemove_Test()
        {
            const LONG64 numKeys = 10'000'000;
            const ULONG numWriteTasks = 50;
            const ULONG numReadTasks = 1; //Verify sequentially
            KSharedArray<HashTableOperation>::SPtr addOperationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Add, key, key };
                addOperationsSPtr->Append(operation);
            }
            KSharedArray<HashTableOperation>::SPtr removeOperationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Remove, key, key };
                removeOperationsSPtr->Append(operation);
            }
            // Reads are expected to fail (ShouldSucceed = false) after removal.
            KSharedArray<HashTableOperation>::SPtr readOperationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            for (LONG64 key = 0; key < numKeys; key++)
            {
                HashTableOperation operation = { OperationType::Read, key, key, false };
                readOperationsSPtr->Append(operation);
            }
            LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
            co_await ApplyOperationsAsync(*hashTableSPtr, *addOperationsSPtr, numWriteTasks);
            co_await ApplyOperationsAsync(*hashTableSPtr, *removeOperationsSPtr, numWriteTasks);
            co_await ApplyOperationsAsync(*hashTableSPtr, *readOperationsSPtr, numReadTasks);
        }
        // Generates 10M unique random key/value pairs (keysSet deduplicates),
        // adds them from 50 concurrent tasks, then reads them back with 50
        // concurrent tasks verifying each value.
        Awaitable<void> ConcurrentHashTable_ConcurrentRandomAdds_Test()
        {
            LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
            Random random = GetRandom();
            const ULONG numKeys = 10'000'000;
            const ULONG numTasks = 50;
            KAutoHashTable<LONG64, bool> keysSet(100, K_DefaultHashFunction, GetAllocator());
            KSharedArray<HashTableOperation>::SPtr writeOperationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            KSharedArray<HashTableOperation>::SPtr readOperationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
            for (ULONG i = 0; i < numKeys; i++)
            {
                LONG64 key = random.Next(100'000'000);
                // Retry on duplicate keys so exactly numKeys unique keys are used.
                if (keysSet.ContainsKey(key))
                {
                    i--;
                    continue;
                }
                keysSet.Put(key, true);
                LONG64 value = random.Next(1'000'000);
                HashTableOperation writeOperation = { OperationType::Add, key, value };
                writeOperationsSPtr->Append(writeOperation);
                HashTableOperation readOperation = { OperationType::Read, key, value };
                readOperationsSPtr->Append(readOperation);
            }
            // Write in random order
            co_await ApplyOperationsAsync(*hashTableSPtr, *writeOperationsSPtr, numTasks);
            // Read in random order
            co_await ApplyOperationsAsync(*hashTableSPtr, *readOperationsSPtr, numTasks);
        }
// Mixes Add/Update/Read operations over a small key space (maxKey = 500)
// to force heavy per-bucket contention across 50 concurrent tasks.
Awaitable<void> ConcurrentHashTable_ConcurrentRandomAddOrUpdateAndRead_Test()
{
LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
Random random = GetRandom();
const ULONG numKeys = 10'000'000;
const ULONG numTasks = 50;
const int maxKey = 500; // High contention
KAutoHashTable<LONG64, bool> keysSet(100, K_DefaultHashFunction, GetAllocator());
KSharedArray<HashTableOperation>::SPtr operationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
for (ULONG i = 0; i < numKeys; i++)
{
// Alternate Add/Read, but switch to Update once the key has been seen.
OperationType opType = (i % 2 == 0) ? OperationType::Add : OperationType::Read;
LONG64 key = random.Next(maxKey);
if (keysSet.ContainsKey(key))
{
opType = OperationType::Update;
}
keysSet.Put(key, true);
LONG64 value = random.Next(maxKey);
HashTableOperation operation = { opType, key, value };
operationsSPtr->Append(operation);
}
// Trailing booleans presumably disable per-operation verification since
// interleaving makes exact values nondeterministic — confirm against
// ApplyOperationsAsync's signature.
co_await ApplyOperationsAsync(*hashTableSPtr, *operationsSPtr, numTasks, false, false);
}
// Long-running stress mix: 50M random operations over a 1M key space with
// 12 tasks, weighted 70% Update / 25% Read / 5% Remove.
Awaitable<void> ConcurrentHashTable_Stress_Test()
{
LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
Random random = GetRandom();
const ULONG numOps = 50'000'000;
const ULONG numTasks = 12;
const int maxKey = 1'000'000;
KSharedArray<HashTableOperation>::SPtr operationsSPtr = _new(ALLOC_TAG, GetAllocator()) KSharedArray<HashTableOperation>();
for (ULONG i = 0; i < numOps; i++)
{
// Draw the operation type from the cumulative distribution below.
double choice = random.NextDouble();
OperationType opType;
if (choice < 0.70)
{
opType = OperationType::Update;
}
else if (choice < 0.95)
{
opType = OperationType::Read;
}
else
{
opType = OperationType::Remove;
}
LONG64 key = random.Next(maxKey);
LONG64 value = random.Next(maxKey);
HashTableOperation operation = { opType, key, value };
operationsSPtr->Append(operation);
}
// Verification flags off: with random interleaving the outcome of any
// single operation is nondeterministic.
co_await ApplyOperationsAsync(*hashTableSPtr, *operationsSPtr, numTasks, false, false);
}
// Parameterized add test: each of numTasks coroutine tasks adds a disjoint
// contiguous range of keys (value = -key), then the enumerator is used to
// verify every expected key/value pair is present exactly once.
Awaitable<void> TestAdd(__in ULONG concurrencyLevel, __in ULONG numTasks, __in ULONG addsPerTask)
{
auto hashTable = CreateEmptyHashTable(concurrencyLevel);
KArray<Awaitable<void>> tasks = KArray<Awaitable<void>>(GetAllocator(), numTasks);
for (ULONG32 i = 0; i < numTasks; i++)
{
// Immediately-invoked coroutine lambda. taskNum is taken by value so each
// task keeps its own index; [&] captures are safe because the function
// co_awaits WhenAll below before any captured local goes out of scope.
tasks.Append([&] (ULONG32 taskNum) -> Awaitable<void> {
co_await CorHelper::ThreadPoolThread(GetAllocator().GetKtlSystem().DefaultThreadPool());
for (LONG64 j = 0; j < addsPerTask; j++)
{
hashTable->Add(j + taskNum * addsPerTask, -(j + taskNum * addsPerTask));
}
co_return;
}(i));
}
co_await TaskUtilities<void>::WhenAll(tasks);
KSharedArray<KeyValuePair<LONG64, LONG64>>::SPtr items = _new(ALLOC_TAG, GetAllocator()) KSharedArray<KeyValuePair<LONG64, LONG64>>();
auto enumerator = hashTable->GetEnumerator();
while (enumerator->MoveNext())
{
auto current = enumerator->Current();
// Invariant established above: value is the negation of its key.
CODING_ERROR_ASSERT(current.Key == -current.Value);
items->Append(current);
}
ULONG32 expectedCount = numTasks * addsPerTask;
CODING_ERROR_ASSERT(items->Count() == expectedCount);
// Enumeration order is unspecified; sort before checking for the full
// contiguous key range [0, expectedCount).
Sort<KeyValuePair<LONG64, LONG64>>::QuickSort(true, Compare, items);
for (ULONG32 i = 0; i < expectedCount; i++)
{
CODING_ERROR_ASSERT((*items)[i].Key == static_cast<LONG64>(i));
}
CODING_ERROR_ASSERT(items->Count() == expectedCount);
}
// Parameterized update test: pre-populates keys 1..updatesPerTask, then has
// every task AddOrUpdate each key to (taskNum + 2) * key concurrently. The
// final value of key k must therefore be m*k for some task multiplier
// m in (1, numTasks + 1].
Awaitable<void> TestUpdate(__in ULONG concurrencyLevel, __in ULONG numTasks, __in ULONG updatesPerTask)
{
auto hashTable = CreateEmptyHashTable(concurrencyLevel);
for (ULONG32 i = 1; i <= updatesPerTask; i++)
{
hashTable->Add(i, i);
}
KArray<Awaitable<void>> tasks = KArray<Awaitable<void>>(GetAllocator(), numTasks);
for (ULONG32 i = 0; i < numTasks; i++)
{
// Coroutine lambda: taskNum by value; [&] captures outlive the tasks
// because WhenAll is awaited below.
tasks.Append([&] (ULONG32 taskNum) -> Awaitable<void> {
co_await CorHelper::ThreadPoolThread(GetAllocator().GetKtlSystem().DefaultThreadPool());
for (LONG64 j = 1; j <= updatesPerTask; j++)
{
bool added = false;
hashTable->AddOrUpdate(j, (taskNum + 2) * j, added);
// Every key pre-exists, so AddOrUpdate must always take the update path.
CODING_ERROR_ASSERT(added == false);
}
co_return;
}(i));
}
co_await TaskUtilities<void>::WhenAll(tasks);
KSharedArray<KeyValuePair<LONG64, LONG64>>::SPtr items = _new(ALLOC_TAG, GetAllocator()) KSharedArray<KeyValuePair<LONG64, LONG64>>();
auto enumerator = hashTable->GetEnumerator();
while (enumerator->MoveNext())
{
auto current = enumerator->Current();
items->Append(current);
// Value must be an exact multiple of the key, written by some task
// whose multiplier lies in (1, numTasks + 1].
auto div = current.Value / current.Key;
auto rem = current.Value % current.Key;
CODING_ERROR_ASSERT(0 == rem);
CODING_ERROR_ASSERT(div > 1 && div <= numTasks + 1);
}
Sort<KeyValuePair<LONG64, LONG64>>::QuickSort(true, Compare, items);
ULONG32 expectedCount = updatesPerTask;
CODING_ERROR_ASSERT(items->Count() == expectedCount);
for (ULONG32 i = 0; i < expectedCount; i++)
{
CODING_ERROR_ASSERT((*items)[i].Key == static_cast<LONG64>(i + 1));
}
CODING_ERROR_ASSERT(items->Count() == expectedCount);
}
// Parameterized read test: only even keys below readsPerTask are inserted,
// so concurrent readers must find exactly the even keys (value == key) and
// miss every odd key.
Awaitable<void> TestRead(__in ULONG concurrencyLevel, __in ULONG numTasks, __in ULONG readsPerTask)
{
auto hashTable = CreateEmptyHashTable(concurrencyLevel);
for (ULONG32 i = 0; i < readsPerTask; i += 2)
{
hashTable->Add(i, i);
}
KArray<Awaitable<void>> tasks = KArray<Awaitable<void>>(GetAllocator(), numTasks);
for (ULONG32 i = 0; i < numTasks; i++)
{
// Coroutine lambda; WhenAll below keeps the [&] captures alive.
tasks.Append([&] (ULONG32 taskNum) -> Awaitable<void> {
co_await CorHelper::ThreadPoolThread(GetAllocator().GetKtlSystem().DefaultThreadPool());
for (LONG64 j = 0; j < readsPerTask; j++)
{
LONG64 val = 0;
bool found = hashTable->TryGetValue(j, val);
if (found)
{
CODING_ERROR_ASSERT(j % 2 == 0);
CODING_ERROR_ASSERT(j == val);
}
else
{
CODING_ERROR_ASSERT(j % 2 == 1);
}
}
co_return;
}(i));
}
co_await TaskUtilities<void>::WhenAll(tasks);
}
// Parameterized remove test: inserts 2 * numTasks * removesPerTask keys
// (value = -key), concurrently removes all the even keys (striped across
// tasks so removals never collide), then verifies only odd keys remain.
Awaitable<void> TestRemove(__in ULONG concurrencyLevel, __in ULONG numTasks, __in ULONG removesPerTask)
{
auto hashTable = CreateEmptyHashTable(concurrencyLevel);
ULONG32 numKeys = 2 * numTasks * removesPerTask;
for (LONG64 i = 0; i < numKeys; i++)
{
hashTable->Add(i, -i);
}
KArray<Awaitable<void>> tasks = KArray<Awaitable<void>>(GetAllocator(), numTasks);
for (ULONG32 i = 0; i < numTasks; i++)
{
// Coroutine lambda; WhenAll below keeps the [&] captures alive.
tasks.Append([&] (ULONG32 taskNum) -> Awaitable<void> {
co_await CorHelper::ThreadPoolThread(GetAllocator().GetKtlSystem().DefaultThreadPool());
for (LONG64 j = 0; j < removesPerTask; j++)
{
LONG64 val = 0;
// Only remove even keys
LONG64 key = 2 * (taskNum + j * numTasks);
bool removed = hashTable->TryRemove(key, val);
CODING_ERROR_ASSERT(removed);
CODING_ERROR_ASSERT(key == -val);
}
co_return;
}(i));
}
co_await TaskUtilities<void>::WhenAll(tasks);
KSharedArray<KeyValuePair<LONG64, LONG64>>::SPtr items = _new(ALLOC_TAG, GetAllocator()) KSharedArray<KeyValuePair<LONG64, LONG64>>();
auto enumerator = hashTable->GetEnumerator();
while (enumerator->MoveNext())
{
auto current = enumerator->Current();
items->Append(current);
CODING_ERROR_ASSERT(current.Key == -current.Value);
}
// Sort the survivors; they must be exactly the odd keys 1, 3, 5, ...
Sort<KeyValuePair<LONG64, LONG64>>::QuickSort(true, Compare, items);
for (ULONG32 i = 0; i < (numTasks * removesPerTask); i++)
{
LONG64 expected = 2 * i + 1;
CODING_ERROR_ASSERT(expected == (*items)[i].Key);
}
CODING_ERROR_ASSERT(numTasks * removesPerTask == hashTable->Count);
}
private:
KtlSystem* ktlSystem_;
};
// Boost test-suite registration. Each case below synchronously drives one of
// the coroutine test bodies defined on the fixture via SyncAwait.
BOOST_FIXTURE_TEST_SUITE(ConcurrentHashTableStressTestSuite, ConcurrentHashTableStressTest)
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_AddReadUpdateReadDeleteRead_ShouldSucceed)
{
SyncAwait(ConcurrentHashTable_AddReadUpdateReadDeleteRead_Sequential_Test());
}
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_ConcurrentAdd_ShouldSucceed)
{
SyncAwait(ConcurrentHashTable_ConcurrentAdd_Test());
}
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_ConcurrentUpdate_ShouldSucceed)
{
SyncAwait(ConcurrentHashTable_ConcurrentUpdate_Test());
}
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_ConcurrentRead_ShouldSucceed)
{
SyncAwait(ConcurrentHashTable_ConcurrentRead_Test());
}
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_ConcurrentRemove_ShouldSucceed)
{
SyncAwait(ConcurrentHashTable_ConcurrentRemove_Test());
}
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_ConcurrentRandomAdds_ShouldSucceed)
{
SyncAwait(ConcurrentHashTable_ConcurrentRandomAdds_Test());
}
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_ConcurrentRandomAddOrUpdateAndRead_ShouldSucceed)
{
SyncAwait(ConcurrentHashTable_ConcurrentRandomAddOrUpdateAndRead_Test());
}
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_Stress_ShouldSucceed)
{
SyncAwait(ConcurrentHashTable_Stress_Test());
}
// Enumerates a table of 1000 sequential key == value entries and checks the
// enumerator yields each exactly once (order-insensitive via sort).
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_EnumerateSimple_ShouldSucceed)
{
ULONG numKeys = 1'000;
LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
KSharedArray<KeyValuePair<LONG64, LONG64>>::SPtr items = _new(ALLOC_TAG, GetAllocator()) KSharedArray<KeyValuePair<LONG64, LONG64>>();
for (LONG64 i = 0; i < numKeys; i++)
{
hashTableSPtr->Add(i, i);
}
auto enumerator = hashTableSPtr->GetEnumerator();
ULONG count = 0;
while (enumerator->MoveNext())
{
items->Append(enumerator->Current());
count++;
}
CODING_ERROR_ASSERT(count == numKeys);
// Enumeration order is unspecified; sort before positional comparison.
Sort<KeyValuePair<LONG64, LONG64>>::QuickSort(true, ConcurrentHashTableStressTest::Compare, items);
for (LONG64 i = 0; i < items->Count(); i++)
{
auto item = (*items)[static_cast<ULONG>(i)];
CODING_ERROR_ASSERT(item.Key == i);
CODING_ERROR_ASSERT(item.Value == i);
}
}
// Enumerating an empty table must yield nothing.
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_Enumerate_ShouldSucceed)
{
LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
auto enumerator = hashTableSPtr->GetEnumerator();
ULONG count = 0;
while (enumerator->MoveNext())
{
count++;
}
CODING_ERROR_ASSERT(count == 0);
}
// MoveNext past the end of the sequence must keep returning false rather
// than resetting or asserting.
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_Enumerate_AfterEnd_ShouldSucceed)
{
ULONG numKeys = 1'000;
LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
for (LONG64 i = 0; i < numKeys; i++)
{
hashTableSPtr->Add(i, i);
}
auto enumerator = hashTableSPtr->GetEnumerator();
ULONG count = 0;
while (enumerator->MoveNext())
{
count++;
}
CODING_ERROR_ASSERT(count == numKeys);
CODING_ERROR_ASSERT(enumerator->MoveNext() == false);
CODING_ERROR_ASSERT(enumerator->MoveNext() == false);
}
// Dropping an in-progress enumerator must release whatever bucket lock it
// holds; a subsequent write to that bucket must not deadlock.
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_EnumerationAborted_LockShouldBeReleased)
{
ULONG numKeys = 1'000;
LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
for (LONG64 i = 0; i < numKeys; i++)
{
hashTableSPtr->Add(i, i);
}
auto enumerator = hashTableSPtr->GetEnumerator();
enumerator->MoveNext(); // This will lock on bucket 0
enumerator = nullptr;
// This should not deadlock since the enumerator is disposed
bool added = false;
hashTableSPtr->AddOrUpdate(0, 1, added);
}
// Two interleaved enumerators over the same table must each see the full
// key set independently (no shared cursor state, no mutual blocking).
BOOST_AUTO_TEST_CASE(ConcurrentHashTable_ConcurrentEnumerations_ShouldSucceed)
{
ULONG numKeys = 1'000;
LongLongHashTable::SPtr hashTableSPtr = CreateEmptyHashTable();
for (LONG64 i = 0; i < numKeys; i++)
{
hashTableSPtr->Add(i, i);
}
auto enumerator1 = hashTableSPtr->GetEnumerator();
auto enumerator2 = hashTableSPtr->GetEnumerator();
ULONG count1 = 0;
ULONG count2 = 0;
while (true)
{
// Advance both in lockstep; stop only when both are exhausted.
bool hasNext1 = enumerator1->MoveNext();
bool hasNext2 = enumerator2->MoveNext();
if (!hasNext1 && !hasNext2)
{
break;
}
count1 += hasNext1 ? 1 : 0;
count2 += hasNext2 ? 1 : 0;
}
CODING_ERROR_ASSERT(count1 == numKeys);
CODING_ERROR_ASSERT(count2 == numKeys);
}
// Parameterized sweeps over the helpers above.
// Argument order everywhere: (concurrencyLevel, numTasks, operationsPerTask).
BOOST_AUTO_TEST_CASE(TestAdd_ShouldSucceed)
{
SyncAwait(TestAdd( 1, 1, 10000));
SyncAwait(TestAdd( 5, 1, 10000));
SyncAwait(TestAdd( 1, 2, 5000));
SyncAwait(TestAdd( 1, 5, 2000));
SyncAwait(TestAdd( 4, 4, 2000));
SyncAwait(TestAdd(16, 4, 2000));
SyncAwait(TestAdd(64, 5, 5000));
SyncAwait(TestAdd( 5, 5, 2500));
}
BOOST_AUTO_TEST_CASE(TestUpdate_ShouldSucceed)
{
SyncAwait(TestUpdate( 1, 1, 10000));
SyncAwait(TestUpdate( 5, 1, 10000));
SyncAwait(TestUpdate( 1, 2, 5000));
SyncAwait(TestUpdate( 1, 5, 2001));
SyncAwait(TestUpdate( 4, 4, 2001));
SyncAwait(TestUpdate(15, 5, 2001));
SyncAwait(TestUpdate(64, 5, 5000));
SyncAwait(TestUpdate( 5, 5, 25000));
}
BOOST_AUTO_TEST_CASE(TestRead_ShouldSucceed)
{
SyncAwait(TestRead( 1, 1, 10000));
SyncAwait(TestRead( 5, 1, 10000));
SyncAwait(TestRead( 1, 2, 5000));
SyncAwait(TestRead( 1, 5, 2001));
SyncAwait(TestRead( 4, 4, 2001));
SyncAwait(TestRead(15, 5, 2001));
SyncAwait(TestRead(64, 5, 5000));
SyncAwait(TestRead( 5, 5, 25000));
}
BOOST_AUTO_TEST_CASE(TestRemove_ShouldSucceed)
{
SyncAwait(TestRemove( 1, 1, 10000));
SyncAwait(TestRemove( 5, 1, 10000));
SyncAwait(TestRemove( 1, 2, 5000));
SyncAwait(TestRemove( 1, 5, 2001));
SyncAwait(TestRemove( 4, 4, 2001));
SyncAwait(TestRemove(15, 5, 2001));
SyncAwait(TestRemove(64, 5, 5000));
SyncAwait(TestRemove( 5, 5, 25000));
}
BOOST_AUTO_TEST_SUITE_END()
}
|
{"hexsha": "830614a45c5ea386e9af5ecaf29158affe7dcd18", "size": 30966, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/prod/src/data/utilities/ConcurrentHashTable.StressTest.cpp", "max_stars_repo_name": "gridgentoo/ServiceFabricAzure", "max_stars_repo_head_hexsha": "c3e7a07617e852322d73e6cc9819d266146866a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2542.0, "max_stars_repo_stars_event_min_datetime": "2018-03-14T21:56:12.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-06T01:18:20.000Z", "max_issues_repo_path": "src/prod/src/data/utilities/ConcurrentHashTable.StressTest.cpp", "max_issues_repo_name": "gridgentoo/ServiceFabricAzure", "max_issues_repo_head_hexsha": "c3e7a07617e852322d73e6cc9819d266146866a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 994.0, "max_issues_repo_issues_event_min_datetime": "2019-05-07T02:39:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:23:04.000Z", "max_forks_repo_path": "src/prod/src/data/utilities/ConcurrentHashTable.StressTest.cpp", "max_forks_repo_name": "gridgentoo/ServiceFabricAzure", "max_forks_repo_head_hexsha": "c3e7a07617e852322d73e6cc9819d266146866a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 300.0, "max_forks_repo_forks_event_min_datetime": "2018-03-14T21:57:17.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-06T20:07:00.000Z", "avg_line_length": 36.6461538462, "max_line_length": 146, "alphanum_fraction": 0.5644254989, "num_tokens": 6813}
|
import math
import random
import collections
import numpy as np
from scipy import sparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from .. import BaseModel, register_model
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.normal_(-stdv, stdv)
if self.bias is not None:
self.bias.data.normal_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return (
self.__class__.__name__
+ " ("
+ str(self.in_features)
+ " -> "
+ str(self.out_features)
+ ")"
)
@register_model("asgcn")
class ASGCN(BaseModel):
    """Adaptive-sampling GCN.

    Stacks ``num_layers`` :class:`GraphConvolution` layers and samples a
    fixed number of neighbours per layer, with importance weights derived
    from the learned vectors ``w_s0`` / ``w_s1``.
    """

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument("--num-features", type=int)
        parser.add_argument("--num-classes", type=int)
        parser.add_argument("--hidden-size", type=int, default=64)
        parser.add_argument("--dropout", type=float, default=0.5)
        parser.add_argument("--num-layers", type=int, default=3)
        parser.add_argument("--sample-size", type=int, nargs='+', default=[64,64,32])
        # fmt: on

    @classmethod
    def build_model_from_args(cls, args):
        """Construct the model from parsed command-line arguments."""
        return cls(
            args.num_features,
            args.num_classes,
            args.hidden_size,
            args.num_layers,
            args.dropout,
            args.sample_size,
        )

    def __init__(self, num_features, num_classes, hidden_size, num_layers, dropout, sample_size):
        super(ASGCN, self).__init__()
        self.num_features = num_features
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.sample_size = sample_size
        # Sampler parameters: one weight per input feature.
        self.w_s0 = Parameter(torch.FloatTensor(num_features))
        self.w_s1 = Parameter(torch.FloatTensor(num_features))
        shapes = [num_features] + [hidden_size] * (num_layers - 1) + [num_classes]
        self.convs = nn.ModuleList(
            [
                GraphConvolution(shapes[layer], shapes[layer + 1])
                for layer in range(num_layers)
            ]
        )
        self.reset_parameters()

    def reset_parameters(self):
        # Only the sampler weights are (re)initialised here; each
        # GraphConvolution initialises itself in its own constructor.
        stdv = 1.0 / math.sqrt(self.hidden_size)
        self.w_s0.data.normal_(-stdv, stdv)
        self.w_s1.data.normal_(-stdv, stdv)

    def set_adj(self, edge_index, num_nodes):
        """Build the CSR adjacency and its fixed-width adjacency-list copy."""
        self.sparse_adj = sparse.coo_matrix(
            (np.ones(edge_index.shape[1]), (edge_index[0], edge_index[1])),
            shape=(num_nodes, num_nodes),
        ).tocsr()
        self.num_nodes = num_nodes
        self.adj = self.compute_adjlist(self.sparse_adj)
        self.adj = torch.tensor(self.adj)

    def compute_adjlist(self, sp_adj, max_degree=32):
        """Transfer sparse adjacent matrix to adj-list format.

        Rows are padded with the sentinel index ``num_data`` (a virtual
        padding node); nodes with more than ``max_degree`` neighbours are
        down-sampled uniformly without replacement.
        """
        num_data = sp_adj.shape[0]
        adj = num_data + np.zeros((num_data+1, max_degree), dtype=np.int32)
        for v in range(num_data):
            neighbors = np.nonzero(sp_adj[v, :])[1]
            len_neighbors = len(neighbors)
            if len_neighbors > max_degree:
                neighbors = np.random.choice(neighbors, max_degree, replace=False)
                adj[v] = neighbors
            else:
                adj[v, :len_neighbors] = neighbors
        return adj

    def from_adjlist(self, adj):
        """Transfer adj-list format to sparsetensor."""
        u_sampled, index = torch.unique(torch.flatten(adj), return_inverse=True)
        # Fix: torch.range is deprecated (inclusive end, float dtype).
        # arange with integer floor-division yields the identical row
        # indices without the float round-trip.
        row = (torch.arange(index.shape[0], dtype=torch.long) // adj.shape[1]).to(adj.device)
        col = index
        values = torch.ones(index.shape[0]).float().to(adj.device)
        indices = torch.cat([row.unsqueeze(1), col.unsqueeze(1)], dim=1).t()
        dense_shape = (adj.shape[0], u_sampled.shape[0])
        support = torch.sparse_coo_tensor(indices, values, dense_shape)
        return support, u_sampled.long()

    def _sample_one_layer(self, x, adj, v, sample_size):
        """Importance-sample ``sample_size`` neighbours of the nodes ``v``."""
        support, u = self.from_adjlist(adj)
        h_v = torch.sum(torch.matmul(x[v], self.w_s1))
        h_u = torch.matmul(x[u], self.w_s0)
        attention = (F.relu(h_v + h_u) + 1) * (1.0 / sample_size)
        g_u = F.relu(h_u) + 1
        p1 = attention * g_u
        p1 = p1.cpu()
        # The padding sentinel (index num_nodes) must never be sampled.
        if self.num_nodes in u:
            p1[u == self.num_nodes] = 0
        p1 = p1 / torch.sum(p1)
        # NOTE(review): multinomial without replacement needs at least
        # sample_size strictly positive probabilities — confirm upstream.
        samples = torch.multinomial(p1, sample_size, False)
        u_sampled = u[samples]
        support_sampled = torch.index_select(support, 1, samples)
        return u_sampled, support_sampled

    def sampling(self, x, v):
        """Sample per-layer supports top-down for the seed nodes ``v``.

        Returns (features of the deepest sampled nodes, list of supports,
        0 — placeholder, presumably a variance/loss term; confirm callers).
        """
        all_support = [[] for _ in range(self.num_layers)]
        sampled = v
        # Append an all-zero feature row acting as the padding node.
        x = torch.cat((x, torch.zeros(1, x.shape[1]).to(x.device)), dim=0)
        for i in range(self.num_layers - 1, -1, -1):
            cur_sampled, cur_support = self._sample_one_layer(x, self.adj[sampled], sampled, self.sample_size[i])
            all_support[i] = cur_support.to(x.device)
            sampled = cur_sampled
        return x[sampled.to(x.device)], all_support, 0

    def forward(self, x, adj):
        """Run the GCN stack; ``adj`` is the per-layer support list."""
        for index, conv in enumerate(self.convs[:-1]):
            x = F.relu(conv(x, adj[index]))
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.convs[-1](x, adj[-1])
        return F.log_softmax(x, dim=1)
|
{"hexsha": "b444d9409896fce67158efe333cd99592ee3cccf", "size": 6308, "ext": "py", "lang": "Python", "max_stars_repo_path": "cogdl/models/nn/asgcn.py", "max_stars_repo_name": "xssstory/cogdl", "max_stars_repo_head_hexsha": "ae8de495c365993f19f04774f083960fd282c2a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-20T07:14:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-20T07:14:50.000Z", "max_issues_repo_path": "cogdl/models/nn/asgcn.py", "max_issues_repo_name": "xssstory/cogdl", "max_issues_repo_head_hexsha": "ae8de495c365993f19f04774f083960fd282c2a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cogdl/models/nn/asgcn.py", "max_forks_repo_name": "xssstory/cogdl", "max_forks_repo_head_hexsha": "ae8de495c365993f19f04774f083960fd282c2a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-17T02:44:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-17T02:44:09.000Z", "avg_line_length": 33.9139784946, "max_line_length": 113, "alphanum_fraction": 0.5995561192, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1510}
|
import numpy as np
import homog as hg
try:
from glumpy.api.matplotlib import *
from glumpy.transforms import *
_have_glumpy = True
except ImportError:
_have_glumpy = False
try:
import matplotlib.pyplot as plt
_have_matplotlib = True
except ImportError:
_have_matplotlib = False
def scatter(*args, **kwargs):
    """Forward to matplotlib's scatter and show the figure.

    Fix: guard on _have_matplotlib (set by the import block above), matching
    plot3d's glumpy guard — previously this raised NameError on `plt` when
    matplotlib was not installed.
    """
    if not _have_matplotlib:
        print("scatter: matplotlib not available, no plotting")
        return
    plt.scatter(*args, **kwargs)
    plt.show()
def plot3d(data, norm=True):
    """Show an (N, 3) point cloud in two side-by-side glumpy panels sharing
    one trackball: the raw points, and the same points rotated slightly
    about z. With norm=True (default) the data is first centred and scaled
    by its radius of gyration; a normalised copy is used, the caller's
    array is not modified. No-op with a message when glumpy is missing."""
    if not _have_glumpy:
        print("plot3d: glumpy not available, no 3d plotting")
        return
    if norm:
        # Centre on the centroid and divide by the radius of gyration so
        # the cloud fits the unit-ish viewing volume.
        data = data.copy()
        com = data.mean(axis=0)
        data -= com
        rg = np.sqrt(np.sum(data**2) / len(data))
        print("plot3d com", com)
        print("plot3d rg", rg)
        data /= rg
    figure = Figure((24, 12))
    # use shared Trackball iface
    tball = Trackball(name="trackball")
    left = figure.add_axes([0.0, 0.0, 0.5, 1.0], interface=tball, facecolor=(1, 1, 1, 0.25))
    right = figure.add_axes(
        [0.5, 0.0, 0.5, 1.0],
        xscale=LinearScale(),
        yscale=LinearScale(),
        zscale=LinearScale(),
        interface=tball,
        facecolor=(1, 1, 1, 0.25),
    )
    collection1 = PointCollection("agg")
    collection2 = PointCollection("agg")
    left.add_drawable(collection1)
    right.add_drawable(collection2)
    # Right panel: same cloud rotated -0.08 rad about the z axis.
    dat2 = (hg.rot([0, 0, 1], -0.08) @ data[..., None]).squeeze()
    collection1.append(data)
    collection2.append(dat2)
    # Show figure
    figure.show()
|
{"hexsha": "672dba03a34f1e4f63e2fb6417b046992003971a", "size": 1420, "ext": "py", "lang": "Python", "max_stars_repo_path": "worms/vis/plot.py", "max_stars_repo_name": "abiedermann/worms", "max_stars_repo_head_hexsha": "026c45a88d5c71b0e035ac83de6f4dc107316ed8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-01-30T23:13:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-12T22:36:54.000Z", "max_issues_repo_path": "worms/vis/plot.py", "max_issues_repo_name": "abiedermann/worms", "max_issues_repo_head_hexsha": "026c45a88d5c71b0e035ac83de6f4dc107316ed8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2018-02-23T00:52:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-26T00:02:32.000Z", "max_forks_repo_path": "worms/vis/plot.py", "max_forks_repo_name": "abiedermann/worms", "max_forks_repo_head_hexsha": "026c45a88d5c71b0e035ac83de6f4dc107316ed8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-06-28T21:30:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T17:50:42.000Z", "avg_line_length": 22.9032258065, "max_line_length": 91, "alphanum_fraction": 0.6302816901, "include": true, "reason": "import numpy", "num_tokens": 431}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 1 19:57:03 2018
@author: 39105
"""
import os #only convenient for clusters of files
import numpy as np
import re
import shutil
import pandas as pd
path = r'C:\Users\39105\Downloads\精确控制频率\new_folder\新建文件夹\results'
pattern = r"\d+\.\d*"
# 'display.precision' is the non-deprecated key (bare 'precision' was
# removed in pandas 1.0+); note it affects console display only, not to_csv.
pd.set_option('display.precision', 18)
files = os.listdir(path)
pattern = re.compile(pattern)
# Recreate a clean results directory on every run.
if os.path.exists(path + '\\' + 'results'):
    shutil.rmtree(path + '\\' + 'results')
if not os.path.exists(path + '\\' + 'results'):
    os.makedirs(path + '\\' + 'results')
if os.path.exists(path + '\\' + 'results' + '\\' + 'output.txt'):
    os.remove(path + '\\' + 'results' + '\\' + 'output.txt')
# Parse one (1, 11, 2, 4) block of decimal numbers per input file.
# Fix: the original seeded the array with np.ones((1, 11, 2, 4)) and kept
# that bogus row, so person_0.csv contained fabricated all-ones data.
# Also: file handles are now closed via a context manager.
blocks = []
for file in files:
    if os.path.isdir(path + '\\' + file):
        continue
    if file == 'note.txt':
        continue
    with open(path + '\\' + file, 'r') as f:
        txt = f.readlines()
    data = pattern.findall(str(txt))
    data = np.array([float(e) for e in data])
    blocks.append(data.reshape(1, 11, 2, 4))
all_data = np.vstack(blocks) if blocks else np.empty((0, 11, 2, 4))
# One CSV per person: 11 (distance, similarity) row pairs x 4 channels.
for i in range(all_data.shape[0]):
    person = all_data[i].reshape(22, 4)
    pd_data = pd.DataFrame(person,
                           columns=['channel0', 'channel1', 'channel2', 'channel3'],
                           index=['distance', 'similarity'] * 11)
    pd_data.to_csv((path + '\\' + 'results' + '\\' + 'person_%d.csv') % i)
|
{"hexsha": "2d5d9a21da3db86a1343a8f5f4602f2fee9e5bd6", "size": 1338, "ext": "py", "lang": "Python", "max_stars_repo_path": "trivial data analysis/txt2excel.py", "max_stars_repo_name": "Nemo-Cartman/trivial_data_analysis", "max_stars_repo_head_hexsha": "d3494d1dd301dc11b6c752011e4661d01c43ff14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-31T15:18:37.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-31T15:18:37.000Z", "max_issues_repo_path": "trivial data analysis/txt2excel.py", "max_issues_repo_name": "Nemo-Cartman/trivial_data_analysis", "max_issues_repo_head_hexsha": "d3494d1dd301dc11b6c752011e4661d01c43ff14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trivial data analysis/txt2excel.py", "max_forks_repo_name": "Nemo-Cartman/trivial_data_analysis", "max_forks_repo_head_hexsha": "d3494d1dd301dc11b6c752011e4661d01c43ff14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4090909091, "max_line_length": 124, "alphanum_fraction": 0.6225710015, "include": true, "reason": "import numpy", "num_tokens": 390}
|
!> Postprocessing driver for the low-Mach problem: writes each requested
!> field (velocity, pressure, temperature, density, thermodynamic pressure,
!> streamfunction, subgrid scales, residuals, residual projections) when its
!> dopost flag is set, and handles point tracking on itask == 0.
subroutine lmn_output(a,itask)
use typre
use Mod_Postpr
use Mod_Stream
use Mod_LowMach
implicit none
class(LowMachProblem) :: a
integer(ip) :: ndime,itask
integer(ip), save :: dopost(12)
! Explicit interface for the externally-defined tracking routine.
! Fix: this block previously declared nsi_outtpo (never called here),
! leaving the actual call to lmn_outtpo without the explicit interface
! that its polymorphic class(LowMachProblem) argument requires.
interface
subroutine lmn_outtpo(a)
use typre
import LowMachProblem
implicit none
class(LowMachProblem) :: a
end subroutine
end interface
!Decide which postprocesses need to be done
call SetupDoPost(itask,a%istep,size(a%npp_stepi),a%npp_inits,a%npp_stepi,a%pos_alrea,dopost)
if (dopost(1) == 1) then
call a%FilePostpr%postpr(a%veloc(:,:,1),'Velocity',a%istep,a%ctime,a%Mesh)
end if
if (dopost(2) == 1) then
call a%FilePostpr%postpr(a%press(:,1),'Pressure',a%istep,a%ctime,a%Mesh)
end if
if (dopost(3) == 1) then
call a%FilePostpr%postpr(a%tempe(:,1),'Temperature',a%istep,a%ctime,a%Mesh)
end if
if (dopost(4) == 1) then
! Density is derived, so recompute before writing.
call a%ComputeDensity
call a%FilePostpr%postpr(a%densf(:),'Density',a%istep,a%ctime,a%Mesh)
end if
if (dopost(5) == 1) then
call a%FilePostpr%postpr(a%pther(1),'Therm_Pressure',a%istep,a%ctime,a%Mesh)
end if
if (dopost(7) == 1) then
call a%Mesh%GetNdime(ndime)
call stream(a%veloc,ndime,a%istep,a%ctime,a%Mesh,a%Memor,a%FilePostpr,a%MPIcomm,a%MPIroot,a%MPIrank,a%MPISize)
end if
!Subscales
if (dopost(8) == 1) then
call a%FilePostpr%postgp(a%vesgs,'VelocitySGS',a%istep,a%ctime,a%Mesh)
end if
if (dopost(9) == 1) then
call a%FilePostpr%postgp(a%tesgs,'TemperatureSGS',a%istep,a%ctime,a%Mesh,'SCALAR')
end if
if (dopost(10) == 1) then
call a%FilePostpr%postgp(a%prsgs,'PressureSGS',a%istep,a%ctime,a%Mesh)
end if
!Residual
if (dopost(11) == 1) then
call a%FilePostpr%postgp(a%residualU,'ResidualU',a%istep,a%ctime,a%Mesh)
call a%FilePostpr%postgp(a%residualP,'ResidualP',a%istep,a%ctime,a%Mesh)
call a%FilePostpr%postgp(a%residualT,'ResidualT',a%istep,a%ctime,a%Mesh)
end if
!Repro
if (dopost(12) == 1) then
call a%Mesh%GetNdime(ndime)
! repro layout: rows 1..ndime are velocity, ndime+1 temperature,
! ndime+2 pressure (see the three writes below).
call a%FilePostpr%postpr(a%repro(1:ndime,:),'ResidualProjU',a%istep,a%ctime,a%Mesh)
call a%FilePostpr%postpr(a%repro(ndime+2,:),'ResidualProjP',a%istep,a%ctime,a%Mesh)
call a%FilePostpr%postpr(a%repro(ndime+1,:),'ResidualProjT',a%istep,a%ctime,a%Mesh)
end if
select case(itask)
case(0)
! End-of-step bookkeeping: clear the "already written" flags and, if
! tracking points are defined, write their time histories.
a%pos_alrea=0
!Tracking of points.
if(a%nptra > 0) then
call lmn_outtpo(a)
end if
end select
end subroutine lmn_output
|
{"hexsha": "35e8e6f1f55969a994d7b02e12789d65f1c5b0b9", "size": 2601, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Sources/modules/lmachn/lmn_output.f90", "max_stars_repo_name": "ciaid-colombia/InsFEM", "max_stars_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-24T08:19:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-24T08:19:54.000Z", "max_issues_repo_path": "Sources/modules/lmachn/lmn_output.f90", "max_issues_repo_name": "ciaid-colombia/InsFEM", "max_issues_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sources/modules/lmachn/lmn_output.f90", "max_forks_repo_name": "ciaid-colombia/InsFEM", "max_forks_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6, "max_line_length": 116, "alphanum_fraction": 0.6439830834, "num_tokens": 979}
|
import numpy as np
import pandas as pd
import re
def one_hot_encode_vector(x, dict_size):
    """Return a (dict_size, len(x)) one-hot matrix: column j has a 1 in
    row x[j] and zeros elsewhere."""
    indices = np.asarray(x)
    encoded = np.zeros((dict_size, len(x)))
    encoded[indices, np.arange(len(x))] = 1
    return encoded
def one_hot_encode_matrix(A, dict_size):
    """One-hot encode each row of A; result has one (dict_size, row_len)
    matrix per row of A, stacked along a leading axis."""
    encoded_rows = [one_hot_encode_vector(row, dict_size) for row in A]
    return np.array(encoded_rows)
class dictionary(dict):
    """
    Extends python dictionary in order to have
        index --> word
    but also
        word --> index

    `index` maps value -> key; `size` tracks the number of bindings.
    """
    def __init__(self):
        super(dictionary, self).__init__()
        self.index = {}
        self.size = 0

    def __setitem__(self, key, value):
        # Fix: overwriting an existing key previously inflated `size` and
        # left a stale entry for the old value in `index`.
        if key in self:
            old_value = super(dictionary, self).__getitem__(key)
            self.index.pop(old_value, None)
        else:
            self.size += 1
        super(dictionary, self).__setitem__(key, value)
        self.index[value] = key

    def __delitem__(self, key):
        # Remove the binding and its reverse-index entry (tolerating a
        # missing reverse entry), keeping `size` in sync.
        value = super().pop(key)
        self.index.pop(value, None)
        self.size -= 1
def process_corpus(corpus, context_size, dictionary, fixed_dictionary=False):
    """Run process_document over every document in `corpus` and return the
    concatenated list of context tuples, in corpus order."""
    points = []
    for document in corpus:
        points.extend(process_document(document, context_size, dictionary, fixed_dictionary))
    return points
def process_document(document, context_size, dictionary, fixed_dictionary=False):
    """
    Given a dictionary, extract the tuples of words of length equal to
    context_size. Each word is represented by a unique integer number.
    If fixed_dictionary is True, only take consecutive tuples of words
    being (all of them) in the dictionary.

    Example:
        document = "This is a new document"
        context_size = 4
        dictionary = {0: "this", 1: "is", 2: "a", 3: "new", 4: "document"}
    returns
        [(0, 1, 2, 3), (1, 2, 3, 4)]
    """
    # Lowercase, then tokenize into runs of ascii letters.
    tokens = re.compile("[a-z]+").findall(document.lower())
    points = []
    for start in range(len(tokens) - context_size + 1):
        window = tokens[start:start + context_size]
        point = []
        valid = True
        for token in window:
            if token not in dictionary.index:
                if fixed_dictionary:
                    # A word outside the fixed dictionary invalidates
                    # the whole window.
                    valid = False
                    break
                # Grow the dictionary: next free index is its size.
                dictionary[dictionary.size] = token
            point.append(dictionary.index[token])
        if valid:
            points.append(tuple(point))
    return points
def create_training_dataset_arXiv(path2file, context_size, dict_size):
    """
    Create the training data set from the arXiv data for a given
    context_size and dict_size.

    path2file: path to a '|'-separated CSV with a 'summary' column.
    Returns (new_dataset, new_dictionary).
    """
    # Bug fix: the original ignored path2file and always read the
    # hard-coded "./arxiv_articles.csv".
    data = pd.read_csv(path2file, sep="|")
    mydict = dictionary()
    # Process the corpus one first time to create the dictionary
    dataset = process_corpus(data['summary'], context_size, mydict)
    data_df = pd.DataFrame(dataset)
    # NOTE(review): frequencies are computed over the first word of each
    # window only (column 0) — presumably intended as a proxy for overall
    # word frequency; confirm against the model's requirements.
    word_counts = data_df.iloc[:, 0].value_counts()
    # value_counts sorts descending, so this keeps the most frequent words.
    words2keep = word_counts.keys()[:dict_size]
    # Create a new clean dictionary with the
    # words selected in the previous step
    new_dictionary = dictionary()
    for i in range(len(words2keep)):
        new_dictionary[i] = mydict[words2keep[i]]
    # Build the new training dataset using the new dictionary
    # and the series of context_size words appearing in the text;
    # windows containing out-of-dictionary words are dropped.
    new_dataset = process_corpus(data['summary'], context_size, new_dictionary,
                                 fixed_dictionary=True)
    return new_dataset, new_dictionary
|
{"hexsha": "94eb75da45616b2e35b1e042e58c1f1e5fc0ff7d", "size": 3670, "ext": "py", "lang": "Python", "max_stars_repo_path": "nplm/data_manipulation.py", "max_stars_repo_name": "willyrv/nplm", "max_stars_repo_head_hexsha": "1ab35a1aa447572f35704787c3765f00fe3280f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nplm/data_manipulation.py", "max_issues_repo_name": "willyrv/nplm", "max_issues_repo_head_hexsha": "1ab35a1aa447572f35704787c3765f00fe3280f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nplm/data_manipulation.py", "max_forks_repo_name": "willyrv/nplm", "max_forks_repo_head_hexsha": "1ab35a1aa447572f35704787c3765f00fe3280f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4778761062, "max_line_length": 102, "alphanum_fraction": 0.6207084469, "include": true, "reason": "import numpy", "num_tokens": 850}
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- The identity function
------------------------------------------------------------------------

{-# OPTIONS --without-K --safe #-}

module Function.Construct.Identity where

open import Data.Product using (_,_)
open import Function using (id)
open import Function.Bundles
import Function.Definitions as Definitions
import Function.Structures as Structures
open import Level
open import Relation.Binary
open import Relation.Binary.PropositionalEquality using (_≡_; setoid)

private
  variable
    a ℓ : Level
    A : Set a

------------------------------------------------------------------------
-- Properties

-- Basic properties of `id` over an arbitrary relation _≈_.
-- Reflexivity of _≈_ is required wherever `id` must relate an
-- element to itself.
module _ (_≈_ : Rel A ℓ) where

  open Definitions _≈_ _≈_

  -- Injectivity of `id` is witnessed by `id` itself.
  injective : Injective id
  injective = id

  surjective : Reflexive _≈_ → Surjective id
  surjective refl x = x , refl

  bijective : Reflexive _≈_ → Bijective id
  bijective refl = injective , surjective refl

  -- `id` is its own left and right inverse.
  inverseˡ : Reflexive _≈_ → Inverseˡ id id
  inverseˡ refl x = refl

  inverseʳ : Reflexive _≈_ → Inverseʳ id id
  inverseʳ refl x = refl

  inverseᵇ : Reflexive _≈_ → Inverseᵇ id id
  inverseᵇ refl = inverseˡ refl , inverseʳ refl

------------------------------------------------------------------------
-- Structures

-- Packaging the above properties into the standard structure records,
-- given an equivalence relation.
module _ {_≈_ : Rel A ℓ} (isEq : IsEquivalence _≈_) where

  open Structures _≈_ _≈_
  open IsEquivalence isEq

  isCongruent : IsCongruent id
  isCongruent = record
    { cong = id
    ; isEquivalence₁ = isEq
    ; isEquivalence₂ = isEq
    }

  isInjection : IsInjection id
  isInjection = record
    { isCongruent = isCongruent
    ; injective = injective _≈_
    }

  isSurjection : IsSurjection id
  isSurjection = record
    { isCongruent = isCongruent
    ; surjective = surjective _≈_ refl
    }

  isBijection : IsBijection id
  isBijection = record
    { isInjection = isInjection
    ; surjective = surjective _≈_ refl
    }

  isLeftInverse : IsLeftInverse id id
  isLeftInverse = record
    { isCongruent = isCongruent
    ; cong₂ = id
    ; inverseˡ = inverseˡ _≈_ refl
    }

  isRightInverse : IsRightInverse id id
  isRightInverse = record
    { isCongruent = isCongruent
    ; cong₂ = id
    ; inverseʳ = inverseʳ _≈_ refl
    }

  isInverse : IsInverse id id
  isInverse = record
    { isLeftInverse = isLeftInverse
    ; inverseʳ = inverseʳ _≈_ refl
    }

------------------------------------------------------------------------
-- Setoid bundles

-- Bundled versions over a setoid S: `id` as a morphism from S to S.
module _ (S : Setoid a ℓ) where

  open Setoid S

  injection : Injection S S
  injection = record
    { f = id
    ; cong = id
    ; injective = injective _≈_
    }

  surjection : Surjection S S
  surjection = record
    { f = id
    ; cong = id
    ; surjective = surjective _≈_ refl
    }

  bijection : Bijection S S
  bijection = record
    { f = id
    ; cong = id
    ; bijective = bijective _≈_ refl
    }

  equivalence : Equivalence S S
  equivalence = record
    { f = id
    ; g = id
    ; cong₁ = id
    ; cong₂ = id
    }

  leftInverse : LeftInverse S S
  leftInverse = record
    { f = id
    ; g = id
    ; cong₁ = id
    ; cong₂ = id
    ; inverseˡ = inverseˡ _≈_ refl
    }

  rightInverse : RightInverse S S
  rightInverse = record
    { f = id
    ; g = id
    ; cong₁ = id
    ; cong₂ = id
    ; inverseʳ = inverseʳ _≈_ refl
    }

  inverse : Inverse S S
  inverse = record
    { f = id
    ; f⁻¹ = id
    ; cong₁ = id
    ; cong₂ = id
    ; inverse = inverseᵇ _≈_ refl
    }

------------------------------------------------------------------------
-- Propositional bundles

-- Specialisations to propositional equality via `setoid A`.
module _ (A : Set a) where

  id-↣ : A ↣ A
  id-↣ = injection (setoid A)

  id-↠ : A ↠ A
  id-↠ = surjection (setoid A)

  id-⤖ : A ⤖ A
  id-⤖ = bijection (setoid A)

  id-⇔ : A ⇔ A
  id-⇔ = equivalence (setoid A)

  id-↩ : A ↩ A
  id-↩ = leftInverse (setoid A)

  id-↪ : A ↪ A
  id-↪ = rightInverse (setoid A)

  id-↔ : A ↔ A
  id-↔ = inverse (setoid A)
|
{"hexsha": "2207a5789894e6497f96028bb47d708c27d0845e", "size": 4077, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "agda-stdlib/src/Function/Construct/Identity.agda", "max_stars_repo_name": "DreamLinuxer/popl21-artifact", "max_stars_repo_head_hexsha": "fb380f2e67dcb4a94f353dbaec91624fcb5b8933", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-10-07T12:07:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-10T21:41:32.000Z", "max_issues_repo_path": "agda-stdlib/src/Function/Construct/Identity.agda", "max_issues_repo_name": "DreamLinuxer/popl21-artifact", "max_issues_repo_head_hexsha": "fb380f2e67dcb4a94f353dbaec91624fcb5b8933", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "agda-stdlib/src/Function/Construct/Identity.agda", "max_forks_repo_name": "DreamLinuxer/popl21-artifact", "max_forks_repo_head_hexsha": "fb380f2e67dcb4a94f353dbaec91624fcb5b8933", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-04T06:54:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T06:54:45.000Z", "avg_line_length": 21.3455497382, "max_line_length": 72, "alphanum_fraction": 0.5405935737, "num_tokens": 1251}
|
from ..stats import irr, sequence
import itertools
import numpy
import pandas
import unittest
class SequenceTest(unittest.TestCase):
    """Unit tests for the sequence-analysis helpers in ..stats.sequence:
    joint_frequencies (lagged event co-occurrence counts, with optional
    pre/post event filtering) and sequence_stats (per-transition
    statistics derived from a joint-frequency table)."""

    def setUp(self) -> None:
        # No shared fixtures are needed; each test builds its own data.
        pass

    def tearDown(self) -> None:
        pass

    def test_transition_matrix(self):
        # Test that works as expected with a list, no filtration, at lag 1
        events = ['C', 'B', 'C', 'C', 'A', 'B', 'A', 'A', 'C', 'A', 'C', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B', 'C']
        tm = sequence.joint_frequencies(events)
        # Rows are the antecedent event, columns the consequent event.
        expected = pandas.DataFrame([[3, 3, 2],
                                     [2, 2, 2],
                                     [3, 1, 1]], index=['A', 'B', 'C'], columns=['A', 'B', 'C'])
        for row, col in itertools.product(expected.index, expected.columns):
            self.assertAlmostEqual(expected.loc[row, col], tm.loc[row, col], delta=0.001)
        # Test that works as expected with a list, no filtration at lag > 1
        tm = sequence.joint_frequencies(events, lag=2)
        expected = pandas.DataFrame([[3, 4, 1],
                                     [3, 0, 2],
                                     [2, 1, 2]], index=['A', 'B', 'C'], columns=['A', 'B', 'C'])
        for row, col in itertools.product(expected.index, expected.columns):
            self.assertAlmostEqual(expected.loc[row, col], tm.loc[row, col], delta=0.001)
        # Test that works as expected with a list, filter pre-events at lag 1
        tm = sequence.joint_frequencies(events, pre=['A'])
        expected = pandas.DataFrame([[3, 3, 2]], index=['A'], columns=['A', 'B', 'C'])
        for row, col in itertools.product(expected.index, expected.columns):
            self.assertAlmostEqual(expected.loc[row, col], tm.loc[row, col], delta=0.001)
        # Test that works as expected with a list, filter post-events at lag 1
        tm = sequence.joint_frequencies(events, post=['B', 'C'])
        expected = pandas.DataFrame([[3, 2],
                                     [2, 2],
                                     [1, 1]], index=['A', 'B', 'C'], columns=['B', 'C'])
        for row, col in itertools.product(expected.index, expected.columns):
            self.assertAlmostEqual(expected.loc[row, col], tm.loc[row, col], delta=0.001)
        # Test that works as expected with a list, filter pre and post events at lag 1
        tm = sequence.joint_frequencies(events, pre=['A'], post=['B', 'C'])
        expected = pandas.DataFrame([[3, 2]], index=['A'], columns=['B', 'C'])
        for row, col in itertools.product(expected.index, expected.columns):
            self.assertAlmostEqual(expected.loc[row, col], tm.loc[row, col], delta=0.001)

    def test_transition_stats(self):
        # Fixture: an 8x5 joint-frequency table of coded events, plus two
        # spot-check rows of expected statistics (frequencies, conditional
        # probability, residuals, p-value, odds ratio, log odds ratio).
        jntf_idx = ['ELCTD', 'ELCTP', 'ELN', 'ELSTD', 'ELSTP', 'MIIN', 'MIREL', 'OTHER']
        jntf_cols = ['CTD', 'CTP', 'FN', 'STD', 'STP']
        exp_cols = ['given', 'target', 'jntf', 'expf', 'conp', 'rsdl', 'adjr', 'pval', 'odds', 'lnor']
        exp_raw = [['ELCTD', 'CTD', 2820, 747.6519, 0.72962, 2072.348, 92.53701, 0, 29.07529, 3.36989],
                   ['ELSTP', 'FN', 39, 124.0356, 0.19697, -85.03562, -12.5467, 0, 0.14395, -1.93832]]
        jntf_raw = [[2820, 65, 832, 146, 2],
                    [67, 939, 257, 24, 52],
                    [551, 232, 6795, 261, 41],
                    [86, 7, 307, 988, 9],
                    [5, 27, 39, 13, 114],
                    [32, 35, 162, 8, 5],
                    [114, 282, 1643, 46, 24],
                    [767, 339, 4350, 426, 51]]
        jntf = pandas.DataFrame(jntf_raw, index=jntf_idx, columns=jntf_cols)
        expm = pandas.DataFrame(exp_raw, columns=exp_cols).set_index(['given', 'target'])
        # NOTE(review): c is rebound by the loop below; presumably the
        # chi-square statistic, p-value and dof returns are intentionally
        # unchecked here — confirm.
        actm, c, p, dof = sequence.sequence_stats(jntf)
        # Test that transition statistics match expectations within 3 decimal places
        for r, c in itertools.product(expm.index.values, expm.columns.values):
            expected = expm.loc[r, c]
            actual = actm.loc[r, c]
            if isinstance(expected, (int, float, numpy.int64, numpy.float64)):
                self.assertAlmostEqual(expected, actual, 3)
            else:
                self.assertEqual(expected, actual)
|
{"hexsha": "3043fbbaf2fe57a722cfe76b13b80539cf2656c4", "size": 4180, "ext": "py", "lang": "Python", "max_stars_repo_path": "caastools/tests/test_stats.py", "max_stars_repo_name": "Awesomium40/caastools", "max_stars_repo_head_hexsha": "acfae5e9117f0d9a49ebf78fd20c471070cf3915", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "caastools/tests/test_stats.py", "max_issues_repo_name": "Awesomium40/caastools", "max_issues_repo_head_hexsha": "acfae5e9117f0d9a49ebf78fd20c471070cf3915", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "caastools/tests/test_stats.py", "max_forks_repo_name": "Awesomium40/caastools", "max_forks_repo_head_hexsha": "acfae5e9117f0d9a49ebf78fd20c471070cf3915", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.5416666667, "max_line_length": 117, "alphanum_fraction": 0.5363636364, "include": true, "reason": "import numpy", "num_tokens": 1178}
|
[STATEMENT]
lemma Digamma_real_three_halves_pos: "Digamma (3/2 :: real) > 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < Digamma (3 / 2)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. 0 < Digamma (3 / 2)
[PROOF STEP]
have "-Digamma (3/2 :: real) = -Digamma (of_nat 1 + 1/2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - Digamma (3 / 2) = - Digamma (real 1 + 1 / 2)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
- Digamma (3 / 2) = - Digamma (real 1 + 1 / 2)
goal (1 subgoal):
1. 0 < Digamma (3 / 2)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
- Digamma (3 / 2) = - Digamma (real 1 + 1 / 2)
goal (1 subgoal):
1. 0 < Digamma (3 / 2)
[PROOF STEP]
have "\<dots> = 2 * ln 2 + euler_mascheroni - 2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - Digamma (real 1 + 1 / 2) = 2 * ln 2 + euler_mascheroni - 2
[PROOF STEP]
by (subst Digamma_half_integer) simp
[PROOF STATE]
proof (state)
this:
- Digamma (real 1 + 1 / 2) = 2 * ln 2 + euler_mascheroni - 2
goal (1 subgoal):
1. 0 < Digamma (3 / 2)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
- Digamma (real 1 + 1 / 2) = 2 * ln 2 + euler_mascheroni - 2
goal (1 subgoal):
1. 0 < Digamma (3 / 2)
[PROOF STEP]
note euler_mascheroni_less_13_over_22
[PROOF STATE]
proof (state)
this:
euler_mascheroni < 13 / 22
goal (1 subgoal):
1. 0 < Digamma (3 / 2)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
euler_mascheroni < 13 / 22
goal (1 subgoal):
1. 0 < Digamma (3 / 2)
[PROOF STEP]
note ln2_le_25_over_36
[PROOF STATE]
proof (state)
this:
ln 2 \<le> 25 / 36
goal (1 subgoal):
1. 0 < Digamma (3 / 2)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>\<And>x y. x < y \<Longrightarrow> 2 * ln 2 + x - 2 < 2 * ln 2 + y - 2; \<And>x y. x \<le> y \<Longrightarrow> 2 * x + 13 / 22 - 2 \<le> 2 * y + 13 / 22 - 2\<rbrakk> \<Longrightarrow> - Digamma (3 / 2) < 2 * (25 / 36) + 13 / 22 - 2
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>\<And>x y. x < y \<Longrightarrow> 2 * ln 2 + x - 2 < 2 * ln 2 + y - 2; \<And>x y. x \<le> y \<Longrightarrow> 2 * x + 13 / 22 - 2 \<le> 2 * y + 13 / 22 - 2\<rbrakk> \<Longrightarrow> - Digamma (3 / 2) < 2 * (25 / 36) + 13 / 22 - 2
goal (1 subgoal):
1. 0 < Digamma (3 / 2)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
0 < Digamma (3 / 2)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1218, "file": null, "length": 14}
|
import numpy as np
from typing import Callable
def SimulatedAnnealing(cost_function, x0=0, alpha=0.01, T0=100, Tf=1, neighbourhood_sigma=1, find_max=False, maxiter=1000000):
    """Minimise (or maximise, if find_max) cost_function by simulated annealing.

    cost_function: callable taking a point and returning a scalar cost.
    x0: starting point; alpha: geometric cooling rate in (0, 1];
    T0/Tf: initial/final temperature; neighbourhood_sigma: stddev of the
    Gaussian proposal step.
    Returns a (best_x, best_cost) tuple, or None on invalid alpha or when
    maxiter is exceeded.
    """
    iteration = 0
    if alpha < 0 or alpha > 1:
        print("alpha should be between 0 and 1")
        return None
    x = x0
    x_cost = cost_function(x)
    # Maximisation is implemented by minimising the negated cost.
    if find_max == True:
        x_cost = -x_cost
    x_best = (x, x_cost)
    T = T0
    while (T > Tf):
        # Sample the neighbourhood with a Gaussian step around x.
        x_candidate = np.random.normal(x, neighbourhood_sigma, 1)
        x_candidate_cost = cost_function(x_candidate)
        if find_max == True:
            x_candidate_cost = -x_candidate_cost
        delta_cost = x_candidate_cost - x_cost
        rand_value = np.random.sample()
        # Metropolis criterion: always accept improvements; accept
        # worsening moves with probability exp(-delta/T).
        if delta_cost < 0 or rand_value < np.exp(-delta_cost / T):
            x = x_candidate
            x_cost = x_candidate_cost
            if x_cost < x_best[1]:
                x_best = (x, x_cost)
        # Geometric cooling schedule.
        T -= alpha * T
        if iteration > maxiter:
            print("maxiter reached")
            return None
        iteration += 1
    if find_max == True:
        # Bug fix: tuples are immutable — the original did
        # `x_best[1] = -x_best[1]`, which raises TypeError whenever
        # find_max=True. Rebuild the tuple to undo the sign flip.
        x_best = (x_best[0], -x_best[1])
    return x_best
|
{"hexsha": "09bf9ea9e6cc3ae1765d42852d954e87ef3438d8", "size": 1090, "ext": "py", "lang": "Python", "max_stars_repo_path": "simulatedAnnealing.py", "max_stars_repo_name": "caldasdeoliveira/simulatedAnnealing", "max_stars_repo_head_hexsha": "7802001cc11147654412e854b53f504e58d8d5e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "simulatedAnnealing.py", "max_issues_repo_name": "caldasdeoliveira/simulatedAnnealing", "max_issues_repo_head_hexsha": "7802001cc11147654412e854b53f504e58d8d5e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simulatedAnnealing.py", "max_forks_repo_name": "caldasdeoliveira/simulatedAnnealing", "max_forks_repo_head_hexsha": "7802001cc11147654412e854b53f504e58d8d5e0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9487179487, "max_line_length": 126, "alphanum_fraction": 0.7, "include": true, "reason": "import numpy", "num_tokens": 335}
|
[STATEMENT]
lemma length_remdups_less_eq:
assumes "set xs \<subseteq> set ys"
shows "length (remdups xs) \<le> length (remdups ys)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (remdups xs) \<le> length (remdups ys)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
set xs \<subseteq> set ys
goal (1 subgoal):
1. length (remdups xs) \<le> length (remdups ys)
[PROOF STEP]
by (auto simp: length_remdups_card_conv card_mono)
|
{"llama_tokens": 189, "file": "Regular_Tree_Relations_Tree_Automata_Tree_Automata_Pumping", "length": 2}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.