text
stringlengths
0
27.1M
meta
dict
import tactic.basic import tactic.omega import .ch11_imp open imp /- Open Scope imp_scope. Fixpoint ceval_step2 (st : state) (c : com) (i : nat) : state := match i with | O ⇒ empty_st | S i' ⇒ match c with | SKIP ⇒ st | l ::= a1 ⇒ (l !-> aeval st a1 ; st) | c1 ;; c2 ⇒ let st' := ceval_step2 st c1 i' in ceval_step2 st' c2 i' | TEST b THEN c1 ELSE c2 FI ⇒ if (beval st b) then ceval_step2 st c1 i' else ceval_step2 st c2 i' | WHILE b1 DO c1 END ⇒ if (beval st b1) then let st' := ceval_step2 st c1 i' in ceval_step2 st' c i' else st end end. Close Scope imp_scope. -/ open nat def ceval_step₂ : imp.state → com → ℕ → imp.state | st c 0 := empty_st | st SKIP (succ i) := st | st (l ::= a₁) (succ i) := l !→ aeval st a₁ ; st | st (c₁ ;; c₂) (succ i) := let st' := ceval_step₂ st c₁ i in ceval_step₂ st' c₂ i | st (TEST b THEN c₁ ELSE c₂ FI) (succ i) := if beval st b then ceval_step₂ st c₁ i else ceval_step₂ st c₂ i | st (WHILE b DO c END) (succ i) := if beval st b then let st' := ceval_step₂ st c i in ceval_step₂ st' c i else st /- Open Scope imp_scope. Fixpoint ceval_step3 (st : state) (c : com) (i : nat) : option state := match i with | O ⇒ None | S i' ⇒ match c with | SKIP ⇒ Some st | l ::= a1 ⇒ Some (l !-> aeval st a1 ; st) | c1 ;; c2 ⇒ match (ceval_step3 st c1 i') with | Some st' ⇒ ceval_step3 st' c2 i' | None ⇒ None end | TEST b THEN c1 ELSE c2 FI ⇒ if (beval st b) then ceval_step3 st c1 i' else ceval_step3 st c2 i' | WHILE b1 DO c1 END ⇒ if (beval st b1) then match (ceval_step3 st c1 i') with | Some st' ⇒ ceval_step3 st' c i' | None ⇒ None end else Some st end end. Close Scope imp_scope. 
-/ def ceval_step₃ : imp.state → com → ℕ → option imp.state | st c 0 := none | st SKIP (succ i) := some st | st (l ::= a₁) (succ i) := some $ l !→ aeval st a₁ ; st | st (c₁ ;; c₂) (succ i) := match ceval_step₃ st c₁ i with | some st' := ceval_step₃ st' c₂ i | none := none end | st (TEST b THEN c₁ ELSE c₂ FI) (succ i) := if beval st b then ceval_step₃ st c₁ i else ceval_step₃ st c₂ i | st (WHILE b DO c END) (succ i) := if beval st b then match ceval_step₃ st c i with | some st' := ceval_step₃ st' c i | none := none end else st /- Notation "'LETOPT' x <== e1 'IN' e2" := (match e1 with | Some x ⇒ e2 | None ⇒ None end) (right associativity, at level 60). Open Scope imp_scope. Fixpoint ceval_step (st : state) (c : com) (i : nat) : option state := match i with | O ⇒ None | S i' ⇒ match c with | SKIP ⇒ Some st | l ::= a1 ⇒ Some (l !-> aeval st a1 ; st) | c1 ;; c2 ⇒ LETOPT st' <== ceval_step st c1 i' IN ceval_step st' c2 i' | TEST b THEN c1 ELSE c2 FI ⇒ if (beval st b) then ceval_step st c1 i' else ceval_step st c2 i' | WHILE b1 DO c1 END ⇒ if (beval st b1) then LETOPT st' <== ceval_step st c1 i' IN ceval_step st' c i' else Some st end end. Close Scope imp_scope. Definition test_ceval (st:state) (c:com) := match ceval_step st c 500 with | None ⇒ None | Some st ⇒ Some (st X, st Y, st Z) end. (* Compute (test_ceval empty_st (X ::= 2;; TEST (X <= 1) THEN Y ::= 3 ELSE Z ::= 4 FI)). 
====> Some (2, 0, 4) *) -/ /- nah, not doing the notation -/ /- but the match is better -/ def ceval_step : imp.state → com → ℕ → option imp.state | st c 0 := none | st c (succ i) := match c with | SKIP := some st | l ::= a₁ := some $ l !→ aeval st a₁ ; st | c₁ ;; c₂ := do st' ← ceval_step st c₁ i, ceval_step st' c₂ i | TEST b THEN c₁ ELSE c₂ FI := if beval st b then ceval_step st c₁ i else ceval_step st c₂ i | WHILE b DO c₁ END := if beval st b then do st' ← ceval_step st c₁ i, ceval_step st' c i else st end def test_ceval (st c) := do st ← ceval_step st c 500, pure (st X, st Y, st Z) #eval test_ceval empty_st $ X ::= 2;; TEST X ≤' 1 THEN Y ::= 3 ELSE Z ::= 4 FI /- Definition pup_to_n : com (* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted. (* Example pup_to_n_1 : test_ceval (X !-> 5) pup_to_n = Some (0, 15, 0). Proof. reflexivity. Qed. *) -/ def pup_to_n' : com := Y ::= 0;; WHILE ¬X == 0 DO Y ::= Y + X;; X ::= X - 1 END example : test_ceval (X !→ 5) pup_to_n' = some (0, 15, 0) := rfl def is_even : com := WHILE 2 ≤' X DO X ::= X - 2 END;; TEST X == 0 THEN Z ::= 0 ELSE Z ::= 1 FI example : test_ceval (X !→ 5) is_even = some (1, 0, 1) := rfl example : test_ceval (X !→ 10) is_even = some (0, 0, 0) := rfl /- Theorem ceval_step__ceval: ∀c st st', (∃i, ceval_step st c i = Some st') → st =[ c ]⇒ st'. Proof. intros c st st' H. inversion H as [i E]. clear H. generalize dependent st'. generalize dependent st. generalize dependent c. induction i as [| i' ]. - (* i = 0 -- contradictory *) intros c st st' H. discriminate H. - (* i = S i' *) intros c st st' H. destruct c; simpl in H; inversion H; subst; clear H. + (* SKIP *) apply E_Skip. + (* ::= *) apply E_Ass. reflexivity. + (* ;; *) destruct (ceval_step st c1 i') eqn:Heqr1. * (* Evaluation of r1 terminates normally *) apply E_Seq with s. apply IHi'. rewrite Heqr1. reflexivity. apply IHi'. simpl in H1. assumption. * (* Otherwise -- contradiction *) discriminate H1. + (* TEST *) destruct (beval st b) eqn:Heqr. 
* (* r = true *) apply E_IfTrue. rewrite Heqr. reflexivity. apply IHi'. assumption. * (* r = false *) apply E_IfFalse. rewrite Heqr. reflexivity. apply IHi'. assumption. + (* WHILE *) destruct (beval st b) eqn :Heqr. * (* r = true *) destruct (ceval_step st c i') eqn:Heqr1. { (* r1 = Some s *) apply E_WhileTrue with s. rewrite Heqr. reflexivity. apply IHi'. rewrite Heqr1. reflexivity. apply IHi'. simpl in H1. assumption. } { (* r1 = None *) discriminate H1. } * (* r = false *) injection H1. intros H2. rewrite <- H2. apply E_WhileFalse. apply Heqr. Qed. -/ open imp.com imp.ceval theorem ceval_step__ceval {c st st'} (h : ∃i, ceval_step st c i = some st') : st =[ c ]⇒ st' := begin cases h with i h, induction i with i ih generalizing c st st', cases h, cases c; simp [ceval_step] at h, case CSkip { subst h, exact E_Skip st, }, case CAss : x a { subst h, apply E_Ass, refl, }, case CSeq : c₁ c₂ { cases h with a h, exact E_Seq (ih h.left) (ih h.right), }, case CIf : b c₁ c₂ { cases heq: beval st b; rw heq at h; simp at h, exact E_IfFalse c₁ heq (ih h), exact E_IfTrue c₂ heq (ih h), }, case CWhile : b c { cases heq: beval st b; rw heq at h; simp at h, cases h, exact E_WhileFalse c heq, cases h with a h, exact E_WhileTrue heq (ih h.left) (ih h.right), }, end /- Theorem ceval_step_more: ∀i1 i2 st st' c, i1 ≤ i2 → ceval_step st c i1 = Some st' → ceval_step st c i2 = Some st'. Proof. induction i1 as [|i1']; intros i2 st st' c Hle Hceval. - (* i1 = 0 *) simpl in Hceval. discriminate Hceval. - (* i1 = S i1' *) destruct i2 as [|i2']. inversion Hle. assert (Hle': i1' ≤ i2') by omega. destruct c. + (* SKIP *) simpl in Hceval. inversion Hceval. reflexivity. + (* ::= *) simpl in Hceval. inversion Hceval. reflexivity. + (* ;; *) simpl in Hceval. simpl. destruct (ceval_step st c1 i1') eqn:Heqst1'o. * (* st1'o = Some *) apply (IHi1' i2') in Heqst1'o; try assumption. rewrite Heqst1'o. simpl. simpl in Hceval. apply (IHi1' i2') in Hceval; try assumption. 
* (* st1'o = None *) discriminate Hceval. + (* TEST *) simpl in Hceval. simpl. destruct (beval st b); apply (IHi1' i2') in Hceval; assumption. + (* WHILE *) simpl in Hceval. simpl. destruct (beval st b); try assumption. destruct (ceval_step st c i1') eqn: Heqst1'o. * (* st1'o = Some *) apply (IHi1' i2') in Heqst1'o; try assumption. rewrite → Heqst1'o. simpl. simpl in Hceval. apply (IHi1' i2') in Hceval; try assumption. * (* i1'o = None *) simpl in Hceval. discriminate Hceval. Qed. -/ theorem ceval_step_more {i₁ i₂ st st' c} (hl : i₁ ≤ i₂) (h: ceval_step st c i₁ = some st') : ceval_step st c i₂ = some st' := begin induction i₁ with i₁ ih generalizing i₂ st st' c, unfold ceval_step at h, cases h, cases i₂, cases hl, /- omega failed here (yikes) -/ have hl, exact le_of_succ_le_succ hl, cases c, case CSkip { cases h, unfold ceval_step, }, case CAss : x a { unfold ceval_step at *, assumption, }, case CSeq : c₁ c₂ { unfold ceval_step at *, cases h₁ : ceval_step st c₁ i₁ with st'', simp only [ceval_step, h₁] at h, contradiction, simp only [h₁, option.some_bind] at h, simp only [ih hl h₁, ih hl h, option.some_bind], }, case CIf : b c₁ c₂ { unfold ceval_step at *, cases beval st b; simp at *; exact ih hl h, }, case CWhile : b c { unfold ceval_step at *, cases beval st b; simp at *, exact h, cases h with a h, exact ⟨a, ih hl h.left, ih hl h.right⟩, }, end /- Theorem ceval__ceval_step: ∀c st st', st =[ c ]⇒ st' → ∃i, ceval_step st c i = Some st'. Proof. intros c st st' Hce. induction Hce. (* FILL IN HERE *) Admitted. 
-/ lemma le_max (n m : ℕ) : n ≤ max n m ∧ m ≤ max n m := begin simp only [le_max_iff], split, exact or.inl (refl _), exact or.inr (refl _), end theorem ceval__ceval_step {c st st'} (h : st =[ c ]⇒ st') : ∃i, ceval_step st c i = some st' := begin induction h, case E_Skip { exact ⟨1, rfl⟩, }, case E_Ass : st a n x h { exact ⟨1, by simp only [ceval_step, h]⟩, }, case E_Seq : c₁ c₂ st'' st''' st'''' h₁ h₂ ih₁ ih₂ { cases ih₁ with i₁ ih₁, cases ih₂ with i₂ ih₂, exact ⟨max i₁ i₂ + 1, by { unfold ceval_step, have hl, exact le_max i₁ i₂, simp [ceval_step_more hl.left ih₁, ceval_step_more hl.right ih₂], }⟩, }, case E_IfTrue : st'' st''' b c₁ c₂ h₁ h₂ ih { cases ih with i ih, exact ⟨i + 1, by { unfold ceval_step, simp [h₁, ih], }⟩, }, case E_IfFalse : st'' st''' b c₁ c₂ h₁ h₂ ih { cases ih with i ih, exact ⟨i + 1, by { unfold ceval_step, simp [h₁, ih], }⟩, }, case E_WhileFalse : b st'' c h { exact ⟨1, by { unfold ceval_step, simp [h], refl, }⟩, }, case E_WhileTrue : st'' st''' st'''' b c hb h₂ h₃ ih₁ ih₂ { cases ih₁ with i₁ ih₁, cases ih₂ with i₂ ih₂, exact ⟨max i₁ i₂ + 1, by { unfold ceval_step, simp [hb], exact ⟨st''', by { have hl, exact le_max i₁ i₂, exact ⟨ceval_step_more hl.left ih₁, ceval_step_more hl.right ih₂⟩, }⟩, }⟩, }, end /- Theorem ceval_and_ceval_step_coincide: ∀c st st', st =[ c ]⇒ st' ↔ ∃i, ceval_step st c i = Some st'. Proof. intros c st st'. split. apply ceval__ceval_step. apply ceval_step__ceval. Qed. -/ theorem ceval_and_ceval_step_coincide (c st st') : (st =[ c ]⇒ st') ↔ ∃i, ceval_step st c i = some st' := ⟨ceval__ceval_step, ceval_step__ceval⟩ /- Theorem ceval_deterministic' : ∀c st st1 st2, st =[ c ]⇒ st1 → st =[ c ]⇒ st2 → st1 = st2. Proof. intros c st st1 st2 He1 He2. apply ceval__ceval_step in He1. apply ceval__ceval_step in He2. inversion He1 as [i1 E1]. inversion He2 as [i2 E2]. apply ceval_step_more with (i2 := i1 + i2) in E1. apply ceval_step_more with (i2 := i1 + i2) in E2. rewrite E1 in E2. inversion E2. reflexivity. omega. omega. Qed. 
-/ theorem ceval_deterministic' {c st st₁ st₂} (h₁ : st =[ c ]⇒ st₁) (h₂ : st =[ c ]⇒ st₂) : st₁ = st₂ := begin cases ceval__ceval_step h₁ with i₁ h₁, cases ceval__ceval_step h₂ with i₂ h₂, replace h₂, exact ceval_step_more (le_max i₁ i₂).right h₂, rw ceval_step_more (le_max i₁ i₂).left h₁ at h₂, injection h₂, end
{ "alphanum_fraction": null, "author": "michens", "avg_line_length": null, "converted": null, "ext": null, "file": null, "hexsha": null, "include": null, "lang": null, "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": "github-repos/lean/michens-learn-lean/learn-lean-f38fc342780ddff5a164a18e5482163dea506ccd/sf/v1/ch14_impcevalfun.lean", "reason": null, "repo": "learn-lean", "save_path": "github-repos/lean/michens-learn-lean", "sha": "f38fc342780ddff5a164a18e5482163dea506ccd", "size": null }
/- Copyright (c) 2022 Damiano Testa. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Damiano Testa -/ import data.polynomial.algebra_map import ring_theory.localization.basic /-! # Laurent polynomials We introduce Laurent polynomials over a semiring `R`. Mathematically, they are expressions of the form $$ \sum_{i \in \mathbb{Z}} a_i T ^ i $$ where the sum extends over a finite subset of `ℤ`. Thus, negative exponents are allowed. The coefficients come from the semiring `R` and the variable `T` commutes with everything. Since we are going to convert back and forth between polynomials and Laurent polynomials, we decided to maintain some distinction by using the symbol `T`, rather than `X`, as the variable for Laurent polynomials ## Notation The symbol `R[T;T⁻¹]` stands for `laurent_polynomial R`. We also define * `C : R →+* R[T;T⁻¹]` the inclusion of constant polynomials, analogous to the one for `R[X]`; * `T : ℤ → R[T;T⁻¹]` the sequence of powers of the variable `T`. ## Implementation notes We define Laurent polynomials as `add_monoid_algebra R ℤ`. Thus, they are essentially `finsupp`s `ℤ →₀ R`. This choice differs from the current irreducible design of `polynomial`, that instead shields away the implementation via `finsupp`s. It is closer to the original definition of polynomials. As a consequence, `laurent_polynomial` plays well with polynomials, but there is a little roughness in establishing the API, since the `finsupp` implementation of `R[X]` is well-shielded. Unlike the case of polynomials, I felt that the exponent notation was not too easy to use, as only natural exponents would be allowed. Moreover, in the end, it seems likely that we should aim to perform computations on exponents in `ℤ` anyway and separating this via the symbol `T` seems convenient. I made a *heavy* use of `simp` lemmas, aiming to bring Laurent polynomials to the form `C a * T n`. 
Any comments or suggestions for improvements is greatly appreciated! ## Future work Lots is missing! -- (Riccardo) add inclusion into Laurent series. -- (Riccardo) giving a morphism (as `R`-alg, so in the commutative case) from `R[T,T⁻¹]` to `S` is the same as choosing a unit of `S`. -- A "better" definition of `trunc` would be as an `R`-linear map. This works: -- ``` -- def trunc : R[T;T⁻¹] →[R] R[X] := -- begin -- refine (_ : add_monoid_algebra R ℕ →[R] R[X]).comp _, -- { exact ⟨(to_finsupp_iso R).symm, by simp⟩ }, -- { refine ⟨λ r, comap_domain _ r (set.inj_on_of_injective (λ a b ab, int.of_nat.inj ab) _), _⟩, -- exact λ r f, comap_domain_smul _ _ _ } -- end -- ``` -- but it would make sense to bundle the maps better, for a smoother user experience. -- I (DT) did not have the strength to embark on this (possibly short!) journey, after getting to -- this stage of the Laurent process! -- This would likely involve adding a `comap_domain` analogue of -- `add_monoid_algebra.map_domain_alg_hom` and an `R`-linear version of -- `polynomial.to_finsupp_iso`. -- Add `degree, int_degree, int_trailing_degree, leading_coeff, trailing_coeff,...`. -/ open_locale polynomial big_operators open polynomial add_monoid_algebra finsupp noncomputable theory variables {R : Type*} /-- The semiring of Laurent polynomials with coefficients in the semiring `R`. We denote it by `R[T;T⁻¹]`. The ring homomorphism `C : R →+* R[T;T⁻¹]` includes `R` as the constant polynomials. -/ abbreviation laurent_polynomial (R : Type*) [semiring R] := add_monoid_algebra R ℤ local notation R`[T;T⁻¹]`:9000 := laurent_polynomial R /-- The ring homomorphism, taking a polynomial with coefficients in `R` to a Laurent polynomial with coefficients in `R`. -/ def polynomial.to_laurent [semiring R] : R[X] →+* R[T;T⁻¹] := (map_domain_ring_hom R int.of_nat_hom).comp (to_finsupp_iso R) /-- This is not a simp lemma, as it is usually preferable to use the lemmas about `C` and `X` instead. 
-/ lemma polynomial.to_laurent_apply [semiring R] (p : R[X]) : p.to_laurent = p.to_finsupp.map_domain coe := rfl /-- The `R`-algebra map, taking a polynomial with coefficients in `R` to a Laurent polynomial with coefficients in `R`. -/ def polynomial.to_laurent_alg [comm_semiring R] : R[X] →ₐ[R] R[T;T⁻¹] := begin refine alg_hom.comp _ (to_finsupp_iso_alg R).to_alg_hom, exact (map_domain_alg_hom R R int.of_nat_hom), end @[simp] lemma polynomial.to_laurent_alg_apply [comm_semiring R] (f : R[X]) : f.to_laurent_alg = f.to_laurent := rfl namespace laurent_polynomial section semiring variables [semiring R] lemma single_zero_one_eq_one : (single 0 1 : R[T;T⁻¹]) = (1 : R[T;T⁻¹]) := rfl /-! ### The functions `C` and `T`. -/ /-- The ring homomorphism `C`, including `R` into the ring of Laurent polynomials over `R` as the constant Laurent polynomials. -/ def C : R →+* R[T;T⁻¹] := single_zero_ring_hom lemma algebra_map_apply {R A : Type*} [comm_semiring R] [semiring A] [algebra R A] (r : R) : algebra_map R (laurent_polynomial A) r = C (algebra_map R A r) := rfl /-- When we have `[comm_semiring R]`, the function `C` is the same as `algebra_map R R[T;T⁻¹]`. (But note that `C` is defined when `R` is not necessarily commutative, in which case `algebra_map` is not available.) -/ lemma C_eq_algebra_map {R : Type*} [comm_semiring R] (r : R) : C r = algebra_map R R[T;T⁻¹] r := rfl lemma single_eq_C (r : R) : single 0 r = C r := rfl /-- The function `n ↦ T ^ n`, implemented as a sequence `ℤ → R[T;T⁻¹]`. Using directly `T ^ n` does not work, since we want the exponents to be of Type `ℤ` and there is no `ℤ`-power defined on `R[T;T⁻¹]`. Using that `T` is a unit introduces extra coercions. For these reasons, the definition of `T` is as a sequence. 
-/ def T (n : ℤ) : R[T;T⁻¹] := single n 1 @[simp] lemma T_zero : (T 0 : R[T;T⁻¹]) = 1 := rfl lemma T_add (m n : ℤ) : (T (m + n) : R[T;T⁻¹]) = T m * T n := by { convert single_mul_single.symm, simp [T] } lemma T_sub (m n : ℤ) : (T (m - n) : R[T;T⁻¹]) = T m * T (-n) := by rw [← T_add, sub_eq_add_neg] @[simp] lemma T_pow (m : ℤ) (n : ℕ) : (T m ^ n : R[T;T⁻¹]) = T (n * m) := by rw [T, T, single_pow n, one_pow, nsmul_eq_mul] /-- The `simp` version of `mul_assoc`, in the presence of `T`'s. -/ @[simp] lemma mul_T_assoc (f : R[T;T⁻¹]) (m n : ℤ) : f * T m * T n = f * T (m + n) := by simp [← T_add, mul_assoc] @[simp] lemma single_eq_C_mul_T (r : R) (n : ℤ) : (single n r : R[T;T⁻¹]) = (C r * T n : R[T;T⁻¹]) := by convert single_mul_single.symm; simp -- This lemma locks in the right changes and is what Lean proved directly. -- The actual `simp`-normal form of a Laurent monomial is `C a * T n`, whenever it can be reached. @[simp] lemma _root_.polynomial.to_laurent_C_mul_T (n : ℕ) (r : R) : ((polynomial.monomial n r).to_laurent : R[T;T⁻¹]) = C r * T n := show map_domain coe (monomial n r).to_finsupp = (C r * T n : R[T;T⁻¹]), by rw [to_finsupp_monomial, map_domain_single, single_eq_C_mul_T] @[simp] lemma _root_.polynomial.to_laurent_C (r : R) : (polynomial.C r).to_laurent = C r := begin convert polynomial.to_laurent_C_mul_T 0 r, simp only [int.coe_nat_zero, T_zero, mul_one], end @[simp] lemma _root_.polynomial.to_laurent_X : (polynomial.X.to_laurent : R[T;T⁻¹]) = T 1 := begin have : (polynomial.X : R[X]) = monomial 1 1, { simp [← C_mul_X_pow_eq_monomial] }, simp [this, polynomial.to_laurent_C_mul_T], end @[simp] lemma _root_.polynomial.to_laurent_one : (polynomial.to_laurent : R[X] → R[T;T⁻¹]) 1 = 1 := map_one polynomial.to_laurent @[simp] lemma _root_.polynomial.to_laurent_C_mul_eq (r : R) (f : R[X]) : (polynomial.C r * f).to_laurent = C r * f.to_laurent := by simp only [_root_.map_mul, polynomial.to_laurent_C] @[simp] lemma _root_.polynomial.to_laurent_X_pow (n : ℕ) : (X ^ n : 
R[X]).to_laurent = T n := by simp only [map_pow, polynomial.to_laurent_X, T_pow, mul_one] @[simp] lemma _root_.polynomial.to_laurent_C_mul_X_pow (n : ℕ) (r : R) : (polynomial.C r * X ^ n).to_laurent = C r * T n := by simp only [_root_.map_mul, polynomial.to_laurent_C, polynomial.to_laurent_X_pow] instance invertible_T (n : ℤ) : invertible (T n : R[T;T⁻¹]) := { inv_of := T (- n), inv_of_mul_self := by rw [← T_add, add_left_neg, T_zero], mul_inv_of_self := by rw [← T_add, add_right_neg, T_zero] } @[simp] lemma inv_of_T (n : ℤ) : ⅟ (T n : R[T;T⁻¹]) = T (- n) := rfl lemma is_unit_T (n : ℤ) : is_unit (T n : R[T;T⁻¹]) := is_unit_of_invertible _ @[elab_as_eliminator] protected lemma induction_on {M : R[T;T⁻¹] → Prop} (p : R[T;T⁻¹]) (h_C : ∀ a, M (C a)) (h_add : ∀ {p q}, M p → M q → M (p + q)) (h_C_mul_T : ∀ (n : ℕ) (a : R), M (C a * T n) → M (C a * T (n + 1))) (h_C_mul_T_Z : ∀ (n : ℕ) (a : R), M (C a * T (- n)) → M (C a * T (- n - 1))) : M p := begin have A : ∀ {n : ℤ} {a : R}, M (C a * T n), { assume n a, apply n.induction_on, { simpa only [T_zero, mul_one] using h_C a }, { exact λ m, h_C_mul_T m a }, { exact λ m, h_C_mul_T_Z m a } }, have B : ∀ (s : finset ℤ), M (s.sum (λ (n : ℤ), C (p.to_fun n) * T n)), { apply finset.induction, { convert h_C 0, simp only [finset.sum_empty, _root_.map_zero] }, { assume n s ns ih, rw finset.sum_insert ns, exact h_add A ih } }, convert B p.support, ext a, simp_rw [← single_eq_C_mul_T, finset.sum_apply', single_apply, finset.sum_ite_eq'], split_ifs with h h, { refl }, { exact finsupp.not_mem_support_iff.mp h } end /-- To prove something about Laurent polynomials, it suffices to show that * the condition is closed under taking sums, and * it holds for monomials. 
-/ @[elab_as_eliminator] protected lemma induction_on' {M : R[T;T⁻¹] → Prop} (p : R[T;T⁻¹]) (h_add : ∀p q, M p → M q → M (p + q)) (h_C_mul_T : ∀(n : ℤ) (a : R), M (C a * T n)) : M p := begin refine p.induction_on (λ a, _) h_add _ _; try { exact λ n f _, h_C_mul_T _ f }, convert h_C_mul_T 0 a, exact (mul_one _).symm, end lemma commute_T (n : ℤ) (f : R[T;T⁻¹]) : commute (T n) f := f.induction_on' (λ p q Tp Tq, commute.add_right Tp Tq) $ λ m a, show T n * _ = _, by { rw [T, T, ← single_eq_C, single_mul_single, single_mul_single, single_mul_single], simp [add_comm] } @[simp] lemma T_mul (n : ℤ) (f : R[T;T⁻¹]) : T n * f = f * T n := (commute_T n f).eq /-- `trunc : R[T;T⁻¹] →+ R[X]` maps a Laurent polynomial `f` to the polynomial whose terms of nonnegative degree coincide with the ones of `f`. The terms of negative degree of `f` "vanish". `trunc` is a left-inverse to `polynomial.to_laurent`. -/ def trunc : R[T;T⁻¹] →+ R[X] := ((to_finsupp_iso R).symm.to_add_monoid_hom).comp $ comap_domain.add_monoid_hom $ λ a b, int.of_nat.inj @[simp] lemma trunc_C_mul_T (n : ℤ) (r : R) : trunc (C r * T n) = ite (0 ≤ n) (monomial n.to_nat r) 0 := begin apply (to_finsupp_iso R).injective, rw [← single_eq_C_mul_T, trunc, add_monoid_hom.coe_comp, function.comp_app, comap_domain.add_monoid_hom_apply, to_finsupp_iso_apply], by_cases n0 : 0 ≤ n, { lift n to ℕ using n0, erw [comap_domain_single, to_finsupp_iso_symm_apply], simp only [int.coe_nat_nonneg, int.to_nat_coe_nat, if_true, to_finsupp_iso_apply, to_finsupp_monomial] }, { lift (- n) to ℕ using (neg_pos.mpr (not_le.mp n0)).le with m, rw [to_finsupp_iso_apply, to_finsupp_inj, if_neg n0], erw to_finsupp_iso_symm_apply, ext a, have := ((not_le.mp n0).trans_le (int.coe_zero_le a)).ne', simp only [coeff, comap_domain_apply, int.of_nat_eq_coe, coeff_zero, single_apply_eq_zero, this, is_empty.forall_iff] } end @[simp] lemma left_inverse_trunc_to_laurent : function.left_inverse (trunc : R[T;T⁻¹] → R[X]) polynomial.to_laurent := begin refine λ f, 
f.induction_on' _ _, { exact λ f g hf hg, by simp only [hf, hg, _root_.map_add] }, { exact λ n r, by simp only [polynomial.to_laurent_C_mul_T, trunc_C_mul_T, int.coe_nat_nonneg, int.to_nat_coe_nat, if_true] } end @[simp] lemma _root_.polynomial.trunc_to_laurent (f : R[X]) : trunc f.to_laurent = f := left_inverse_trunc_to_laurent _ lemma _root_.polynomial.to_laurent_injective : function.injective (polynomial.to_laurent : R[X] → R[T;T⁻¹]) := left_inverse_trunc_to_laurent.injective @[simp] lemma _root_.polynomial.to_laurent_inj (f g : R[X]) : f.to_laurent = g.to_laurent ↔ f = g := ⟨λ h, polynomial.to_laurent_injective h, congr_arg _⟩ lemma _root_.polynomial.to_laurent_ne_zero {f : R[X]} : f ≠ 0 ↔ f.to_laurent ≠ 0 := (map_ne_zero_iff _ (by exact polynomial.to_laurent_injective)).symm lemma exists_T_pow (f : R[T;T⁻¹]) : ∃ (n : ℕ) (f' : R[X]), f'.to_laurent = f * T n := begin apply f.induction_on' _ (λ n a, _); clear f, { rintros f g ⟨m, fn, hf⟩ ⟨n, gn, hg⟩, refine ⟨m + n, fn * X ^ n + gn * X ^ m, _⟩, simp only [hf, hg, add_mul, add_comm (n : ℤ), map_add, map_mul, polynomial.to_laurent_X_pow, mul_T_assoc, int.coe_nat_add] }, { cases n with n n, { exact ⟨0, polynomial.C a * X ^ n, by simp⟩ }, { refine ⟨n + 1, polynomial.C a, _⟩, simp only [int.neg_succ_of_nat_eq, polynomial.to_laurent_C, int.coe_nat_succ, mul_T_assoc, add_left_neg, T_zero, mul_one] } } end /-- This is a version of `exists_T_pow` stated as an induction principle. -/ @[elab_as_eliminator] lemma induction_on_mul_T {Q : R[T;T⁻¹] → Prop} (f : R[T;T⁻¹]) (Qf : ∀ {f : R[X]} {n : ℕ}, Q (f.to_laurent * T (- n))) : Q f := begin rcases f.exists_T_pow with ⟨n, f', hf⟩, rw [← mul_one f, ← T_zero, ← nat.cast_zero, ← nat.sub_self n, nat.cast_sub rfl.le, T_sub, ← mul_assoc, ← hf], exact Qf, end /-- Suppose that `Q` is a statement about Laurent polynomials such that * `Q` is true on *ordinary* polynomials; * `Q (f * T)` implies `Q f`; it follow that `Q` is true on all Laurent polynomials. 
-/ lemma reduce_to_polynomial_of_mul_T (f : R[T;T⁻¹]) {Q : R[T;T⁻¹] → Prop} (Qf : ∀ (f : R[X]), Q f.to_laurent) (QT : ∀ f, Q (f * T 1) → Q f) : Q f := begin induction f using laurent_polynomial.induction_on_mul_T with f n, induction n with n hn, { simpa only [int.coe_nat_zero, neg_zero, T_zero, mul_one] using Qf _ }, { convert QT _ _, simpa using hn } end section support lemma support_C_mul_T (a : R) (n : ℤ) : (C a * T n).support ⊆ {n} := by simpa only [← single_eq_C_mul_T] using support_single_subset lemma support_C_mul_T_of_ne_zero {a : R} (a0 : a ≠ 0) (n : ℤ) : (C a * T n).support = {n} := begin rw ← single_eq_C_mul_T, exact support_single_ne_zero _ a0, end /-- The support of a polynomial `f` is a finset in `ℕ`. The lemma `to_laurent_support f` shows that the support of `f.to_laurent` is the same finset, but viewed in `ℤ` under the natural inclusion `ℕ ↪ ℤ`. -/ lemma to_laurent_support (f : R[X]) : f.to_laurent.support = f.support.map nat.cast_embedding := begin generalize' hd : f.support = s, revert f, refine finset.induction_on s _ _; clear s, { simp only [polynomial.support_eq_empty, map_zero, finsupp.support_zero, eq_self_iff_true, implies_true_iff, finset.map_empty] {contextual := tt} }, { intros a s as hf f fs, have : (erase a f).to_laurent.support = s.map nat.cast_embedding := hf (f.erase a) (by simp only [fs, finset.erase_eq_of_not_mem as, polynomial.support_erase, finset.erase_insert_eq_erase]), rw [← monomial_add_erase f a, finset.map_insert, ← this, map_add, polynomial.to_laurent_C_mul_T, support_add_eq, finset.insert_eq], { congr, exact support_C_mul_T_of_ne_zero (polynomial.mem_support_iff.mp (by simp [fs])) _ }, { rw this, exact disjoint.mono_left (support_C_mul_T _ _) (by simpa) } } end end support section degrees /-- The degree of a Laurent polynomial takes values in `with_bot ℤ`. If `f : R[T;T⁻¹]` is a Laurent polynomial, then `f.degree` is the maximum of its support of `f`, or `⊥`, if `f = 0`. 
-/ def degree (f : R[T;T⁻¹]) : with_bot ℤ := f.support.max @[simp] lemma degree_zero : degree (0 : R[T;T⁻¹]) = ⊥ := rfl @[simp] section exact_degrees open_locale classical @[simp] lemma degree_C_mul_T (n : ℤ) (a : R) (a0 : a ≠ 0) : (C a * T n).degree = n := begin rw degree, convert finset.max_singleton, refine support_eq_singleton.mpr _, simp only [← single_eq_C_mul_T, single_eq_same, a0, ne.def, not_false_iff, eq_self_iff_true, and_self], end lemma degree_C_mul_T_ite (n : ℤ) (a : R) : (C a * T n).degree = ite (a = 0) ⊥ n := by split_ifs with h h; simp only [h, map_zero, zero_mul, degree_zero, degree_C_mul_T, ne.def, not_false_iff] @[simp] lemma degree_T [nontrivial R] (n : ℤ) : (T n : R[T;T⁻¹]).degree = n := begin rw [← one_mul (T n), ← map_one C], exact degree_C_mul_T n 1 (one_ne_zero : (1 : R) ≠ 0), end lemma degree_C {a : R} (a0 : a ≠ 0) : (C a).degree = 0 := begin rw [← mul_one (C a), ← T_zero], exact degree_C_mul_T 0 a a0 end lemma degree_C_ite (a : R) : (C a).degree = ite (a = 0) ⊥ 0 := by split_ifs with h h; simp only [h, map_zero, degree_zero, degree_C, ne.def, not_false_iff] end exact_degrees section degree_bounds lemma degree_C_mul_T_le (n : ℤ) (a : R) : (C a * T n).degree ≤ n := begin by_cases a0 : a = 0, { simp only [a0, map_zero, zero_mul, degree_zero, bot_le] }, { exact (degree_C_mul_T n a a0).le } end lemma degree_T_le (n : ℤ) : (T n : R[T;T⁻¹]).degree ≤ n := (le_of_eq (by rw [map_one, one_mul])).trans (degree_C_mul_T_le n (1 : R)) lemma degree_C_le (a : R) : (C a).degree ≤ 0 := (le_of_eq (by rw [T_zero, mul_one])).trans (degree_C_mul_T_le 0 a) end degree_bounds end degrees instance : module R[X] R[T;T⁻¹] := module.comp_hom _ polynomial.to_laurent instance (R : Type*) [semiring R] : is_scalar_tower R[X] R[X] R[T;T⁻¹] := { smul_assoc := λ x y z, by simp only [has_smul.smul, has_smul.comp.smul, map_mul, mul_assoc] } end semiring section comm_semiring variable [comm_semiring R] instance algebra_polynomial (R : Type*) [comm_semiring R] : algebra R[X] 
R[T;T⁻¹] := { commutes' := λ f l, by simp [mul_comm], smul_def' := λ f l, rfl, .. polynomial.to_laurent } lemma algebra_map_X_pow (n : ℕ) : algebra_map R[X] R[T;T⁻¹] (X ^ n) = T n := polynomial.to_laurent_X_pow n @[simp] lemma algebra_map_eq_to_laurent (f : R[X]) : algebra_map R[X] R[T;T⁻¹] f = f.to_laurent := rfl lemma is_localization : is_localization (submonoid.closure ({X} : set R[X])) R[T;T⁻¹] := { map_units := λ t, begin cases t with t ht, rcases submonoid.mem_closure_singleton.mp ht with ⟨n, rfl⟩, simp only [is_unit_T n, set_like.coe_mk, algebra_map_eq_to_laurent, polynomial.to_laurent_X_pow] end, surj := λ f, begin induction f using laurent_polynomial.induction_on_mul_T with f n, have := (submonoid.closure ({X} : set R[X])).pow_mem submonoid.mem_closure_singleton_self n, refine ⟨(f, ⟨_, this⟩), _⟩, simp only [set_like.coe_mk, algebra_map_eq_to_laurent, polynomial.to_laurent_X_pow, mul_T_assoc, add_left_neg, T_zero, mul_one], end, eq_iff_exists := λ f g, begin rw [algebra_map_eq_to_laurent, algebra_map_eq_to_laurent, polynomial.to_laurent_inj], refine ⟨_, _⟩, { rintro rfl, exact ⟨1, rfl⟩ }, { rintro ⟨⟨h, hX⟩, h⟩, rcases submonoid.mem_closure_singleton.mp hX with ⟨n, rfl⟩, exact mul_X_pow_injective n h } end } end comm_semiring end laurent_polynomial
{ "alphanum_fraction": null, "author": "leanprover-community", "avg_line_length": null, "converted": null, "ext": null, "file": null, "hexsha": null, "include": null, "lang": null, "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/data/polynomial/laurent.lean", "reason": null, "repo": "mathlib", "save_path": "github-repos/lean/leanprover-community-mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "size": null }
from collections import OrderedDict
from enum import Enum, unique
import numpy as np
from typing import Dict, Union, Iterator, Type, Tuple

from meio.gsm.dag_gsm import GuaranteedServiceModelDAG
from meio.gsm.tree_gsm import Stage, GuaranteedServiceModelTree, GuaranteedServiceModel


def create_supply_chain_network_from_iterator(supply_chain: Iterator) -> Dict[str, Stage]:
    """
    Read stage rows from an iterator and initialise the dictionary of stages
    forming the supply chain.

    The data is comma-separated text with one stage per row (the first row is a
    header) and the following fields, in order:

    - stage_id
    - lead_time
    - max_s_time (blank means unbounded)
    - cost_rate
    - risk_pool (blank to skip)
    - ext_demand_mean / ext_demand_std / ext_demand_thres (blank mean means the
      stage faces no external demand)
    - up_stages (up_stage_1,phi_1,up_stage_2,phi_2,...,up_stage_n,phi_n)

    NOTE(review): an earlier docstring also listed a ``cap_constraint`` column,
    but the parser below never reads one; the list above follows the code.

    :param supply_chain: iterator yielding the raw comma-separated rows.
    :returns: ordered dictionary of Stage objects keyed by the unique stage ids.
    """
    stage_configs = OrderedDict()  # type: OrderedDict[str,Dict]
    u_stages = {}  # type: Dict[str,Dict[str,int]]
    for i, row in enumerate(supply_chain):
        # Skip the header row and any blank lines (e.g. a trailing newline).
        if i == 0 or not row.strip():
            continue
        line = row.strip("\n").split(",")
        stage_config = {}  # type: Dict[str, Union[str, int, float, Dict[str, int]]]
        stage_config['_id'] = stage_id = line[0]
        stage_config['lead_time'] = int(float(line[1]))
        # A blank max service time means the stage is unconstrained.
        stage_config["max_s_time"] = int(line[2]) if line[2] != "" else np.inf
        stage_config['added_cost'] = float(line[3])
        if line[4] != "":
            stage_config['risk_pool'] = int(line[4])
        if line[5] != "":
            # A demand mean marks this stage as an external-demand stage.
            stage_config['is_ext_demand_stage'] = True
            stage_config['demand_mean'] = float(line[5])
            stage_config['demand_std'] = float(line[6])
            stage_config['demand_thres'] = float(line[7])

        # Remaining fields are (up_stage_id, phi) pairs.
        up_stages = {}  # type: Dict[str, int]
        up_stages_list = line[8:]
        if len(up_stages_list) > 1:
            for s in range(0, len(up_stages_list), 2):
                up_stage_id = str(up_stages_list[s])
                phi = int(up_stages_list[s+1])
                up_stages[up_stage_id] = phi
        stage_config["up_stages"] = u_stages[stage_id] = up_stages
        stage_configs[stage_id] = stage_config

    # Invert the upstream map to obtain each stage's downstream stages.
    d_stages = {stage_id: {} for stage_id in u_stages}  # type: Dict[str, Dict[str, int]]
    for stage_id, up_stages in u_stages.items():
        for up_stage_id, phi in up_stages.items():
            d_stages[up_stage_id][stage_id] = phi

    for stage_id in stage_configs:
        stage_configs[stage_id]["up_stages"] = u_stages[stage_id]
        stage_configs[stage_id]["down_stages"] = d_stages[stage_id]

    stages = OrderedDict((stage_id, Stage(**stage_config))
                         for stage_id, stage_config in stage_configs.items())
    return stages


def read_supply_chain_from_txt(supply_chain_txt_file: str) -> Dict[str, Stage]:
    """
    Read a supply-chain description from a text file and initialise the
    dictionary of stages forming the supply chain.

    :param supply_chain_txt_file: path to the comma-separated stage file.
    :returns: dictionary of stage objects with keys being the unique ids of the stages
    """
    with open(supply_chain_txt_file, "r") as f:
        stages = create_supply_chain_network_from_iterator(f)
    return stages


@unique
class GSM(Enum):
    """Known guaranteed-service-model network topologies."""
    Tree = 'Tree'  # Spanning tree
    CoC = 'CoC'    # Clusters of commonality
    DAG = 'DAG'    # Directed Acyclic graphs


def create_gsm_instance(gsm_type: GSM, supply_chain_filename: str) \
        -> Tuple[Dict[str, Stage], GuaranteedServiceModel]:
    """
    GSM Factory method.

    Does not necessarily check for compatibility of given GSM type with the
    network topology described in the config file; any checking is a bonus of
    the Tree model construction.

    :param gsm_type: The type of the GSM model (e.g. spanning tree or DAG).
        Only ``GSM.Tree`` and ``GSM.DAG`` have constructors registered here.
    :param supply_chain_filename: The name of the config file defining the
        topology and other parameters.
    :raise KeyError: The requested GSM type has no registered constructor
        (e.g. ``GSM.CoC``).
    :raise IncompatibleGraphTopology: The specified type is inconsistent with
        the topology described in the config file.
    :raises: InconsistentGSMConfiguration: Network topology labels not as expected.
    :return: The stages of the network and the gsm model of the appropriate
        type, if config is compatible with what was asked for.
    """
    stages = read_supply_chain_from_txt(supply_chain_filename)
    creator = {
        GSM.Tree.value: GuaranteedServiceModelTree,
        GSM.DAG.value: GuaranteedServiceModelDAG
    }  # type: Dict[str, Type[GuaranteedServiceModel]]
    try:
        model_class = creator[gsm_type.value]
    except KeyError:
        # GSM.CoC is declared in the enum but has no constructor registered
        # here; fail with an explicit message instead of a bare KeyError('CoC').
        raise KeyError("GSM type {!r} is not supported by this factory; "
                       "supported types: {}".format(gsm_type, sorted(creator)))
    return stages, model_class(stages)
{ "alphanum_fraction": 0.6679357022, "author": null, "avg_line_length": 41.1130434783, "converted": null, "ext": "py", "file": null, "hexsha": "5bf5139bc1a0e62f24a0c6dbd55e68e2b46b69f6", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-04-19T09:01:12.000Z", "max_forks_repo_forks_event_min_datetime": "2021-03-24T17:20:06.000Z", "max_forks_repo_head_hexsha": "c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "dmcnamee/snc", "max_forks_repo_path": "src/meio/gsm/utils.py", "max_issues_count": 3, "max_issues_repo_head_hexsha": "c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786", "max_issues_repo_issues_event_max_datetime": "2021-05-08T22:06:47.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-26T01:16:08.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "dmcnamee/snc", "max_issues_repo_path": "src/meio/gsm/utils.py", "max_line_length": 100, "max_stars_count": 5, "max_stars_repo_head_hexsha": "c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "dmcnamee/snc", "max_stars_repo_path": "src/meio/gsm/utils.py", "max_stars_repo_stars_event_max_datetime": "2021-11-17T12:44:51.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-24T16:23:10.000Z", "num_tokens": 1159, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 4728 }
[STATEMENT] lemma eventually_nhds_top: fixes P :: "'a :: {order_top,linorder_topology} \<Rightarrow> bool" and b :: 'a assumes "b < top" shows "eventually P (nhds top) \<longleftrightarrow> (\<exists>b<top. (\<forall>z. b < z \<longrightarrow> P z))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. eventually P (nhds top) = (\<exists>b<top. \<forall>z>b. P z) [PROOF STEP] unfolding eventually_nhds [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<exists>S. open S \<and> top \<in> S \<and> Ball S P) = (\<exists>b<top. \<forall>z>b. P z) [PROOF STEP] proof safe [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>S. \<lbrakk>open S; top \<in> S; Ball S P\<rbrakk> \<Longrightarrow> \<exists>b<top. \<forall>z>b. P z 2. \<And>b. \<lbrakk>b < top; \<forall>z>b. P z\<rbrakk> \<Longrightarrow> \<exists>S. open S \<and> top \<in> S \<and> Ball S P [PROOF STEP] fix S :: "'a set" [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>S. \<lbrakk>open S; top \<in> S; Ball S P\<rbrakk> \<Longrightarrow> \<exists>b<top. \<forall>z>b. P z 2. \<And>b. \<lbrakk>b < top; \<forall>z>b. P z\<rbrakk> \<Longrightarrow> \<exists>S. open S \<and> top \<in> S \<and> Ball S P [PROOF STEP] assume "open S" "top \<in> S" [PROOF STATE] proof (state) this: open S top \<in> S goal (2 subgoals): 1. \<And>S. \<lbrakk>open S; top \<in> S; Ball S P\<rbrakk> \<Longrightarrow> \<exists>b<top. \<forall>z>b. P z 2. \<And>b. \<lbrakk>b < top; \<forall>z>b. P z\<rbrakk> \<Longrightarrow> \<exists>S. open S \<and> top \<in> S \<and> Ball S P [PROOF STEP] note open_left[OF this \<open>b < top\<close>] [PROOF STATE] proof (state) this: \<exists>b<top. {b<..top} \<subseteq> S goal (2 subgoals): 1. \<And>S. \<lbrakk>open S; top \<in> S; Ball S P\<rbrakk> \<Longrightarrow> \<exists>b<top. \<forall>z>b. P z 2. \<And>b. \<lbrakk>b < top; \<forall>z>b. P z\<rbrakk> \<Longrightarrow> \<exists>S. open S \<and> top \<in> S \<and> Ball S P [PROOF STEP] moreover [PROOF STATE] proof (state) this: \<exists>b<top. 
{b<..top} \<subseteq> S goal (2 subgoals): 1. \<And>S. \<lbrakk>open S; top \<in> S; Ball S P\<rbrakk> \<Longrightarrow> \<exists>b<top. \<forall>z>b. P z 2. \<And>b. \<lbrakk>b < top; \<forall>z>b. P z\<rbrakk> \<Longrightarrow> \<exists>S. open S \<and> top \<in> S \<and> Ball S P [PROOF STEP] assume "\<forall>s\<in>S. P s" [PROOF STATE] proof (state) this: \<forall>s\<in>S. P s goal (2 subgoals): 1. \<And>S. \<lbrakk>open S; top \<in> S; Ball S P\<rbrakk> \<Longrightarrow> \<exists>b<top. \<forall>z>b. P z 2. \<And>b. \<lbrakk>b < top; \<forall>z>b. P z\<rbrakk> \<Longrightarrow> \<exists>S. open S \<and> top \<in> S \<and> Ball S P [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: \<exists>b<top. {b<..top} \<subseteq> S \<forall>s\<in>S. P s [PROOF STEP] show "\<exists>b<top. \<forall>z>b. P z" [PROOF STATE] proof (prove) using this: \<exists>b<top. {b<..top} \<subseteq> S \<forall>s\<in>S. P s goal (1 subgoal): 1. \<exists>b<top. \<forall>z>b. P z [PROOF STEP] by (auto simp: subset_eq Ball_def) [PROOF STATE] proof (state) this: \<exists>b<top. \<forall>z>b. P z goal (1 subgoal): 1. \<And>b. \<lbrakk>b < top; \<forall>z>b. P z\<rbrakk> \<Longrightarrow> \<exists>S. open S \<and> top \<in> S \<and> Ball S P [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>b. \<lbrakk>b < top; \<forall>z>b. P z\<rbrakk> \<Longrightarrow> \<exists>S. open S \<and> top \<in> S \<and> Ball S P [PROOF STEP] fix b [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>b. \<lbrakk>b < top; \<forall>z>b. P z\<rbrakk> \<Longrightarrow> \<exists>S. open S \<and> top \<in> S \<and> Ball S P [PROOF STEP] assume "b < top" "\<forall>z>b. P z" [PROOF STATE] proof (state) this: b < top \<forall>z>b. P z goal (1 subgoal): 1. \<And>b. \<lbrakk>b < top; \<forall>z>b. P z\<rbrakk> \<Longrightarrow> \<exists>S. open S \<and> top \<in> S \<and> Ball S P [PROOF STEP] then [PROOF STATE] proof (chain) picking this: b < top \<forall>z>b. 
P z [PROOF STEP] show "\<exists>S. open S \<and> top \<in> S \<and> (\<forall>xa\<in>S. P xa)" [PROOF STATE] proof (prove) using this: b < top \<forall>z>b. P z goal (1 subgoal): 1. \<exists>S. open S \<and> top \<in> S \<and> (\<forall>xa\<in>S. P xa) [PROOF STEP] by (intro exI[of _ "{b <..}"]) auto [PROOF STATE] proof (state) this: \<exists>S. open S \<and> top \<in> S \<and> (\<forall>xa\<in>S. P xa) goal: No subgoals! [PROOF STEP] qed
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": null, "hexsha": null, "include": null, "lang": null, "length": 17, "llama_tokens": 1977, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
import numpy as np
import pytest

from sklearn.utils._readonly_array_wrapper import ReadonlyArrayWrapper, _test_sum
from sklearn.utils._testing import create_memmap_backed_data


def _readonly_array_copy(x):
    """Return a copy of x with flag writeable set to False."""
    duplicate = x.copy()
    duplicate.flags["WRITEABLE"] = False
    return duplicate


def _create_memmap_backed_data(data):
    """Return ``data`` backed by a read-only, aligned memory map."""
    return create_memmap_backed_data(
        data, mmap_mode="r", return_folder=False, aligned=True
    )


@pytest.mark.parametrize("readonly", [_readonly_array_copy, _create_memmap_backed_data])
@pytest.mark.parametrize("dtype", [np.float32, np.float64, np.int32, np.int64])
def test_readonly_array_wrapper(readonly, dtype):
    """Check that ReadonlyArrayWrapper lets fused-typed code accept read-only buffers."""
    values = np.arange(10).astype(dtype)
    expected = _test_sum(values)

    # Wrapping a writable buffer must not change the result.
    assert _test_sum(ReadonlyArrayWrapper(values)) == pytest.approx(expected, rel=1e-11)

    # A bare read-only buffer is rejected by the fused-typed function ...
    frozen = readonly(values)
    with pytest.raises(ValueError, match="buffer source array is read-only"):
        _test_sum(frozen)

    # ... but is accepted, with the same result, once wrapped.
    assert _test_sum(ReadonlyArrayWrapper(frozen)) == pytest.approx(expected, rel=1e-11)
{ "alphanum_fraction": 0.7447916667, "author": null, "avg_line_length": 32, "converted": null, "ext": "py", "file": null, "hexsha": "38163cc2461ce878eb638401b74bea21507f701f", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 26886, "max_forks_repo_forks_event_max_datetime": "2022-03-31T18:03:23.000Z", "max_forks_repo_forks_event_min_datetime": "2015-01-01T00:59:27.000Z", "max_forks_repo_head_hexsha": "92bc7fbe1040f49e820473e33cc3902a5a7177c7", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "13rianlucero/CrabAgePrediction", "max_forks_repo_path": "crabageprediction/venv/Lib/site-packages/sklearn/utils/tests/test_readonly_wrapper.py", "max_issues_count": 17065, "max_issues_repo_head_hexsha": "92bc7fbe1040f49e820473e33cc3902a5a7177c7", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:48:34.000Z", "max_issues_repo_issues_event_min_datetime": "2015-01-01T02:01:58.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "13rianlucero/CrabAgePrediction", "max_issues_repo_path": "crabageprediction/venv/Lib/site-packages/sklearn/utils/tests/test_readonly_wrapper.py", "max_line_length": 88, "max_stars_count": 50961, "max_stars_repo_head_hexsha": "92bc7fbe1040f49e820473e33cc3902a5a7177c7", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "13rianlucero/CrabAgePrediction", "max_stars_repo_path": "crabageprediction/venv/Lib/site-packages/sklearn/utils/tests/test_readonly_wrapper.py", "max_stars_repo_stars_event_max_datetime": "2022-03-31T23:40:12.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-01T06:06:31.000Z", "num_tokens": 328, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1344 }
module Variables
# Decision variables and shared expressions for the geometric box-packing model.
# `boxes` and the bound constants are provided by the included Constants module.

include("./constants.jl")

using JuMP
using .Constants

export init_variables

"""
    init_variables(m)

Attach all geometric decision variables and helper expressions for the
box-packing model to the JuMP model `m`, and return `m`.

Box dimensions are created as *fixed* variables (declared with `== boxes[i][k]`),
which is why their numeric values are later recovered with `fix_value`.
"""
function init_variables(m)
    #Dimensions of box (fixed to the data in `boxes`: length, width, height)
    @variable(m, lob_length_of_box[i=1:length(boxes)] == boxes[i][1])
    @variable(m, wob_width_of_box[i=1:length(boxes)] == boxes[i][2])
    @variable(m, hob_height_of_box[i=1:length(boxes)] == boxes[i][3])

    #Front-left coordinate of each box (continuous placement variables)
    @variable(m, flbx_front_left_bot_x[i=1:length(boxes)] >= 0)
    @variable(m, flby_front_left_bot_y[i=1:length(boxes)] >= 0)
    @variable(m, flbz_front_left_bot_z[i=1:length(boxes)] >= 0)

    # Orientation indicators: each box edge is aligned with exactly one axis.
    # NOTE(review): the one-axis-per-edge constraints are presumably added
    # elsewhere (constraints module) — confirm.
    #Check if length (pi) is parallel to X, Y or Z-axis
    @variable(m, lpx_length_of_box_parallel_to_x[i=1:length(boxes)], Bin)
    @variable(m, lpy_length_of_box_parallel_to_y[i=1:length(boxes)], Bin)
    @variable(m, lpz_length_of_box_parallel_to_z[i=1:length(boxes)], Bin)

    #Check if width (qi) is parallel to X, Y or Z-axis
    @variable(m, wpx_width_of_box_parallel_to_x[i=1:length(boxes)], Bin)
    @variable(m, wpy_width_of_box_parallel_to_y[i=1:length(boxes)], Bin)
    @variable(m, wpz_width_of_box_parallel_to_z[i=1:length(boxes)], Bin)

    #Check if height (ri) is parallel to X, Y or Z-axis
    @variable(m, hpx_height_of_box_parallel_to_x[i=1:length(boxes)], Bin)
    @variable(m, hpy_height_of_box_parallel_to_y[i=1:length(boxes)], Bin)
    @variable(m, hpz_height_of_box_parallel_to_z[i=1:length(boxes)], Bin)

    # Footprint of box i along each axis: the fixed edge length times the
    # binary selecting whether that edge lies along the axis.
    # box_j_x_axis_cover = lpx_length_of_box_parallel_to_x[j]*lob_length_of_box[j] + wpx_width_of_box_parallel_to_x[j]*wob_width_of_box[j]
    # box_j_y_axis_cover = lpy_length_of_box_parallel_to_y[j]*lob_length_of_box[j] + wpy_width_of_box_parallel_to_y[j]*wob_width_of_box[j]
    @expressions(m, begin
        box_x_axis_cover[i=1:length(boxes)], (
            m[:lpx_length_of_box_parallel_to_x][i]*fix_value(m[:lob_length_of_box][i]) +
            m[:wpx_width_of_box_parallel_to_x][i]*fix_value(m[:wob_width_of_box][i])
        )
        box_y_axis_cover[i=1:length(boxes)], (
            m[:lpy_length_of_box_parallel_to_y][i]*fix_value(m[:lob_length_of_box][i]) +
            m[:wpy_width_of_box_parallel_to_y][i]*fix_value(m[:wob_width_of_box][i])
        )
    end)

    #Make objective available for the whole program. Later used in objective function.
    # heights[i] is the top z-coordinate of box i.
    @expression(
        m,
        heights[i=1:length(boxes)],
        m[:flbz_front_left_bot_z][i] + fix_value(m[:hob_height_of_box][i])
    )

    # maxHeight is a variable bounded below by 0 and above by H.
    # NOTE(review): H appears to be a constant from Constants — confirm.
    @variable(m, H >= maxHeight >= 0)
    # maxHeight must dominate the top of every box.
    @constraint(m, m[:maxHeight] .>= m[:heights])

    return m
end
end
{ "alphanum_fraction": 0.7031188314, "author": null, "avg_line_length": 40.8548387097, "converted": null, "ext": "jl", "file": null, "hexsha": "132ea0ed52d08ad51cb24c10b5cd5b39232a5ed8", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5d4a51598f1677c2f5c219a88ca9ab4c9b6a5c6f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ToralfFrich/Master_Thesis", "max_forks_repo_path": "src/constraints/geometric/variables.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "5d4a51598f1677c2f5c219a88ca9ab4c9b6a5c6f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ToralfFrich/Master_Thesis", "max_issues_repo_path": "src/constraints/geometric/variables.jl", "max_line_length": 138, "max_stars_count": null, "max_stars_repo_head_hexsha": "5d4a51598f1677c2f5c219a88ca9ab4c9b6a5c6f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ToralfFrich/Master_Thesis", "max_stars_repo_path": "src/constraints/geometric/variables.jl", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 755, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 2533 }
""" json 불러와서 캡션 붙이는 것 """ import json import pandas as pd path = './datasets/vqa/v2_OpenEnded_mscoco_train2014_questions.json' with open(path) as question: question = json.load(question) # question['questions'][0] # question['questions'][1] # question['questions'][2] df = pd.DataFrame(question['questions']) df caption_path = './datasets/caption/vis_st_trainval.json' with open(caption_path) as cap: cap = json.load(cap) df_cap = pd.DataFrame(cap) df_cap df_addcap = pd.merge(df, df_cap, how='left', on='image_id') del df_addcap['file_path'] ######################################################################################################################## """ pandas to json """ df_addcap.to_json('./datasets/caption/train_cap2.json', orient='table') with open('./datasets/caption/train_cap2.json') as train_cap: train_cap = json.load(train_cap) ######################################################################################################################## ######################################################################################################################## """ answer + cap """ path = '/home/nextgen/Desktop/mcan-vqa/datasets/vqa/v2_mscoco_train2014_annotations.json' path = './datasets/vqa/v2_mscoco_val2014_annotations.json' with open(path) as answer: answer = json.load(answer) answer['annotations'][0] df_ans = pd.DataFrame(answer['annotations']) df_ans[:0] del df_ans['question_type'] del df_ans['answers'] del df_ans['answer_type'] del df_ans['image_id'] df_ans[df_ans['question_id']==458752000] df_addcap2 = pd.merge(df_addcap, df_ans, how='left', on='question_id') df_addcap2[:0] df_addcap2['multiple_choice_answer'] # del df_addcap['file_path'] df_addcap2.to_json('./datasets/caption/val_qacap.json', orient='table') with open('./datasets/caption/train_qacap.json') as train_qacap: train_qacap = json.load(train_qacap) ######################################################################################################################## """val 
test도 마찬가지""" path = './datasets/vqa/v2_OpenEnded_mscoco_val2014_questions.json' with open(path) as question: question = json.load(question) df = pd.DataFrame(question['questions']) df caption_path = './datasets/caption/vis_st_trainval.json' with open(caption_path) as cap: cap = json.load(cap) df_cap = pd.DataFrame(cap) df_cap df_addcap = pd.merge(df, df_cap, how='left', on='image_id') df_addcap[:0] del df_addcap['file_path'] df_addcap.to_json('./datasets/caption/val_cap.json', orient='table') #test path = './datasets/vqa/v2_OpenEnded_mscoco_test-dev2015_questions.json' with open(path) as question: question = json.load(question) df = pd.DataFrame(question['questions']) df df['image_id'] = df.image_id.astype(int) caption_path = './datasets/caption/vis_st_test.json' with open(caption_path) as cap: cap = json.load(cap) df_cap = pd.DataFrame(cap) df_cap df_cap['image_id'] = df_cap.image_id.astype(int) df_addcap = pd.merge(df, df_cap, how='left', on='image_id') df_addcap[:0] del df_addcap['file_path'] df_addcap.to_json('./datasets/caption/test_cap.json', orient='table') ######################################################################################################################## from core.data.ans_punct import prep_ans import numpy as np import en_vectors_web_lg, random, re, json import json from core.data.data_utils import ques_load stat_ques_list = \ json.load(open('./datasets/caption/train_cap.json', 'r'))['data'] + \ json.load(open('./datasets/caption/val_cap.json', 'r'))['data'] + \ json.load(open('./datasets/caption/test_cap.json', 'r'))['data'] def tokenize(stat_ques_list, use_glove): token_to_ix = { 'PAD': 0, 'UNK': 1, } spacy_tool = None pretrained_emb = [] if use_glove: spacy_tool = en_vectors_web_lg.load() pretrained_emb.append(spacy_tool('PAD').vector) pretrained_emb.append(spacy_tool('UNK').vector) for ques in stat_ques_list: words = re.sub( r"([.,'!?\"()*#:;])", '', ques['question'].lower() ).replace('-', ' ').replace('/', ' ').split() for word 
in words: if word not in token_to_ix: token_to_ix[word] = len(token_to_ix) if use_glove: pretrained_emb.append(spacy_tool(word).vector) for ques in stat_ques_list: words = re.sub( r"([.,'!?\"()*#:;])", '', ques['caption'].lower() ).replace('-', ' ').replace('/', ' ').split() for word in words: if word not in token_to_ix: token_to_ix[word] = len(token_to_ix) if use_glove: pretrained_emb.append(spacy_tool(word).vector) pretrained_emb = np.array(pretrained_emb) return token_to_ix, pretrained_emb token_to_ix, pretrained_emb = tokenize(stat_ques_list, True) ####################################################################################################################### # with open('./datasets/vqa/v2_mscoco_train2014_annotations.json') as answer: # answer = json.load(answer) # # answer['annotations'][2] """ 답을 이용하는거로 하면 train val 비교로해야 함 test셋은 답을 제공하지 않아서 test할 때 답을 이용하는 모델을 사용할 수 없음 """ #### import cal_sim import pandas as pd with open('datasets/caption/train_cap.json') as train_cap: train_cap = json.load(train_cap) with open('datasets/caption/val_cap.json') as val_cap: val_cap = json.load(val_cap) with open('datasets/caption/test_cap.json') as test_cap: test_cap = json.load(test_cap) df_train = pd.DataFrame(train_cap['data']) df_val = pd.DataFrame(val_cap['data']) df_test = pd.DataFrame(test_cap['data']) df_train[:0] # df_train['similarity'] = cal_sim.sent_sim((df_train['question'], dtype=int32), (df_train['caption'], dtype=int32)) df_train.iloc[0]['question'] def txt2vec(sentence): # s = sentence.split() tt = [] new_i = re.sub( r"([.,'!?\"()*#:;])", '', sentence.lower() ).replace('-', ' ').replace('/', ' ').split() for i in new_i: num = token_to_ix[i] tt.append(pretrained_emb[num]) return tt stat_ques_list[0] token_to_ix['what'] len(txt2vec(df_train.iloc[0]['question'])) df_train.iloc[0]['question'] df_train.iloc[0]['caption'] len(txt2vec(df_train.iloc[0]['caption'])) from numpy import dot from numpy.linalg import norm import numpy as np def cos_sim(A, B): 
return dot(A, np.transpose(B)) / (norm(A) * norm(B)) def word_sim(w1,w2): #word simiarity s = 0.5 * (1+ cos_sim(w1,w2)) return s def sent_sim(ss1, ss2): #sentence simiarity s1 = txt2vec(ss1) s2 = txt2vec(ss2) t = [] for i in s1[2:]: #question 0,1 are PAD, UNK tmp = [] for j in s2[2:]: #caption tmp_sim = word_sim(i,j) tmp.append(tmp_sim) t.append(max(tmp)) sentence_sim = sum(t) / len(s1[2:]) return sentence_sim t = sent_sim('yes', 'hello') tmp = sent_sim(df_train.iloc[105]['question'], df_train.iloc[103]['caption']) t1 = sent_sim('Is there a travel guide on the table?', 'A place of cake and coffee are on an outdoor table') t2 = sent_sim('yes', 'A place of cake and coffee are on an outdoor table') t3 = sent_sim('no', 'no') df_train.iloc[105]['question'] #유사도 좀 이상한 듯 너무 높게 나오는 것 같은느낌 df_train.iloc[103]['caption'] cos_sim(txt2vec('e'), txt2vec('z')) new_i = re.sub( r"([.,'!?\"()*#:;])", '', df_train.iloc[102]['question'].lower() ).replace('-', ' ').replace('/', ' ').split() np.dot(txt2vec(df_train.iloc[103]['question']), txt2vec(df_train.iloc[103]['caption']))
{ "alphanum_fraction": 0.5889103804, "author": null, "avg_line_length": 26.7413793103, "converted": null, "ext": "py", "file": null, "hexsha": "8165627d6470d7c3af8974c3cff4f40b6476f57e", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "788e21fc1bc712018166aa44cc3298264f493f3b", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "JoonseoKang/mcan-cap", "max_forks_repo_path": "json_pandas.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "788e21fc1bc712018166aa44cc3298264f493f3b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "JoonseoKang/mcan-cap", "max_issues_repo_path": "json_pandas.py", "max_line_length": 120, "max_stars_count": null, "max_stars_repo_head_hexsha": "788e21fc1bc712018166aa44cc3298264f493f3b", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "JoonseoKang/mcan-cap", "max_stars_repo_path": "json_pandas.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1967, "path": null, "reason": "import numpy,from numpy", "repo": null, "save_path": null, "sha": null, "size": 7755 }
# script to convert the newly generated Relative Humidity


def _saturation_vapor_pressure(tas_arr):
    """
    Magnus-form saturation vapor pressure (hPa) for temperature in degrees C.

    Shared by convert_to_hur / convert_to_vap so the constants live in one place.
    Overflow warnings are suppressed because out-of-range (e.g. nodata) cells
    are masked out by the caller after conversion.
    """
    import numpy as np
    with np.errstate(over='ignore'):
        # esa = 6.112 * exp(17.62*T / (243.12+T))  [Magnus constants]
        # alternate constants used previously: 22.46 / 272.62
        return 6.112 * np.exp(17.62 * tas_arr / (243.12 + tas_arr))


def convert_to_hur(tas_arr, vap_arr):
    """
    Convert vapor pressure to relative humidity.

    :param tas_arr: array of air temperatures (degrees C)
    :param vap_arr: array of vapor pressures (hPa)
    :returns: array of relative humidities (percent)
    """
    return vap_arr / _saturation_vapor_pressure(tas_arr) * 100


def convert_to_vap(tas_arr, hur_arr):
    """
    Convert relative humidity to vapor pressure.

    :param tas_arr: array of air temperatures (degrees C)
    :param hur_arr: array of relative humidities (percent)
    :returns: array of vapor pressures (hPa)
    """
    return (hur_arr * _saturation_vapor_pressure(tas_arr)) / 100


def run(x):
    """
    Compute a vapor-pressure GeoTiff from a (tas, hur) pair of GeoTiff paths.

    :param x: 2-sequence of (tas_path, hur_path)
    :returns: the output filename written; derived from the hur path with
              'hur'->'vap' and '_metric_'->'_hPa_' substitutions
    """
    tas = rasterio.open(x[0])
    hur = rasterio.open(x[1])

    meta = tas.meta
    meta['dtype'] = 'float32'  # set it to float32
    meta.update(compress='lzw')
    meta.pop('transform')

    tas_arr = tas.read(1)
    hur_arr = hur.read(1)
    vap_arr = convert_to_vap(tas_arr, hur_arr)

    # mask it: nodata cells in tas stay nodata in the output
    mask = tas.read_masks(1)
    vap_arr[mask == 0] = tas.nodata

    # build an output filename from the input hur path -- changed to deal with pathing!
    output_filename = x[1].replace('hur', 'vap')
    output_filename = output_filename.replace('_metric_', '_hPa_')
    # output_filename = x[0].replace( 'tas', 'vap' )
    # output_filename = output_filename.replace( '_C_', '_hPa_' )

    dirname = os.path.dirname(output_filename)
    try:
        # best-effort: tolerate races when many workers create the same dir
        if not os.path.exists(dirname):
            os.makedirs(dirname)
    except:
        pass

    with rasterio.open(output_filename, 'w', **meta) as out:
        out.write(vap_arr.astype(np.float32), 1)
    return output_filename


if __name__ == '__main__':
    # import modules
    import os, glob, rasterio
    import numpy as np
    from pathos import multiprocessing as mp

    # args
    ncores = 40
    tas_input_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_november_final/ar5'
    hur_input_path = '/Data/malindgren/cru_november_final/ar5'
    models = ['IPSL-CM5A-LR', 'GISS-E2-R', 'MRI-CGCM3', 'CCSM4', 'GFDL-CM3']

    for model in models:
        # print model
        tas_files = sorted(glob.glob(os.path.join(tas_input_path, model, 'tas', 'downscaled', '*.tif')))
        hur_files = sorted(glob.glob(os.path.join(hur_input_path, model, 'hur', 'downscaled', '*.tif')))

        # combine the sorted lists which should now be in a common order...
        tas_hur_list = zip(tas_files, hur_files)

        # run in parallel
        pool = mp.Pool(processes=ncores)
        out = pool.map(run, tas_hur_list)
        pool.close()

    # def return_files( input_path, var ):
    #   output_files = []
    #   for root, subs, files in os.walk( input_path ):
    #       # # print root
    #       if root.endswith( 'downscaled' ) and len( files ) != 0 and var in root:
    #           pool = mp.Pool( processes=ncores )
    #           files = pool.map( lambda x: os.path.join( root, x ), files )
    #           pool.close()
    #           output_files.append( files )
    #   return output_files
{ "alphanum_fraction": 0.6729470316, "author": null, "avg_line_length": 32.7093023256, "converted": null, "ext": "py", "file": null, "hexsha": "80c61aacc21822c977f0758b8a90ad8a8e99498c", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-05-25T03:46:00.000Z", "max_forks_repo_forks_event_min_datetime": "2020-09-16T04:48:57.000Z", "max_forks_repo_head_hexsha": "3fe8ea1774cf82149d19561ce5f19b25e6cba6fb", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ua-snap/downscale", "max_forks_repo_path": "snap_scripts/old_scripts/tem_iem_older_scripts_april2018/tem_inputs_iem/old_code/convert_tas_hur_to_vap.py", "max_issues_count": 17, "max_issues_repo_head_hexsha": "3fe8ea1774cf82149d19561ce5f19b25e6cba6fb", "max_issues_repo_issues_event_max_datetime": "2017-04-17T20:57:02.000Z", "max_issues_repo_issues_event_min_datetime": "2016-01-04T23:37:47.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ua-snap/downscale", "max_issues_repo_path": "snap_scripts/old_scripts/tem_iem_older_scripts_april2018/tem_inputs_iem/old_code/convert_tas_hur_to_vap.py", "max_line_length": 112, "max_stars_count": 5, "max_stars_repo_head_hexsha": "3fe8ea1774cf82149d19561ce5f19b25e6cba6fb", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ua-snap/downscale", "max_stars_repo_path": "snap_scripts/old_scripts/tem_iem_older_scripts_april2018/tem_inputs_iem/old_code/convert_tas_hur_to_vap.py", "max_stars_repo_stars_event_max_datetime": "2022-03-23T16:32:54.000Z", "max_stars_repo_stars_event_min_datetime": "2020-06-24T21:55:12.000Z", "num_tokens": 898, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 2813 }
[STATEMENT] lemma closed_Union [continuous_intros, intro]: "finite S \<Longrightarrow> \<forall>T\<in>S. closed T \<Longrightarrow> closed (\<Union>S)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>finite S; \<forall>T\<in>S. closed T\<rbrakk> \<Longrightarrow> closed (\<Union> S) [PROOF STEP] by (induct set: finite) auto
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": null, "hexsha": null, "include": null, "lang": null, "length": 1, "llama_tokens": 119, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
      subroutine qqb_ttw_v(p,msqv)
************************************************************************
*     Author: R. K. Ellis                                              *
*     March, 2012.                                                     *
*                                                                      *
*     Calculate the virtual matrix element squared for the process     *
*                                                                      *
*     q(-p1) +qbar(-p2)=                                               *
*                      +t(nu(p3)+e+(p4)+b(p5))                         *
*                      +t~(b~(p6)+e^-(p7)+nu~(p8))                     *
*                      +nu(p9) + e^+(p10)                              *
*                                                                      *
************************************************************************
      implicit none
      include 'constants.f'
      include 'ckm.f'
      include 'masses.f'
      include 'sprods_com.f'
      include 'zprods_com.f'
      include 'qcdcouple.f'
      include 'ewcouple.f'
      include 'momwbbm.f'
      include 'scheme.f'
      include 'plabel.f'
      integer j,k,nu,j1,j2,hb,hc
      double precision p(mxpart,4),q(mxpart,4),msqv(-nf:nf,-nf:nf)
      double precision qqb,qbq,dot
      double precision fac,mQsq,s56,betasq
      double complex a6treemm,a6treemp,a6treepm,a6treepp,
     & mtop(2,2),manti(2,2),a61mm,a61mp,a61pm,a61pp,a61(2,2),a6(2,2),
     & loqbq(2,2),hoqbq(2,2),loqqb(2,2),hoqqb(2,2)
      logical numcheck
      common/numcheck/numcheck
!$omp threadprivate(/numcheck/)

c--- virtual corrections are evaluated in the 'dred' scheme
      scheme='dred'

c--- zero the output array for all flavour combinations
      do j=-nf,nf
      do k=-nf,nf
      msqv(j,k)=0d0
      enddo
      enddo

c--- set the following flag to true to write out values of different primitives
c--- (a similar flag, to write out coefficients of the basis integrals,
c--- can be found in the routine a61mass)
      numcheck=.false.

c--- setup for performing check against numerical evaluation
      if (numcheck) then
c----- read in special point
      include 'MCFMpoint.f'
c      call writeout(p)
c      pause
c--- perform the usual business to rotate away from the z-direction
      do j=1,6
      q(j,4)=p(j,4)
      q(j,1)=p(j,3)
      q(j,2)=-p(j,2)
      q(j,3)=p(j,1)
      do k=1,4
      p(j,k)=q(j,k)
      enddo
      enddo
      endif

c--- collapse the top (p3+p4+p5) and anti-top (p6+p7+p8) decay products
c--- into single massive momenta q(5),q(6); q(3),q(4) are the leptons
c--- from the associated W (p9,p10)
      do nu=1,4
      q(1,nu)=p(1,nu)
      q(2,nu)=p(2,nu)
      q(3,nu)=p(9,nu)
      q(4,nu)=p(10,nu)
      q(5,nu)=p(3,nu)+p(4,nu)+p(5,nu)
      q(6,nu)=p(6,nu)+p(7,nu)+p(8,nu)
      enddo
      mQsq=mt**2

c--- construct the massless momenta a la Rodrigo
      do j=1,4
      do nu=1,4
      mom(j,nu)=q(j,nu)
      enddo
      enddo
c--- s56 = (q5+q6)^2 is the invariant mass squared of the top pair
c--- and betasq = 1 - 4*mQsq/s56 is the corresponding beta^2
      s56=2d0*dot(q,5,6)+2d0*mQsq
      betasq=1d0-4d0*mQsq/s56
c--- NOTE(review): betasq exactly equal to zero passes this test but
c--- would divide by zero in the dsqrt(betasq) denominators below --
c--- confirm the threshold configuration cannot occur in practice
      if (betasq .ge. 0d0) then
      bp=0.5d0*(1d0+dsqrt(betasq))
      bm=1d0-bp
      else
      write(6,*) 'betasq < 0 in qqb_ttw_v.f, betasq=',betasq
      call flush(6)
      stop
      endif
c--- massless projections of the two massive top momenta
      do nu=1,4
      mom(5,nu)=(bp*q(5,nu)-bm*q(6,nu))/dsqrt(betasq)
      mom(6,nu)=(bp*q(6,nu)-bm*q(5,nu))/dsqrt(betasq)
      enddo

c--- top and anti-top decay currents, 2x2 in helicity space
      call tdecayrod(p,3,4,5,6,7,8,0,mtop)
      call adecayrod(p,3,4,5,6,7,8,0,manti)

c--- compute spinor products
      call spinoru(6,mom,za,zb)

c--- overall factor
      fac=V*gsq**2*gwsq**6*aveqq/(mt*twidth)**4
      fac=fac*xn*ason2pi
c--- Breit-Wigner factor for the W built from mom(3),mom(4) (= p9,p10)
      fac=fac*s(3,4)**2/((s(3,4)-wmass**2)**2+(wmass*wwidth)**2)

c--- include factor for hadronic decays of W
      if (plabel(3) .eq. 'pp') fac=2d0*xn*fac
      if (plabel(7) .eq. 'pp') fac=2d0*xn*fac

c--- QBQ: compute 1-loop and tree amplitudes
      call a61mass(1,6,5,2,4,3,mQsq,a61mm,a61mp,a61pm,a61pp,
     & a6treemm,a6treemp,a6treepm,a6treepp)
      a61(1,1)=a61mm
      a61(1,2)=a61mp
      a61(2,1)=a61pm
      a61(2,2)=a61pp
      a6(1,1)=a6treemm
      a6(1,2)=a6treemp
      a6(2,1)=a6treepm
      a6(2,2)=a6treepp

c--- contract production amplitudes with the decay currents
c--- (loqbq = tree, hoqbq = one-loop) and accumulate
c--- fac*Re(tree x conjg(loop)) over the decay helicities hb,hc
      qbq=0d0
      do hb=1,2
      do hc=1,2
      hoqbq(hb,hc)=czip
      loqbq(hb,hc)=czip
      do j1=1,2
      do j2=1,2
      loqbq(hb,hc)=loqbq(hb,hc)+mtop(hb,j1)*a6(j1,j2)*manti(j2,hc)
      hoqbq(hb,hc)=hoqbq(hb,hc)+mtop(hb,j1)*a61(j1,j2)*manti(j2,hc)
      enddo
      enddo
      qbq=qbq+fac*dble(loqbq(hb,hc)*dconjg(hoqbq(hb,hc)))
      enddo
      enddo

c--- put a pause here when writing out primitives
c      if (numcheck) pause

c--- QQB: compute 1-loop and tree amplitudes
      call a61mass(2,6,5,1,4,3,mQsq,a61mm,a61mp,a61pm,a61pp,
     & a6treemm,a6treemp,a6treepm,a6treepp)
      a61(1,1)=a61mm
      a61(1,2)=a61mp
      a61(2,1)=a61pm
      a61(2,2)=a61pp
      a6(1,1)=a6treemm
      a6(1,2)=a6treemp
      a6(2,1)=a6treepm
      a6(2,2)=a6treepp

c--- same helicity contraction for the qqb crossing
      qqb=0d0
      do hb=1,2
      do hc=1,2
      hoqqb(hb,hc)=czip
      loqqb(hb,hc)=czip
      do j1=1,2
      do j2=1,2
      loqqb(hb,hc)=loqqb(hb,hc)+
     & mtop(hb,j1)*a6(j1,j2)*manti(j2,hc)
      hoqqb(hb,hc)=hoqqb(hb,hc)+
     & mtop(hb,j1)*a61(j1,j2)*manti(j2,hc)
      enddo
      enddo
      qqb=qqb+fac*dble(loqqb(hb,hc)*dconjg(hoqqb(hb,hc)))
      enddo
      enddo

c--- fill the flavour matrix, weighting by the squared CKM elements:
c--- quark-antiquark entries get qqb, antiquark-quark entries get qbq
      do j=-nf,nf
      do k=-nf,nf
      if ((j .gt. 0) .and. (k .lt. 0)) then
      msqv(j,k)=Vsq(j,k)*qqb
      elseif ((j .lt. 0) .and. (k .gt. 0)) then
      msqv(j,k)=Vsq(j,k)*qbq
      endif
      enddo
      enddo

      return
      end
{ "alphanum_fraction": 0.4778597786, "author": null, "avg_line_length": 29.2972972973, "converted": null, "ext": "f", "file": null, "hexsha": "c6115fb56877b523ecd6d242c190f5c189376213", "include": null, "lang": "FORTRAN", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 19, "max_forks_repo_forks_event_max_datetime": "2021-07-06T10:04:40.000Z", "max_forks_repo_forks_event_min_datetime": "2015-05-04T22:15:41.000Z", "max_forks_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "tmartini/JHUGen", "max_forks_repo_path": "MCFM-JHUGen/src/TopW/qqb_ttw_v.f", "max_issues_count": 64, "max_issues_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_issues_repo_issues_event_max_datetime": "2022-01-25T04:59:32.000Z", "max_issues_repo_issues_event_min_datetime": "2015-06-24T15:08:17.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "tmartini/JHUGen", "max_issues_repo_path": "MCFM-JHUGen/src/TopW/qqb_ttw_v.f", "max_line_length": 79, "max_stars_count": 3, "max_stars_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "tmartini/JHUGen", "max_stars_repo_path": "MCFM-JHUGen/src/TopW/qqb_ttw_v.f", "max_stars_repo_stars_event_max_datetime": "2020-09-04T19:59:36.000Z", "max_stars_repo_stars_event_min_datetime": "2015-06-08T13:09:28.000Z", "num_tokens": 2050, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 5420 }
# coding: utf-8
# Chi-square test on JPEG DCT coefficient value pairs (a classic
# steganalysis check): compare the observed histogram of coefficient
# values against the pairwise-averaged histogram expected after
# LSB-style embedding.
from scipy import stats
import numpy as np
from itertools import chain
from scipy.stats import chi2_contingency
import jpegio as jio
import collections

img = jio.read('00576.jpg')
# flatten the first (luminance) DCT coefficient plane into a 1-D array
g = img.coef_arrays[0]
g = g.reshape(g.shape[0] * g.shape[1])

for ind in range(30):
    # BUGFIX: the original sliced with the undefined name `i` (NameError)
    # and used float slice bounds (TypeError in Python 3); use the loop
    # variable `ind` and truncate the bounds to ints.
    lo = int(0.03 * len(g) * ind)
    hi = int(0.03 * len(g) * (ind + 1))
    g1 = g[lo:hi]
    # NOTE(review): g1 is never used afterwards -- presumably the counts
    # below were meant to run per 3% slice; confirm intended behavior.

# histogram of coefficient values over the whole plane
num = collections.Counter(g)
deg, cnt = zip(*num.items())
print(deg)
print(cnt)

# 2**11 possible coefficient values (-1024..1023) -> 1024 value pairs
t = 2**11
pairnum = int(t / 2)
print(pairnum)
# y:  expected counts (pair average), yy: observed counts (even member)
y = np.ones((pairnum, 1))
yy = np.ones((pairnum, 1))
deg = list(deg)
cnt = list(cnt)
o = []
for i in range(-1024, 1023, 2):
    # j in -512..511 indexes y/yy; negative j wraps around, which
    # covers all 1024 rows exactly once
    j = int(i / 2)
    if i in deg:
        add = deg.index(i)
        h1 = cnt[add]
    else:
        h1 = 0
    if i + 1 in deg:
        add = deg.index(i + 1)
        h2 = cnt[add]
    else:
        h2 = 0
    if h1 + h2 > 0:
        y[j] = (h1 + h2) / 2.0
        yy[j] = h1
        o.append([h1, h2])
    else:
        # drop empty pairs from the effective category count
        t = t - 2
print(o)

# one-sample chi-square of observed vs pair-averaged expectation
t, p = stats.chisquare(yy, f_exp=y)
print(t)
print(p)

# independence test on the (h1, h2) contingency pairs
chi2, p, dof, ex = chi2_contingency(o, correction=False)
print(chi2)
print(p)
{ "alphanum_fraction": 0.5054667788, "author": null, "avg_line_length": 19.8166666667, "converted": null, "ext": "py", "file": null, "hexsha": "a172f44e60ace4b62441e510d614e2772206cd77", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b0f861a02ac222656a0c68ad01c904172f52afcd", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "everange-ustc/ImageSteganalysis", "max_forks_repo_path": "chi_square_test.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "b0f861a02ac222656a0c68ad01c904172f52afcd", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "everange-ustc/ImageSteganalysis", "max_issues_repo_path": "chi_square_test.py", "max_line_length": 60, "max_stars_count": null, "max_stars_repo_head_hexsha": "b0f861a02ac222656a0c68ad01c904172f52afcd", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "everange-ustc/ImageSteganalysis", "max_stars_repo_path": "chi_square_test.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 396, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 1189 }
\documentclass{article} \usepackage{bm} \usepackage{amsmath} \usepackage{graphicx} \usepackage{mdwlist} \usepackage[colorlinks=true]{hyperref} \usepackage{geometry} \geometry{margin=1in} \geometry{headheight=2in} \geometry{top=1in} \usepackage{palatino} \usepackage{listings} \usepackage{color} \definecolor{codegreen}{rgb}{0,0.6,0} \definecolor{codegray}{rgb}{0.5,0.5,0.5} \definecolor{codepurple}{rgb}{0.58,0,0.82} \definecolor{backcolour}{rgb}{0.95,0.95,0.92} \lstdefinestyle{mystyle}{ backgroundcolor=\color{backcolour}, commentstyle=\color{codegreen}, keywordstyle=\color{magenta}, numberstyle=\tiny\color{codegray}, stringstyle=\color{codepurple}, basicstyle=\footnotesize, breakatwhitespace=false, breaklines=true, captionpos=b, keepspaces=true, numbers=left, numbersep=5pt, showspaces=false, showstringspaces=false, showtabs=false, tabsize=2 } \lstset{style=mystyle} %\renewcommand{\rmdefault}{palatino} \usepackage{fancyhdr} %\pagestyle{fancy} \rhead{} \lhead{} \chead{% {\vbox{% \vspace{4mm} \large Statement of Purpose \hfill \\ Seoul National University \\[2mm] \textbf{GAO ZHIYUAN} } } } \usepackage{paralist} \usepackage{todonotes} \setlength{\marginparwidth}{2.15cm} \usepackage{tikz} \usetikzlibrary{positioning,shapes,backgrounds} \begin{document} \pagestyle{fancy} %!TEX root = hw1.tex %% Q1 \section{In relation to your academic interest and personal experiences, please describe your motivation for your desired course. You may include information related to your preparation for the course and related academic achievements. Please state your goals while studying at Seoul National University as well as your study plan(4000 bytes limit)} I am applying to the undergraduate program in Computer Science and Engineering with a strong motivation into research. I have been an exchange student in Seoul National University for a year and I believe it would be an enriching experience if I enroll as an undergraduate. 
\\
\\
I believe computer science could make a concrete and tangible impact on our surroundings. In 2015, I first enrolled in the department of political science in Taiwan. The philosophy and paradigms that political science focuses on were fascinating. Immersion in those intellectual challenges could always be rigorous and diverting.
\\
\\
However, I turned to computer science when I started a volunteering community in Taiwan, where we were devoted to computer education in a Malaysian aboriginal community. In the face of extreme poverty in the tropical jungles, the computer proved to be feasible as a method of education. The community continues to grow and it also earned me an opportunity as a speaker for the openSUSE Asia Summit in Japan, 2017. I was obsessed with what computer science could achieve, and that seeded computer science in my mind as a profession.
\\
\\
I believe Computer Science is my vocation. I started programming at university, but I was a fast learner. I became better acquainted with programming on an STM32 board with a Cortex-M core. On a basic level, we had to set up a server on an RTOS, to monitor soil humidity and to broadcast the collected data to all connected devices. I navigated the datasheets, and careful consideration had to be taken so that the sensor was activated accurately with exact timing controls. It was the first time I got a basic idea of GPIO, SPI and other hardware components. And I found myself greatly immersed in the intense process of tweaking and developing a program.
\\
\\
SNU's computer science undergraduate program looms large in my mind, largely because of its interdisciplinary approach towards computer science. I was especially interested in the computer-system-related courses, such as compilers, system programming and computer architecture. My dedication to programming in Taiwan earned me the opportunity to exchange at Seoul National University for a year.
I made a compiler from scratch in the compiler class and hence had the chance to learn in detail what a compiler consists of and how these large projects should be organized. In addition, the operating systems course also greatly benefited me, with intensive Linux kernel projects that I am now using on my resume. I wish I had taken more courses when I was exchanging, since Hardware System Design and Principles and Practices of Software Development also greatly drew my interest. Lectures in the Department of Computer Science gave me a glimmer of a higher realm. Moments of such great intensity were so intoxicating that I believe 4 years as an undergraduate would grant me glittering opportunities for a leap forward.
\\
\\
As for a long-term goal, I would like to be a researcher, and I found myself especially interested in compiler backends for deep neural networks. We have seen the huge strides deep learning has made in past decades along with various applications in embedded devices, and thus the efficiency and portability of neural networks are gradually coming into discussion. Optimizing a neural network with respect to its compiler, in terms of code generation and instruction scheduling, explores a provocative but as of yet scarcely studied field of compiler optimization. I was tremendously inspired by darknet, a deep learning framework written in C, and when I embedded tiny-yolov3 on a Raspberry Pi with darknet, I also found out they have a bug in parsing models. In brief, my research would relate to improving deep learning frameworks with respect to compilation techniques.
\\
\\
I am a fast language learner. My growing skills in Korean, and my fluency in English, Japanese and Chinese, make me believe that I will be able to quickly fit in the classes here, where most major courses in the Computer Science department are in English, and that very shortly I would comprehend the Korean necessary for daily communication and lectures.
\\
\\
\section{Please briefly state your academic and extracurricular activities(4000 bytes limit)}

I demonstrated a good aptitude for science in high school, when I scored highest in math in the Chinese university entrance exam, which is considered to be the most influential, or the only, factor by which universities evaluate applicants and which is extremely competitive. In addition, I was the conductor of the high school orchestra and I hold a national certificate for trombone and tuba at the highest level.
\\
\\
I am also the founder of a volunteer community in Taiwan. We have been contributing to Malaysian aboriginal communities, building up connections between Taiwanese aboriginal volunteers and Malaysian communities. With continuous sponsorship from ASUS, the community is growing fast in the belief that computer education is having an impact on their lives. Fortunately, this experience earned me the chance to attend the openSUSE Asia Summit 2017 as a speaker, which is one of the largest open source conferences, and to exchange ideas with top developers in computer science.
\\
\\
I am also the captain of the university speed roller club in Taiwan, where we have routinely been holding local competitions and collaborating with other universities.
\\
\\
I was an intern at the Computer Systems and Platforms Laboratory at Seoul National University. I aimed at proposing a model to compare and detect source code plagiarism based on abstract syntax trees, which I learned about in the compiler class at SNU. I researched numerous previous works and papers, which greatly inspired my implementation.
Mingling different program scopes' conformance became a tradeoff, but eventually, I succeeded in deriving an equation which would retain a satisfying judgement with respect to source code plagiarism.
\\
\\
In addition, I am also a paid intern at the City Energy Lab in GSES, Seoul National University. I am contributing to a software refactoring, which would integrate and improve two existing weather simulation software packages---SURFEX and UWG. The code base appears to be large and demanding, but I'm gradually gaining comprehension of the software architecture and realize that the efficiency of the software can possibly be improved to a large extent.
\\
\\
Furthermore, I have been offered a paid internship opportunity at DYSK Labs, Taiwan, for the upcoming six months as a computer vision engineer. I expect my deep learning knowledge to be further developed there, which could be beneficial to my long-term research goal.
\\
\\
My programming skills also earned me remote paid work for Success Factors, a company based in Spain, with respect to Linux security and rootkit prevention.
\\
\\
\section{Please write about yourself with regard to your characteristics other than your record of academic achievement. This section is provided to illustrate the personal aspects of each applicant. The following contents may be included in this section, though this section is not limited to them; experiences which have been influential in your life, individual perspectives on current issues, or role models or figures you respect(4000 bytes limit)}

I am a hacker, a thinker, a tinkerer, and a wonderer.
\\
\\
Firstly, I was tremendously uplifted by Linus Torvalds, whose Linux kernel is one of the largest, best-known, and most widely-used open source projects. It formed the very base of most modern operating system distributions, and Linux still holds the largest share of the embedded system and server markets.
With rapt attention and religious devotion, two years ago I dove into the source code for the first time, tracing down the red-black tree and trying to understand the data structures. Thereafter the Linux kernel became my stepping stone to other glittering opportunities. I made a couple of diverting implementations which drew the interest of several recruiters; it also helped me find several opportunities as a programmer and served as something that I could capitalize upon. The Linux kernel taught me a code philosophy which retains both elegance and pragmatism. It lit up my obsession for programming, with great care for beauty, security, pragmatism and organization.
\\
\\
My experience as a backpacker is the second thing that shaped me from the bottom up. For 45 days I was striving to enjoy my survival in India, with wild dogs sleeping by my side from time to time and daily life elbowing myself onto every transportation. Things went differently when exposed to insecurity, fear, and pure excitement in a new atmosphere. I started to wonder about the summits of existence and how we become morally valuable, such that each of us is worthy of dignity and respect. And I started to see the beauty in people who were so open-hearted though they did not make any pleasant offer. It provoked my yearning for ideals, and the sense that it is the arduous journey that I am destined to take.
\\
\\
Thirdly, the exchange at Seoul National University drove the yearning more deeply into me, that I am destined to be in pursuit of excellence, with an honest and unironic hunger for a prudent foresight into computer science. I was given a glimpse of mountaineering here, where the truth is my light. We tend to focus on the cutting-edge research issues along with the challenging assignments. Every time I take a class, it feels like an adventure into a fascinating realm in profusion.
This exchange experience drove me to make a vital choice, to enroll in Seoul National University as an undergraduate and to be shaped from bottom up. \\ \end{document}
{ "alphanum_fraction": 0.79400017, "author": null, "avg_line_length": 98.8823529412, "converted": null, "ext": "tex", "file": null, "hexsha": "226754f4afb46db6040c6bc2fbb6ecca1c58abe5", "include": null, "lang": "TeX", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "d96f3ad7743d68447b835d411a732f865cfe6ac1", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "alapha23/resume", "max_forks_repo_path": "past_resume/sop.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "d96f3ad7743d68447b835d411a732f865cfe6ac1", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "alapha23/resume", "max_issues_repo_path": "past_resume/sop.tex", "max_line_length": 1099, "max_stars_count": null, "max_stars_repo_head_hexsha": "d96f3ad7743d68447b835d411a732f865cfe6ac1", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "alapha23/resume", "max_stars_repo_path": "past_resume/sop.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2456, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 11767 }
# -*- coding: utf-8 -*- # Copyright (c) 2018 The HERA Collaboration # Licensed under the MIT License from __future__ import print_function, division, absolute_import import numpy as np from six.moves import range from scipy.signal import windows from warnings import warn from scipy.optimize import leastsq, lsq_linear import copy #DEFAULT PARAMETERS FOR CLEANs CLEAN_DEFAULTS_1D={'tol':1e-9, 'window':'none', 'alpha':.5, 'maxiter':100, 'gain':0.1, 'edgecut_low': 0, 'edgecut_hi': 0, 'add_clean_residual':False, 'filt2d_mode':'rect'} DAYENU_DEFAULTS_1D = {'suppression_factors' : [1e-9], 'max_contiguous_edge_flags' : 10} DPSS_DEFAULTS_1D = {'suppression_factors' : [1e-9], 'eigenval_cutoff' : [1e-12], 'max_contiguous_edge_flags' : 10} DFT_DEFAULTS_1D = {'suppression_factors' : [1e-9], 'fundamental_period' : np.nan, 'max_contiguous_edge_flags' : 10} CLEAN_DEFAULTS_2D = {'tol':1e-9, 'window': ['none', 'none'], 'alpha':.5, 'maxiter':100, 'gain':0.1, 'edgecut_low': [0, 0], 'edgecut_hi': [0, 0], 'add_clean_residual':False, 'filt2d_mode':'rect'} DAYENU_DEFAULTS_2D = {'suppression_factors' : [[1e-9], [1e-9]], 'max_contiguous_edge_flags' : 10} DPSS_DEFAULTS_2D = {'suppression_factors' : [[1e-9], [1e-9]], 'eigenval_cutoff' : [[1e-12], [1e-12]], 'max_contiguous_edge_flags' : 10} DFT_DEFAULTS_2D = {'suppression_factors' : [[1e-9], [1e-9]], 'fundamental_period' : [np.nan, np.nan], 'max_contiguous_edge_flags' : 10} def _process_filter_kwargs(kwarg_dict, default_dict): """ Utility function to complete a dictionary of kwargs by inserting values in default_dict for missing keys Also enforces correct spelling by making sure that all keys in kwarg_dict are in default_dict Parameters ---------- kwarg_dict : dictionary of kwargs to process default_dict : dictionary containing default kwarg values. Returns ------- Nothing, kwarg dict is corrected in place. 
""" # check for mispelled keys for k in kwarg_dict: if not k in default_dict: raise ValueError("%s is not a valid argument!"%(k) + \ "valid arguments include %s"%(list(default_dict.keys()))) for k in default_dict: if not k in kwarg_dict: kwarg_dict[k] = default_dict[k] def wedge_width(bl_len, sdf, nchan, standoff=0., horizon=1.): '''Return the (upper,lower) delay bins that geometrically correspond to the sky. Variable names preserved for backward compatability with capo/PAPER analysis. Arguments: bl_len: length of baseline (in 1/[sdf], typically ns) sdf: frequency channel width (typically in GHz) nchan: number of frequency channels standoff: fixed additional delay beyond the horizon (same units as bl_len) horizon: proportionality constant for bl_len where 1 is the horizon (full light travel time) Returns: uthresh, lthresh: bin indices for filtered bins started at uthresh (which is filtered) and ending at lthresh (which is a negative integer and also not filtered) Designed for area = np.ones(nchan, dtype=np.int); area[uthresh:lthresh] = 0 ''' bl_dly = horizon * bl_len + standoff return calc_width(bl_dly, sdf, nchan) def _get_filter_area(x, filter_center, filter_width): """ Return an 'area' vector demarking where cleaning should be allowed to take place. Arguments: x : array-like real space vector listing where data is sampled. filter_center : center of the area to be cleaned. Units of 1/(x-units) filter_width : width of the region of area to be cleaned. Units of 1/(x-units) """ nx = len(x) dx = np.mean(np.diff(x)) if not np.isinf(filter_width): av = np.ones(len(x)) filter_size = ((-filter_center + filter_width), (filter_center + filter_width)) ut, lt = calc_width(filter_size, dx, nx) av[ut:lt] = 0. else: av = np.ones(nx) return av def place_data_on_uniform_grid(x, data, weights, xtol=1e-3): """If possible, place data on a uniformly spaced grid. 
Given a vector of x-values (x), with data and weights, this function determines whether there are gaps in the provided x-values that are multiples of the minimum distance between x-values or whether any gaps are integer multiples of a fundamental grid spacing. If there are gaps that are integer multiples of a fundamental spacing, this function restores these x-values and inserts zero-valued data and zero-valued weights at their location, returning equally spaced data and weights that are effectively flagged at the missing x-values. This supports filtering data that was regularly sampled but has missing samples due to (for example) correlator dropouts since several of our filtering methods (DPSS fits and CLEAN) require data to be sampled on an equally spaced grid. Parameters ---------- x: array-like, array of x-values. data: array-like, array of y-values. Should be the same length as x. weights: array-like, array of weights. Should be the same length as x. xtol: float, optional. fractional error tolerance to determine if x-values are on an incomplete grid. Returns ------- xout: array-like If the separations on x are multiples of a single underlying minimum unit returns x with all multiples of the fundamental unit filled in. If x is already uniformly spaced, returns x unchanged. If separations are not multiples of fundamental unit, also returns x unchanged. yout: array-like If the separations on x are multiples of a single underlying minimum unit returns y with all multiples of the fundamental unit filled in with zeros. If x is already uniformly spaced, returns y unchanged. If separations are not multiples of fundamental unit, also returns y unchanged. wout: array-like If the separations on x are multiples of a single underlying minimum unit returns w with all multiples of the fundamental unit filled in with zeros. If x is already uniformly spaced, returns w unchanged. If separations are not multiples of fundamental unit, also returns w unchanged. 
inserted: array-like boolean array indicating which x-values were inserted. """ xdiff = np.diff(x) dx = np.abs(np.diff(x)).min() * np.sign(np.diff(x)[0]) # first, check whether x, y, w already on a grid. # if they are, just return them. if np.allclose(xdiff, dx, rtol=0, atol=dx * xtol): xout = x dout = data wout = weights inserted = np.zeros(len(x), dtype=bool) return xout, dout, wout, inserted # next, check that the array is not on a grid and if it isn't, return x, y, w if not np.allclose(xdiff / dx, np.round(xdiff / dx), rtol=0.0, atol=np.abs(xtol * dx)): xout = x dout = data wout = weights inserted = np.zeros(len(x), dtype=bool) warn("Data cannot be placed on equally spaced grid! No values inserted.", RuntimeWarning) return xout, dout, wout, inserted # if the array is on a grid, then construct filled in grid. grid_size =int(np.round((x[-1] - x[0]) / dx)) + 1 xout = np.linspace(x[0], x[-1], grid_size) dout = np.zeros(grid_size, dtype=np.complex128) wout = np.zeros(grid_size, dtype=np.float) inserted = np.ones(grid_size, dtype=bool) # fill in original data and weights. for x_index, xt in enumerate(x): output_index = np.argmin(np.abs(xout - xt)) dout[output_index] = data[x_index] wout[output_index] = weights[x_index] inserted[output_index] = False return xout, dout, wout, inserted def _fourier_filter_hash(filter_centers, filter_half_widths, filter_factors, x, w=None, hash_decimal=10, **kwargs): ''' Generate a hash key for a fourier filter Parameters ---------- filter_centers: list, list of floats for filter centers filter_half_widths: list list of float filter half widths (in fourier space) filter_factors: list list of float filter factors x: the x-axis of the data to be subjected to the hashed filter. w: optional vector of float weights to hash to. default, none hash_decimal: number of decimals to use for floats in key. kwargs: additional hashable elements the user would like to include in their filter key. 
Returns ------- A key for fourier_filter arrays hasing the information provided in the args. ''' filter_key = ('x:',) + tuple(np.round(x,hash_decimal))\ + ('filter_centers x N x DF:',) + tuple(np.round(np.asarray(filter_centers) * np.mean(np.diff(x)) * len(x), hash_decimal))\ + ('filter_half_widths x N x DF:',) + tuple(np.round(np.asarray(filter_half_widths) * np.mean(np.diff(x)) * len(x), hash_decimal))\ + ('filter_factors x 1e9:',) + tuple(np.round(np.asarray(filter_factors) * 1e9, hash_decimal)) if w is not None: filter_key = filter_key + ('weights', ) + tuple(np.round(w.tolist(), hash_decimal)) filter_key = filter_key + tuple([kwargs[k] for k in kwargs]) return filter_key def calc_width(filter_size, real_delta, nsamples): '''Calculate the upper and lower bin indices of a fourier filter Arguments: filter_size: the half-width (i.e. the width of the positive part) of the region in fourier space, symmetric about 0, that is filtered out. In units of 1/[real_delta]. Alternatively, can be fed as len-2 tuple specifying the absolute value of the negative and positive bound of the filter in fourier space respectively. Example: (20, 40) --> (-20 < tau < 40) real_delta: the bin width in real space nsamples: the number of samples in the array to be filtered Returns: uthresh, lthresh: bin indices for filtered bins started at uthresh (which is filtered) and ending at lthresh (which is a negative integer and also not filtered). 
    Designed for area = np.ones(nsamples, dtype=np.int); area[uthresh:lthresh] = 0
    '''
    # A (neg, pos) pair of bounds means an asymmetric filter: compute a
    # one-sided width for each bound and combine the two thresholds.
    if isinstance(filter_size, (list, tuple, np.ndarray)):
        _, l = calc_width(np.abs(filter_size[0]), real_delta, nsamples)
        u, _ = calc_width(np.abs(filter_size[1]), real_delta, nsamples)
        return (u, l)
    bin_width = 1.0 / (real_delta * nsamples)  # width of one Fourier bin
    w = int(np.around(filter_size / bin_width))  # filter half-width in bins
    uthresh, lthresh = w + 1, -w
    # lthresh == 0 would make area[uthresh:0] empty; wrap to nsamples instead.
    if lthresh == 0:
        lthresh = nsamples
    return (uthresh, lthresh)


def fourier_filter(x, data, wgts, filter_centers, filter_half_widths, mode,
                   filter_dims=1, skip_wgt=0.1, zero_residual_flags=True, **filter_kwargs):
    '''
    Filter 1d or 2d data in the Fourier domain with rectangular windows centered
    at filter_centers with half-widths filter_half_widths, dispatching to one of
    several back-ends. The 'dft' and 'dayenu' modes support irregularly sampled
    data; filtering may be performed along either or both dimensions of 2d data.

    Parameters
    ----------
    x : array-like (or 2-list/tuple of array-like for 2d filtering)
        x-values of the data points. Depending on the chosen mode, may need to
        be equally spaced.
    data : 1d or 2d complex np.ndarray
        Data to filter.
    wgts : np.ndarray
        Real weights, same shape as data.
    filter_centers : array-like (2-list/tuple of array-like if 2d filtering)
        Centers of the rectangular Fourier regions to filter.
    filter_half_widths : array-like (2-list/tuple of array-like if 2d filtering)
        Half-widths of the rectangular Fourier regions to filter.
    mode : str
        One of 'clean', 'dft_leastsq', 'dpss_leastsq', 'dft_matrix',
        'dpss_matrix', 'dayenu', 'dayenu_dft_leastsq', 'dayenu_dpss_leastsq',
        'dayenu_dft_matrix', 'dayenu_dpss_matrix', 'dayenu_clean'.
        The 'dayenu_*' modes apply a dayenu filter and then deconvolve the
        subtracted foregrounds with the named fitting method.
        NOTE(review): the dft '*_matrix' variants have been observed to be
        numerically unstable; prefer the dpss variants.
    filter_dims : int or 2-list/tuple, optional
        Dimension(s) to filter; valid entries are 0, 1, or -1 (default 1).
    skip_wgt : float, optional
        Skip filtering rows whose unflagged fraction is below this value; such
        rows get a zero model, residual equal to data, and a 'skipped' status.
        Only works properly when all weights are between 0 and 1.
    zero_residual_flags : bool, optional
        If True, set flagged channels in the residual equal to zero.
    filter_kwargs :
        Extra options for the chosen back-end, e.g. 'fundamental_period' and
        'suppression_factors' for dft; 'eigenval_cutoff' and
        'suppression_factors' for dpss; 'cache' and 'max_contiguous_edge_flags'
        for the fitting/dayenu modes; 'tol', 'maxiter', 'filt2d_mode',
        'edgecut_low', 'edgecut_hi', 'add_clean_residual', 'window', 'gain',
        'alpha' for clean (defaults in dspec CLEAN defaults). If filtering 2d,
        per-dimension options are given as 2-tuples/lists. See dft_operator and
        dpss_operator for basis details.

    Returns
    -------
    d_mdl : np.ndarray
        Best-fit model of the data.
    d_res : np.ndarray
        data - model, nulled at flagged channels (if zero_residual_flags).
    info : dict
        Metadata: 'status' and 'filter_params' sub-dicts keyed by
        'axis_0'/'axis_1', plus 'clean_status' for clean mode and
        'info_deconv' for dayenu deconvolution modes. Clean uses a different
        info structure for downstream-compatibility reasons.
    '''
    # Normalize filter_dims to a list and validate its entries.
    if not isinstance(filter_dims, (list, tuple)):
        filter_dims = [filter_dims]
    for d in filter_dims:
        if not d in [0, 1, -1]:
            raise ValueError("filter_dims can either contain 0, 1, or -1.")
    supported_modes=['clean', 'dft_leastsq', 'dpss_leastsq', 'dft_matrix', 'dpss_matrix',
                     'dayenu', 'dayenu_dft_leastsq', 'dayenu_dpss_leastsq', 'dayenu_dpss_matrix',
                     'dayenu_dft_matrix', 'dayenu_clean']
    if not mode in supported_modes:
        raise ValueError("Need to supply a mode in supported modes:%s"%(str(supported_modes)))
    # Split e.g. 'dayenu_dpss_leastsq' -> ['dayenu', 'dpss', 'leastsq'].
    mode = mode.split('_')
    ndim_data = data.ndim
    ndim_wgts = wgts.ndim
    if not (ndim_data == 1 or ndim_data == 2):
        raise ValueError("data must be a 1D or 2D ndarray")
    if not ndim_wgts == ndim_data:
        raise ValueError("Number of dimensions in weights, %d does not equal number of dimensions in data, %d!"%(ndim_wgts, ndim_data))
    # The core code of this method will always assume 2d data.
    if ndim_data == 1:
        data = np.asarray([data])
        wgts = np.asarray([wgts])
    if len(filter_dims) == 2:
        filter2d = True
    elif len(filter_dims) == 1:
        filter2d = False
    else:
        raise ValueError("filter_dims either have length 1 or length 2")
    if 0 in filter_dims and not filter2d:
        # If we are only filtering along the time-axis then we set data and
        # weights equal to their transposes and proceed to filter as though we
        # are filtering across the frequency axis.
        # The transposes are undone below after filtering is complete.
        data = data.T
        wgts = wgts.T
    if 'cache' not in filter_kwargs:
        cache = {}
    else:
        cache = filter_kwargs.pop('cache')
    # process filter_kwargs: pick the default basis options for the mode and
    # dimensionality, then merge/validate the user-supplied kwargs into them.
    if 'dayenu' == mode[0]:
        if len(mode) > 1:
            if 'dft' in mode:
                if filter2d:
                    defaults = {**DAYENU_DEFAULTS_2D, **DFT_DEFAULTS_2D}
                else:
                    defaults = {**DAYENU_DEFAULTS_1D, **DFT_DEFAULTS_1D}
            elif 'dpss' in mode:
                if filter2d:
                    defaults = {**DAYENU_DEFAULTS_2D, **DPSS_DEFAULTS_2D}
                else:
                    defaults = {**DAYENU_DEFAULTS_1D, **DPSS_DEFAULTS_1D}
        else:
            if filter2d:
                defaults = DAYENU_DEFAULTS_2D
            else:
                defaults = DAYENU_DEFAULTS_1D
    elif mode[0] == 'dft':
        if filter2d:
            defaults = DFT_DEFAULTS_2D
        else:
            defaults = DFT_DEFAULTS_1D
    elif mode[0] == 'dpss':
        if filter2d:
            defaults = DPSS_DEFAULTS_2D
        else:
            defaults = DPSS_DEFAULTS_1D
    elif mode[0] == 'clean':
        if filter2d:
            defaults = CLEAN_DEFAULTS_2D
        else:
            defaults = CLEAN_DEFAULTS_1D
    _process_filter_kwargs(filter_kwargs, defaults)
    if 'dft' in mode:
        # A NaN fundamental_period means "use twice the x-range", the default
        # Fourier resolution for the data being fit.
        fp = np.asarray(filter_kwargs['fundamental_period']).flatten()
        if filter2d:
            for m in range(len(fp)):
                if np.isnan(fp[m]):
                    fp[m] = 2. * (x[m].max() - x[m].min())
        else:
            if np.isnan(fp[0]):
                fp = [2. * (x.max() - x.min())]
        if len(fp) == 1:
            filter_kwargs['fundamental_period'] = fp[0]
        else:
            filter_kwargs['fundamental_period'] = list(fp)
    if mode[0] == 'dayenu':
        if zero_residual_flags is None:
            zero_residual_flags = True
        if filter2d:
            filter_dims_d = [1, 0]
        else:
            # If filter_dims = [0], then the data and wgts have already been
            # transposed so that the 1d filtering is executed as though we are
            # filtering in frequency; the transposes are undone below.
            filter_dims_d = [1]
        suppression_factors = filter_kwargs.pop('suppression_factors')
        max_contiguous_edge_flags = filter_kwargs.pop('max_contiguous_edge_flags')
        residual, info = dayenu_filter(x=x, data=data, wgts=wgts, filter_dimensions=filter_dims_d,
                                       filter_centers=filter_centers, filter_half_widths=filter_half_widths,
                                       filter_factors=suppression_factors, cache=cache, skip_wgt=skip_wgt,
                                       max_contiguous_edge_flags=max_contiguous_edge_flags,
                                       zero_residual_flags=zero_residual_flags)
        model = data - residual
        if len(mode) > 1:
            # Deconvolve the dayenu-subtracted foregrounds with a dft/dpss fit.
            model, _, info_deconv = _fit_basis_2d(x=x, data=model, filter_centers=filter_centers,
                                                  filter_dims=filter_dims_d, skip_wgt=skip_wgt,
                                                  basis=mode[1], method=mode[2],
                                                  wgts=wgts, basis_options=filter_kwargs,
                                                  filter_half_widths=filter_half_widths,
                                                  suppression_factors=suppression_factors,
                                                  cache=cache, max_contiguous_edge_flags=max_contiguous_edge_flags,
                                                  zero_residual_flags=zero_residual_flags)
            info['info_deconv']=info_deconv
    elif mode[0] in ['dft', 'dpss']:
        if zero_residual_flags is None:
            zero_residual_flags = True
        if filter2d:
            filter_dims_d = [1, 0]
        else:
            # As in the dayenu branch: 1d filtering always runs along the
            # (possibly pre-transposed) frequency axis.
            filter_dims_d = [1]
        suppression_factors = filter_kwargs.pop('suppression_factors')
        max_contiguous_edge_flags = filter_kwargs.pop('max_contiguous_edge_flags')
        # If filter2d is True, basis_options is a 2-list for the 0 and 1 dimension.
        model, residual, info = _fit_basis_2d(x=x, data=data, filter_centers=filter_centers,
                                              filter_dims=filter_dims_d, skip_wgt=skip_wgt,
                                              basis=mode[0], method=mode[1],
                                              wgts=wgts, basis_options=filter_kwargs,
                                              filter_half_widths=filter_half_widths,
                                              suppression_factors=suppression_factors,
                                              cache=cache, max_contiguous_edge_flags=max_contiguous_edge_flags,
                                              zero_residual_flags=zero_residual_flags)
    elif mode[0] == 'clean':
        if zero_residual_flags is None:
            zero_residual_flags = False
        model, residual, info = _clean_filter(x=x, data=data, wgts=wgts, filter_centers=filter_centers,
                                              skip_wgt=skip_wgt, filter_half_widths=filter_half_widths,
                                              clean2d=filter2d, zero_residual_flags=zero_residual_flags,
                                              **filter_kwargs)
        if filter2d:
            # NOTE(review): axis_0 and axis_1 reference the same dict object here.
            info['filter_params']['axis_0'] = filter_kwargs
            info['filter_params']['axis_1'] = info['filter_params']['axis_0']
        else:
            info['filter_params']['axis_1'] = filter_kwargs
    if 0 in filter_dims and not filter2d:
        # Undo transposes if we were performing a dimension-0 (time) filter.
        model = model.T
        residual = residual.T
        data = data.T
        wgts = wgts.T
        # Switch axis 0 and axis 1 info dicts since we were doing time cleaning.
        for k in info:
            if not k == 'info_deconv':
                info[k]['axis_0'] = copy.deepcopy(info[k]['axis_1'])
                info[k]['axis_1'] = {}
        # If we deconvolved the subtracted foregrounds in dayenu, swap the
        # deconvolution info dicts as well.
        if 'info_deconv' in info:
            for k in info['info_deconv']:
                info['info_deconv'][k]['axis_0'] = copy.deepcopy(info['info_deconv'][k]['axis_1'])
                info['info_deconv'][k]['axis_1'] = {}
    if ndim_data == 1:
        model = model.flatten()
        residual = residual.flatten()
    return model, residual, info


def vis_clean(data, wgts, filter_size, real_delta, clean2d=False, tol=1e-9, window='none',
              skip_wgt=0.1, maxiter=100, gain=0.1, filt2d_mode='rect', alpha=0.5,
              edgecut_low=0, edgecut_hi=0, add_clean_residual=False):
    '''Apply a highpass fourier filter to data. Uses aipy.deconv.clean. Default is a 1D clean
    on the last axis of data.
Arguments: data: 1D or 2D (real or complex) numpy array to be filtered. (Unlike previous versions, it is NOT assumed that weights have already been multiplied into the data.) wgts: real numpy array of linear multiplicative weights with the same shape as the data. filter_size: the half-width (i.e. the width of the positive part) of the region in fourier space, symmetric about 0, that is filtered out. In units of 1/[real_delta]. Alternatively, can be fed as len-2 tuple specifying the absolute value of the negative and positive bound of the filter in fourier space respectively. Example: (20, 40) --> (-20 < tau < 40) real_delta: the bin width in real space of the dimension to be filtered. If 2D cleaning, then real_delta must also be a len-2 list. clean2d : bool, if True perform 2D clean, else perform a 1D clean on last axis. tol: CLEAN algorithm convergence tolerance (see aipy.deconv.clean) window: window function for filtering applied to the filtered axis. See dspec.gen_window for options. If clean2D, can be fed as a list specifying the window for each axis in data. skip_wgt: skips filtering rows with very low total weight (unflagged fraction ~< skip_wgt). Model is left as 0s, residual is left as data, and info is {'skipped': True} for that time. Only works properly when all weights are all between 0 and 1. maxiter: Maximum number of iterations for aipy.deconv.clean to converge. gain: The fraction of a residual used in each iteration. If this is too low, clean takes unnecessarily long. If it is too high, clean does a poor job of deconvolving. alpha : float, if window is 'tukey', this is its alpha parameter. filt2d_mode : str, only applies if clean2d == True. options = ['rect', 'plus'] If 'rect', a 2D rectangular filter is constructed in fourier space (default). If 'plus', the 'rect' filter is first constructed, but only the plus-shaped slice along 0 delay and fringe-rate is kept. 
edgecut_low : int, number of bins to consider zero-padded at low-side of the FFT axis, such that the windowing function smoothly approaches zero. For 2D cleaning, can be fed as a tuple specifying edgecut_low for first and second FFT axis. edgecut_hi : int, number of bins to consider zero-padded at high-side of the FFT axis, such that the windowing function smoothly approaches zero. For 2D cleaning, can be fed as a tuple specifying edgecut_hi for first and second FFT axis. add_clean_residual : bool, if True, adds the CLEAN residual within the CLEAN bounds in fourier space to the CLEAN model. Note that the residual actually returned is not the CLEAN residual, but the residual in input data space. Returns: d_mdl: CLEAN model -- best fit low-pass filter components (CLEAN model) in real space d_res: CLEAN residual -- difference of data and d_mdl, nulled at flagged channels info: dictionary (1D case) or list of dictionaries (2D case) with CLEAN metadata ''' if clean2d: filter_dims = [1, 0] if not isinstance(real_delta, (list, tuple)) or not len(real_delta) == 2: raise ValueError("real_delta must be length 2 list or tuple if clean2d is True.") x = [np.arange(data.shape[0]) * real_delta[0], np.arange(data.shape[1]) * real_delta[1]] fc = [] fw = [] for m in range(2): if isinstance(filter_size[m], (list, tuple)): fc.append([(filter_size[m][1] - filter_size[m][0]) / 2.]) fw.append([np.abs(filter_size[m][1] + filter_size[m][0]) / 2.]) else: fc.append([0.]) fw.append([filter_size[m]]) edgecut_hi = (edgecut_hi, edgecut_hi) edgecut_low = (edgecut_low, edgecut_low) window = (window, window) else: filter_dims = 1 x = np.arange(data.shape[-1]) * real_delta if isinstance(filter_size, (list, tuple)): fc = [(filter_size[1] - filter_size[0]) / 2.] fw = [np.abs(filter_size[1] + filter_size[0]) / 2.] else: fc = [0.] 
fw = [filter_size] return fourier_filter(x, data, wgts, filter_centers=fc, filter_half_widths=fw, mode='clean', filter_dims=filter_dims, tol=tol, window=window, maxiter=maxiter, gain=gain, filt2d_mode=filt2d_mode, alpha=alpha, edgecut_hi=edgecut_hi, edgecut_low=edgecut_low, add_clean_residual=add_clean_residual) #TODO: Add DPSS interpolation function to this. def high_pass_fourier_filter(data, wgts, filter_size, real_delta, clean2d=False, tol=1e-9, window='none', skip_wgt=0.1, maxiter=100, gain=0.1, filt2d_mode='rect', alpha=0.5, edgecut_low=0, edgecut_hi=0, add_clean_residual=False): ''' wrapper for vis_clean to support backwards compatibility. See vis_clean docstring. ''' return vis_clean(data=data, wgts=wgts, filter_size=filter_size, real_delta=real_delta, clean2d=clean2d, tol=tol, window=window, skip_wgt=skip_wgt, maxiter=maxiter, gain=gain, filt2d_mode=filt2d_mode, alpha=alpha, edgecut_low=edgecut_low, edgecut_hi=edgecut_hi, add_clean_residual=add_clean_residual) def dayenu_filter(x, data, wgts, filter_dimensions, filter_centers, filter_half_widths, filter_factors, cache = {}, return_matrices=True, hash_decimal=10, skip_wgt=0.1, max_contiguous_edge_flags=10, zero_residual_flags=True): ''' Apply a linear delay filter to waterfall data. Due to performance reasons, linear filtering only supports separable delay/fringe-rate filters. Arguments --------- x: array-like or length-2 list/tuples that are array-like x-values for each data point in dimension to be filtered. data: 1D or 2D (real or complex) numpy array where last dimension is frequency. Does not assume that weights have already been multiplied! wgts: real numpy array of linear multiplicative weights with the same shape as the data. filter_dimensions: list list of integers indicating data dimensions to filter. 
Must be 0, 1, or -1 filter_centers: float, list, or 1d numpy array of delays at which to center filter windows Typically in units of (seconds) filter_half_widths: float, list, or 1d numpy array of half-widths of each delay filtere window with centers specified by filter_centers. Typically in units of (seconds) filter_factors: float, list, or 1d numpy array of factors by which filtering should be applied within each filter window specified in filter_centers and filter_half_widths. If a float or length-1 list/ndarray is provided, the same filter factor will be used in every filter window. cache: optional dictionary for storing pre-computed delay filter matrices. return_matrices: bool, if True, return a dict referencing every every filtering matrix used. hash_decimal: number of decimals to hash x to skip_wgt: skips filtering rows with very low total weight (unflagged fraction ~< skip_wgt). Model is left as 0s, residual is left as data, and info is {'skipped': True} for that time. Only works properly when all weights are all between 0 and 1. max_contiguous_edge_flags : int, optional if the number of contiguous samples at the edge is greater then this at either side, skip . zero_residual_flags : bool, optional. If true, set flagged channels in the residual equal to zero. Default is True. Returns ------- data: array, 2d clean residual with data filtered along the frequency direction. info: dictionary with filtering parameters and a list of skipped_times and skipped_channels has the following fields * 'status': dict holding two sub-dicts status of filtering on each time/frequency step. - 'axis_0'/'axis_1': dict holding the status of time filtering for each time/freq step. Keys are integer index of each step and values are a string that is either 'success' or 'skipped'. * 'filter_params': dict holding the filtering parameters for each axis with the following sub-dicts. - 'axis_0'/'axis_1': dict holding filtering parameters for filtering over each respective axis. 
- 'filter_centers': centers of filtering windows. - 'filter_half_widths': half-widths of filtering regions for each axis. - 'suppression_factors': amount of suppression for each filtering region. - 'x': vector of x-values used to generate the filter. ''' # check that data and weight shapes are consistent. d_shape = data.shape w_shape = wgts.shape d_dim = data.ndim w_dim = wgts.ndim if not (d_dim == 1 or d_dim == 2): raise ValueError("number of dimensions in data array does not " "equal 1 or 2! data dim = %d"%(d_dim)) if not (w_dim == 1 or w_dim == 2): raise ValueError("number of dimensions in wgts array does not " "equal 1 or 2! wght dim = %d"%(w_dim)) if not w_dim == d_dim: raise ValueError("number of dimensions in data array does not equal " "number of dimensions in weights array." "data.dim == %d, wgts.dim == %d"%(d_dim, w_dim)) for dim in range(d_dim): if not d_shape[dim] == w_shape[dim]: raise ValueError("number of elements along data dimension %d, nel=%d" "does not equal the number of elements along weight" "dimension %d, nel = %d"%(dim, d_shape[dim], dim, w_shape[dim])) if not isinstance(x, (np.ndarray,list, tuple)): raise ValueError("x must be a numpy array, list, or tuple") # Check that inputs are tiples or lists if not isinstance(filter_dimensions, (list,tuple,int, np.int)): raise ValueError("filter_dimensions must be a list or tuple") # if filter_dimensions are supplied as a single integer, convert to list (core code assumes lists). if isinstance(filter_dimensions, int): filter_dimensions = [filter_dimensions] # check that filter_dimensions is no longer then 2 elements if not len(filter_dimensions) in [1, 2]: raise ValueError("length of filter_dimensions cannot exceed 2") # make sure filter_dimensions are 0 or 1. 
for dim in filter_dimensions: if not dim in [0, 1] or not isinstance(dim, (int, np.int)): raise ValueError("filter dimension must be integer 0, or 1") # convert filter dimensions to a list of integers (incase the dimensions were supplied as floats) # will only filter each dim a single time. # now check validity of other inputs. We perform the same check over multiple # inputs by iterating over a list with their names. #convert 1d data to 2d data to save lines of code. if d_dim == 1: data = np.asarray([data]) wgts = np.asarray([wgts]) data_1d = True # 1d data will result in nonsensical filtering along zeroth axis. filter_dimensions=[1] else: data_1d = False nchan = data.shape[1] ntimes = data.shape[0] check_vars = [filter_centers, filter_half_widths, filter_factors] check_names = ['filter_centers', 'filter_half_widths', 'filter_factors'] for anum, aname, avar in zip(range(len(check_vars)),check_names,check_vars): # If any of these inputs is a float or numpy array, convert to a list. if isinstance(avar, np.ndarray): check_vars[anum] = list(avar) elif isinstance(avar, np.float): check_vars[anum] = [avar] filter_centers,filter_half_widths,filter_factors = check_vars # Next, perform some checks that depend on the filtering dimensions provided. if 0 in filter_dimensions and 1 in filter_dimensions: for avar,aname in zip(check_vars,check_names): err_msg = "2d clean specified! %s must be a length-2 list of lists for 2d clean"%aname # if we are going to filter in dimension 1 and 0, make sure that each input # listed in check_vars is a length-2 list of lists. 
if len(avar) == 2: if not (isinstance(avar[0], list) and isinstance(avar[1], list)): raise ValueError(err_msg) else: raise ValueError(err_msg) if not len(x) == 2: raise ValueError("For 2d filtering, x must be 2d long list or tuple or ndarray") for j in range(2): if not isinstance(x[j], (tuple, list, np.ndarray)): raise ValueError("x[%d] must be a tuple, list or numpy array."%(j)) x[j]=np.asarray(x[j]) for ff_num,ff_list in zip(filter_dimensions,filter_factors): # we allow the user to provide a single filter factor for multiple # filtering windows on a single dimension. This code # iterates through each dimension and if a single filter_factor is provided # it converts the filter_factor list to a list of filter_factors with the same # length as filter_centers. if len(ff_list) == 1: ff_list = [ff_list[0] for m in range(len(filter_centers[ff_num]))] else: if len(filter_factors) == 1: # extend filter factor list of user supplied a float or len-1 list. filter_factors = [filter_factors[0] for m in range(len(filter_centers))] if 0 in filter_dimensions: # convert 1d input-lists to # a list of lists for core-code to operate on. filter_factors = [filter_factors,[]] filter_centers = [filter_centers,[]] filter_half_widths = [filter_half_widths,[]] x = [x,None] elif 1 in filter_dimensions: # convert 1d input-lists to # a list of lists for core-code to operate on. filter_factors = [[],filter_factors] filter_centers = [[],filter_centers] filter_half_widths = [[],filter_half_widths] x = [None, x] check_vars = [filter_centers, filter_half_widths, filter_factors] # Now check that the number of filter factors = number of filter widths # = number of filter centers for each dimension. 
for fs in filter_dimensions: for aname1,avar1 in zip(check_names,check_vars): for aname2,avar2 in zip(check_names,check_vars): if not len(avar1[fs]) == len(avar2[fs]): raise ValueError("Number of elements in %s-%d must equal the" " number of elements %s-%d!"%(aname1, fs, aname2, fs)) info = {'status':{'axis_0':{}, 'axis_1':{}}, 'filter_params':{'axis_0':{}, 'axis_1':{}}} for fs in range(2): info['filter_params']['axis_%d'%fs]['filter_centers'] = filter_centers[fs] info['filter_params']['axis_%d'%fs]['filter_half_widths'] = filter_half_widths[fs] info['filter_params']['axis_%d'%fs]['filter_factors'] = filter_factors[fs] info['filter_params']['axis_%d'%fs]['x'] = x[fs] info['filter_params']['axis_%d'%fs]['mode'] = 'dayenu' skipped = [[],[]] # in the lines below, we iterate over the time dimension. For each time, we # compute a lazy covariance matrix (filter_mat) from the weights (wght) and # a sinc downweight matrix. (dayenu_mat_inv). We then attempt to # take the psuedo inverse to get a filtering matrix that removes foregrounds. # we do this for the zeroth and first filter dimension. output = copy.deepcopy(data) #this loop iterates through dimensions to iterate over (fs is the non-filter #axis). filter_matrices=[{},{}] #check filter factors for zeros and negative numbers for ff in filter_factors: for fv in ff: if fv <= 0.: raise ValueError("All filter factors must be greater than zero! You provided %.2e :(!"%(fv)) for fs in filter_dimensions: if fs == 0: _d, _w = output.T, wgts.T else: _d, _w = output, wgts #if the axis orthogonal to the iteration axis is to be filtered, then #filter it!. 
for sample_num, sample, wght in zip(range(data.shape[fs-1]), _d, _w): filter_key = _fourier_filter_hash(filter_centers=filter_centers[fs], filter_half_widths=filter_half_widths[fs], filter_factors=filter_factors[fs], x=x[fs], w=wght, label='dayenu_filter_matrix') if not filter_key in cache: #only calculate filter matrix and psuedo-inverse explicitly if they are not already cached #(saves calculation time). if np.count_nonzero(wght) / len(wght) >= skip_wgt and np.count_nonzero(wght[:max_contiguous_edge_flags]) > 0 \ and np.count_nonzero(wght[-max_contiguous_edge_flags:]) >0: wght_mat = np.outer(wght.T, wght) filter_mat = dayenu_mat_inv(x=x[fs], filter_centers=filter_centers[fs], filter_half_widths=filter_half_widths[fs], filter_factors=filter_factors[fs], cache=cache) * wght_mat try: #Try taking psuedo-inverse. Occasionally I've encountered SVD errors #when a lot of channels are flagged. Interestingly enough, I haven't #I'm not sure what the precise conditions for the error are but #I'm catching it here. cache[filter_key] = np.linalg.pinv(filter_mat) except np.linalg.LinAlgError: # skip if we can't invert or psuedo-invert the matrix. cache[filter_key] = None else: # skip if we don't meet skip_wegith criterion or continuous edge flags # are to many. This last item isn't really a problem for dayenu # but it's here for consistancy. cache[filter_key] = None filter_mat = cache[filter_key] if filter_mat is not None: if fs == 0: output[:, sample_num] = np.dot(filter_mat, sample) elif fs == 1: output[sample_num] = np.dot(filter_mat, sample) info['status']['axis_%d'%fs][sample_num] = 'success' else: skipped[fs-1].append(sample_num) info['status']['axis_%d'%fs][sample_num] = 'skipped' if return_matrices: filter_matrices[fs][sample_num]=filter_mat if zero_residual_flags: output = output * (~np.isclose(wgts, 0., atol=1e-10)).astype(float) # set residual equal to zero where weights are zero. #1d data will only be filtered across "channels". 
if data_1d and ntimes == 1: output = output[0] return output, info def wedge_filter(data, wgts, bl_len, sdf, standoff=0., horizon=1., min_dly=0.0, skip_wgt=0.5, mode='clean', **kwargs): '''Apply a wideband delay filter to data. Variable names preserved for backward compatability with capo/PAPER analysis. Arguments: data: 1D or 2D (real or complex) numpy array where last dimension is frequency. (Unlike previous versions, it is NOT assumed that weights have already been multiplied into the data.) wgts: real numpy array of linear multiplicative weights with the same shape as the data. bl_len: length of baseline (in 1/[sdf], typically ns) sdf: frequency channel width (typically in GHz) standoff: fixed additional delay beyond the horizon (same units as bl_len) horizon: proportionality constant for bl_len where 1 is the horizon (full light travel time) min_dly: a minimum delay used for cleaning: if bl_dly < min_dly, use min_dly. same units as bl_len skip_wgt: skips filtering rows with very low total weight (unflagged fraction ~< skip_wgt). Model is left as 0s, residual is left as data, and info is {'skipped': True} for that time. Only works properly when all weights are all between 0 and 1. mode: filtering mode (see supported modes in fourier_filter docstring) kwargs: see fourier_filter documentation Returns: d_mdl: CLEAN model -- best fit low-pass filter components (CLEAN model) in real space d_res: CLEAN residual -- difference of data and d_mdl, nulled at flagged channels info: dictionary (1D case) or list of dictionaries (2D case) with CLEAN metadata ''' # get bl delay bl_dly = _get_bl_dly(bl_len, horizon=horizon, standoff=standoff, min_dly=min_dly) return delay_filter(sdf=sdf, data=data, wgts=wgts, max_dly=bl_dly, skip_wgt=skip_wgt, **kwargs) def delay_filter(data, wgts, max_dly, sdf, skip_wgt=0.5, mode='clean', **kwargs): '''Apply a wideband delay filter to data. Variable names preserved for backward compatability with capo/PAPER analysis. 
    Arguments:
        data: 1D or 2D (real or complex) numpy array where last dimension is frequency.
            (Unlike previous versions, it is NOT assumed that weights have already been
            multiplied into the data.)
        wgts: real numpy array of linear multiplicative weights with the same shape as the data.
        max_dly: maximum abs of delay to filter to (around delay = 0.)
        sdf: frequency channel width (typically in GHz)
        skip_wgt: skips filtering rows with very low total weight (unflagged fraction ~< skip_wgt).
            Model is left as 0s, residual is left as data, and info is {'skipped': True} for that
            time. Only works properly when all weights are all between 0 and 1.
        mode: filtering mode (see supported modes in fourier_filter docstring)
        kwargs: see fourier_filter documentation

    Returns:
        d_mdl: CLEAN model -- best fit low-pass filter components (CLEAN model) in real space
        d_res: CLEAN residual -- difference of data and d_mdl, nulled at flagged channels
        info: dictionary (1D case) or list of dictionaries (2D case) with CLEAN metadata
    '''
    # Delay filtering is a frequency-axis (dimension 1) filter centered at zero delay.
    freqs = np.arange(data.shape[-1]) * sdf
    return fourier_filter(x=freqs, data=data, wgts=wgts, filter_centers=[0.],
                          filter_half_widths=[max_dly], skip_wgt=skip_wgt, filter_dims=1,
                          mode=mode, **kwargs)


def fringe_filter(data, wgts, max_frate, dt, skip_wgt=0.5, mode='clean', **kwargs):
    """
    Run a CLEAN deconvolution along the time axis.

    Args:
        data : 1D or 2D data array. If 2D, shape=(Ntimes, Nfreqs)
        wgts : 1D or 2D weight array.
        max_frate : float, maximum fringe-rate (i.e. frequency) to CLEAN, units of 1/[dt].
            A 2-tuple can be provided, where (20, 20) would clean between -20 < fr < 20.
        dt : float, time-bin width of data
        skip_wgt: skips filtering rows with very low total weight (unflagged fraction ~< skip_wgt).
            Model is left as 0s, residual is left as data, and info is {'skipped': True} for that
            time. Only works properly when all weights are all between 0 and 1.
        mode: filtering mode (see supported modes in fourier_filter docstring)
        kwargs: see fourier_filter docstring.

    Returns:
        d_mdl: CLEAN model -- best fit low-pass filter components (CLEAN model) in real space
        d_res: CLEAN residual -- difference of data and d_mdl, nulled at flagged channels
        info: dictionary (1D case) or list of dictionaries (2D case) with CLEAN metadata
    """
    # print deprecation warning
    times = np.arange(data.shape[0]) * dt
    # run fourier filter along the time axis (dimension 0).
    if isinstance(max_frate, (list,tuple)):
        # (neg, pos) bounds map to an off-center window:
        # center = (pos - neg)/2, half-width = |pos + neg|/2.
        fc = (max_frate[1] - max_frate[0]) / 2.
        fw = np.abs(max_frate[1] + max_frate[0]) / 2.
    else:
        fc = 0.
        fw = max_frate
    return fourier_filter(x=times, data=data, wgts=wgts, filter_centers=[fc],
                          filter_half_widths=[fw], skip_wgt=skip_wgt, filter_dims=0,
                          mode=mode, **kwargs)


def vis_filter(data, wgts, max_frate=None, dt=None, bl_len=None, sdf=None, standoff=0.0,
               horizon=1., min_dly=0., tol=1e-4, window='none', maxiter=100, gain=1e-1,
               skip_wgt=0.5, filt2d_mode='rect', edgecut_low=0, edgecut_hi=0, alpha=0.5,
               add_clean_residual=False):
    """
    A generalized interface to delay and/or fringe-rate 1D CLEAN functions, or a full 2D clean
    if both bl_len & sdf and max_frate & dt variables are specified.

    Args:
        data : 1D or 2D data array. If 2D has shape=(Ntimes, Nfreqs)
        wgts : float weight array, matching shape of data
        max_frate : float, maximum fringe-rate (i.e. frequency) to CLEAN, units of 1/[dt]
        dt : float, time-bin width [sec]
        bl_len: length of baseline (in 1/[sdf], typically ns)
        sdf: frequency channel width (typically in GHz)
        standoff: fixed additional delay beyond the horizon (same units as bl_len)
        horizon: proportionality constant for bl_len where 1 is the horizon
            (full light travel time)
        min_dly: a minimum delay used for cleaning: if bl_dly < min_dly, use min_dly.
            same units as bl_len
        tol: CLEAN algorithm convergence tolerance (see aipy.deconv.clean)
        window: window function for filtering applied to the filtered axis.
            See gen_window for options.
        skip_wgt: skips filtering rows with very low total weight (unflagged fraction ~< skip_wgt).
            Model is left as 0s, residual is left as data, and info is {'skipped': True} for that
            time. Only works properly when all weights are all between 0 and 1.
        maxiter: Maximum number of iterations for aipy.deconv.clean to converge.
        gain: The fraction of a residual used in each iteration. If this is too low, clean takes
            unnecessarily long. If it is too high, clean does a poor job of deconvolving.
        filt2d_mode : str, only applies if clean2d == True. options = ['rect', 'plus']
            If 'rect', a 2D rectangular filter is constructed in fourier space (default).
            If 'plus', the 'rect' filter is first constructed, but only the plus-shaped
            slice along 0 delay and fringe-rate is kept.
        edgecut_low : int, number of bins to consider zero-padded at low-side of the FFT axis,
            such that the windowing function smoothly approaches zero. For 2D cleaning, can
            be fed as a tuple specifying edgecut_low for first and second FFT axis.
        edgecut_hi : int, number of bins to consider zero-padded at high-side of the FFT axis,
            such that the windowing function smoothly approaches zero. For 2D cleaning, can
            be fed as a tuple specifying edgecut_hi for first and second FFT axis.
        alpha : float, if window is tukey, this is its alpha parameter.
        add_clean_residual : bool, if True, adds the residual within the CLEAN bounds
            in fourier space to the CLEAN model (and sets residual within CLEAN bounds
            to zero). This is more in-line with a standard filtering operation, rather
            than a CLEAN operation. If False, residual is not added to the CLEAN model.
Returns: d_mdl: CLEAN model -- best fit low-pass filter components (CLEAN model) in real space d_res: CLEAN residual -- difference of data and d_mdl, nulled at flagged channels info: dictionary (1D case) or list of dictionaries (2D case) with CLEAN metadata """ # print deprecation warning warn("Warning: dspec.vis_filter will soon be deprecated in favor of filtering.vis_filter", DeprecationWarning) # type checks timeclean = False if dt is not None or max_frate is not None: timeclean = True assert max_frate is not None and dt is not None, "Must specify both max_frate and dt for time cleaning" freqclean = False if sdf is not None or bl_len is not None: freqclean = True assert sdf is not None and bl_len is not None, "Must specify both bl_len and sdf for frequency cleaning" clean2d = timeclean and freqclean # 1D clean if not clean2d: # time clean if timeclean: mdl, res, info = fringe_filter(data, wgts, max_frate, dt=dt, tol=tol, window=window, edgecut_low=edgecut_low, edgecut_hi=edgecut_hi, skip_wgt=skip_wgt, maxiter=maxiter, gain=gain, alpha=alpha, mode='clean') # freq clean elif freqclean: mdl, res, info = wedge_filter(data, wgts, standoff=standoff, horizon=horizon, sdf=sdf, tol=tol, window=window, edgecut_low=edgecut_low, bl_len=bl_len, edgecut_hi=edgecut_hi, skip_wgt=skip_wgt, maxiter=maxiter, gain=gain, alpha=alpha, mode='clean') # 2D clean else: # get bl delay bl_dly = _get_bl_dly(bl_len, horizon=horizon, standoff=standoff, min_dly=min_dly) freqs = np.arange(data.shape[1]) * sdf times = np.arange(data.shape[0]) * dt if isinstance(max_frate, (list,tuple)): fc = (max_frate[1] - max_frate[0]) / 2. fw = np.abs(max_frate[1] + max_frate[0]) / 2. else: fc = 0. 
fw = max_frate # 2D clean if isinstance(edgecut_hi, (int, np.int)): edgecut_hi = (edgecut_hi, edgecut_hi) if isinstance(edgecut_low, (int, np.int)): edgecut_low = (edgecut_low, edgecut_low) if isinstance(window, str): window = (window, window) mdl, res, info = fourier_filter(data=data, wgts=wgts, x=[times, freqs], filter_centers=[[fc], [0.]], filter_half_widths=[[fw], [bl_dly]], filter_dims=[1, 0], tol=tol, window=window, edgecut_low=edgecut_low, edgecut_hi=edgecut_hi, maxiter=maxiter, gain=gain, filt2d_mode=filt2d_mode, alpha=alpha, add_clean_residual=add_clean_residual, mode='clean') return mdl, res, info def _get_bl_dly(bl_len, horizon=1., standoff=0., min_dly=0.): # construct baseline delay bl_dly = horizon * bl_len + standoff # check minimum delay bl_dly = np.max([bl_dly, min_dly]) return bl_dly def gen_window(window, N, alpha=0.5, edgecut_low=0, edgecut_hi=0, normalization=None, **kwargs): """ Generate a 1D window function of length N. Args: window : str, window function N : int, number of channels for windowing function. edgecut_low : int, number of bins to consider as zero-padded at the low-side of the array, such that the window smoothly connects to zero. edgecut_hi : int, number of bins to consider as zero-padded at the high-side of the array, such that the window smoothly connects to zero. alpha : if window is 'tukey', this is its alpha parameter. normalization : str, optional set to 'rms' to divide by rms and 'mean' to divide by mean. 
""" if normalization is not None: if normalization not in ["mean", "rms"]: raise ValueError("normalization must be one of ['rms', 'mean']") # parse multiple input window or special windows w = np.zeros(N, dtype=np.float) Ncut = edgecut_low + edgecut_hi if Ncut >= N: raise ValueError("Ncut >= N for edgecut_low {} and edgecut_hi {}".format(edgecut_low, edgecut_hi)) if edgecut_hi > 0: edgecut_hi = -edgecut_hi else: edgecut_hi = None if window in ['none', None, 'None', 'boxcar', 'tophat']: w[edgecut_low:edgecut_hi] = windows.boxcar(N - Ncut) elif window in ['blackmanharris', 'blackman-harris', 'bh', 'bh4']: w[edgecut_low:edgecut_hi] = windows.blackmanharris(N - Ncut) elif window in ['hanning', 'hann']: w[edgecut_low:edgecut_hi] = windows.hann(N - Ncut) elif window == 'tukey': w[edgecut_low:edgecut_hi] = windows.tukey(N - Ncut, alpha) elif window in ['blackmanharris-7term', 'blackman-harris-7term', 'bh7']: # https://ieeexplore.ieee.org/document/293419 a_k = [0.27105140069342, 0.43329793923448, 0.21812299954311, 0.06592544638803, 0.01081174209837, 0.00077658482522, 0.00001388721735] w[edgecut_low:edgecut_hi] = windows.general_cosine(N - Ncut, a_k, True) elif window in ['cosinesum-9term', 'cosinesum9term', 'cs9']: # https://ieeexplore.ieee.org/document/940309 a_k = [2.384331152777942e-1, 4.00554534864382e-1, 2.358242530472107e-1, 9.527918858383112e-2, 2.537395516617152e-2, 4.152432907505835e-3, 3.68560416329818e-4, 1.38435559391703e-5, 1.161808358932861e-7] w[edgecut_low:edgecut_hi] = windows.general_cosine(N - Ncut, a_k, True) elif window in ['cosinesum-11term', 'cosinesum11term', 'cs11']: # https://ieeexplore.ieee.org/document/940309 a_k = [2.151527506679809e-1, 3.731348357785249e-1, 2.424243358446660e-1, 1.166907592689211e-1, 4.077422105878731e-2, 1.000904500852923e-2, 1.639806917362033e-3, 1.651660820997142e-4, 8.884663168541479e-6, 1.938617116029048e-7, 8.482485599330470e-10] w[edgecut_low:edgecut_hi] = windows.general_cosine(N - Ncut, a_k, True) else: try: # return 
any single-arg window from windows w[edgecut_low:edgecut_hi] = getattr(windows, window)(N - Ncut) except AttributeError: raise ValueError("Didn't recognize window {}".format(window)) if normalization == 'rms': w /= np.sqrt(np.mean(np.abs(w)**2.)) if normalization == 'mean': w /= w.mean() return w def fourier_operator(dsize, nmax, nmin=None, L=None): """ Return a complex Fourier analysis operator for a given data dimension and number of Fourier modes. Parameters ---------- dsize : int Size of data array. nmax : int Maximum Fourier mode number. Modes will be constructed between [nmin, nmax], for a total of (nmax - min) + 1 modes. nmin : int, optional, default nmin = nmax minimum integer of fourier mode numbers. Modes will be constructed between [nmin, nmax] for total of (nmax - nmin) + 1 modes. L : int, optional, default = None fundamental period of Fourier modes to fit too. if none, default to ndata. Returns ------- F : array_like Fourier matrix operator, of shape (Nmodes, Ndata) """ nu = np.arange(dsize) if L is None: L = nu[-1] - nu[0] if nmin is None: nmin = -nmax # Construct frequency array (*not* in physical frequency units) # Build matrix operator for complex Fourier basis n = np.arange(nmin, nmax + 1) F = np.array([np.exp(-1.j * _n * nu / L) for _n in n]) return F def fourier_model(cn, Nfreqs): """ Calculate a 1D (complex) Fourier series model from a set of complex coefficients. Parameters ---------- coeffs : array_like Array of complex Fourier coefficients, ordered from (-n, n), where n is the highest harmonic mode in the model. Nfreqs : int Number of frequency channels to model. The Fourier modes are integer harmonics within this frequency window. Returns ------- model : array_like Fourier model constructed from the input harmonic coefficients. Shape: (Nfreqs,). """ try: cn_shape = cn.shape except AttributeError: raise ValueError("cn must be a 1D array") if len(cn.shape) != 1: raise ValueError("cn must be a 1D array") nmax = (cn.size - 1) // 2 # Max. 
    # Build matrix operator for complex Fourier basis
    F = fourier_operator(dsize=Nfreqs, nmax=nmax)

    # Return model
    return np.dot(cn, F)


def delay_filter_leastsq_1d(data, flags, sigma, nmax, add_noise=False,
                            cn_guess=None, use_linear=True, operator=None,
                            fundamental_period=None):
    """
    Fit a smooth model to 1D complex-valued data with flags, using a linear
    least-squares solver. The model is a Fourier series up to a specified order.
    As well as calculating a best-fit model, this will also return a copy of the
    data with flagged regions filled in ('in-painted') with the smooth solution.

    Optionally, you can also add an uncorrelated noise realization on top of the
    smooth model in the flagged region.

    Parameters
    ----------
    data : array_like, complex
        Complex visibility array as a function of frequency, with shape (Nfreqs,).
    flags : array_like, bool
        Boolean flags with the same shape as data.
    sigma : float or array_like
        Noise standard deviation, in the same units as the data. If float,
        assumed to be homogeneous in frequency. If array_like, must have
        the same shape as the data.

        Note that the choice of sigma will have some bearing on how sensitive
        the fits are to small-scale variations.
    nmax: int or 2-tuple of ints
        Max. order of Fourier modes to fit. A model with complex Fourier modes
        between [-n, n] will be fitted to the data, where the Fourier basis
        functions are ~ exp(-i 2 pi n nu / (Delta nu). If 2-tuple fit [-n0, n1].
    add_noise : bool, optional
        Whether to add an unconstrained noise realization to the in-painted areas.
        This uses sigma to set the noise standard deviation. Default: False.
    cn_guess : array_like, optional
        Initial guess for the series coefficients. If None, zeros will be used.
        A sensible choice of cn_guess can speed up the solver significantly.
        Default: None.
    use_linear : bool, optional
        Whether to use a fast linear least-squares solver to fit the Fourier
        coefficients, or a slower generalized least-squares solver.
        Default: True.
    operator : array_like, optional
        Fourier basis operator matrix. This is used to pass in a pre-computed
        matrix operator when calling from other functions, e.g. from
        delay_filter_leastsq. Operator must have shape (Nmodes, Nfreq), where
        Nmodes = 2*nmax + 1. A complex Fourier basis will be automatically
        calculated if no operator is specified.
    fundamental_period : int, optional, default = None
        fundamental period of Fourier modes to fit too.
        if none, default to ndata.

    Returns
    -------
    model : array_like
        Best-fit model, composed of a sum of Fourier modes.
    model_coeffs : array_like
        Coefficients of Fourier modes, ordered from modes [-nmax, +nmax].
    data_out : array_like
        In-painted data.
    """
    # Construct Fourier basis operator if not specified.
    # nmax may be an int n (fit modes [-n, n]) or a (nmin, nmax) pair.
    if isinstance(nmax, tuple) or isinstance(nmax, list):
        nmin = nmax[0]
        nmax = nmax[1]
        assert isinstance(nmin, int) and isinstance(nmax, int), "Provide integers for nmax and nmin"
    elif isinstance(nmax, int):
        nmin = -nmax
    if operator is None:
        F = fourier_operator(dsize=data.size, nmin = nmin, nmax=nmax, L=fundamental_period)
    else:
        # A pre-computed operator was supplied; check it has one row per mode.
        F = operator
        cshape = nmax - nmin + 1
        if F.shape[0] != cshape:
            raise ValueError("Fourier basis operator has the wrong shape. "
                             "Must have shape (Nmodes, Nfreq).")

    # Turn flags into a mask
    w = np.logical_not(flags)

    # Define model and likelihood function
    def model(cn, F):
        # Fourier-series model: coefficients dotted with the basis operator
        return np.dot(cn, F)

    nmodes = nmax - nmin + 1

    # Initial guess for Fourier coefficients (real + imaginary blocks)
    cn_in = np.zeros(2 * nmodes)
    if cn_guess is not None:
        if cn_in.size != 2 * cn_guess.size:
            raise ValueError("cn_guess must be of size %s" % (cn_in.size / 2))
        cn_in[:cn_guess.shape[0]] = cn_guess.real
        cn_in[cn_guess.shape[0]:] = cn_guess.imag

    # Make sure sigma is the right size for matrix broadcasting
    if isinstance(sigma, np.ndarray):
        mat_sigma = np.tile(sigma, (nmodes, 1)).T
    else:
        mat_sigma = sigma

    # Run least-squares fit
    if use_linear:
        # Solve as linear system: design matrix is the (masked) transposed basis,
        # both sides inverse-variance weighted.
        A = np.atleast_2d(w).T * F.T
        res = lsq_linear(A / mat_sigma ** 2., w * data / sigma ** 2.)
        cn_out = res.x
    else:
        # Use full non-linear leastsq fit
        def loglike(cn):
            """
            Simple log-likelihood, assuming Gaussian data. Calculates:
                logL = -0.5 [w*(data - model)]^2 / sigma^2.
            """
            # Need to do real and imaginary parts separately, otherwise
            # leastsq() fails
            _delta = w * (data - model(cn[:nmodes] + 1.j * cn[nmodes:], F))
            delta = np.concatenate((_delta.real / sigma, _delta.imag / sigma))
            return -0.5 * delta**2.

        # Do non-linear least-squares calculation
        cn, stat = leastsq(loglike, cn_in)
        cn_out = cn[:nmodes] + 1.j * cn[nmodes:]

    # Inject smooth best-fit model into masked areas
    bf_model = model(cn_out, F)
    data_out = data.copy()
    data_out[flags] = bf_model[flags]

    # Add noise to in-painted regions if requested
    if add_noise:
        noise = np.random.randn(np.sum(flags)) \
            + 1.j * np.random.randn(np.sum(flags))
        if isinstance(sigma, np.ndarray):
            data_out[flags] += sigma[flags] * noise
        else:
            data_out[flags] += sigma * noise

    # Return coefficients and best-fit model
    return bf_model, cn_out, data_out


def delay_filter_leastsq(data, flags, sigma, nmax, add_noise=False,
                         cn_guess=None, use_linear=True, operator=None,
                         fundamental_period=None):
    """
    Fit a smooth model to each 1D slice of 2D complex-valued data with flags,
    using a linear least-squares solver. The model is a Fourier series up to a
    specified order. As well as calculating a best-fit model, this will also
    return a copy of the data with flagged regions filled in ('in-painted')
    with the smooth solution.

    Optionally, you can also add an uncorrelated noise realization on top of
    the smooth model in the flagged region.

    N.B. This is just a wrapper around delay_filter_leastsq_1d() but with some
    time-saving precomputations. It fits to each 1D slice of the data
    individually, and does not perform a global fit to the 2D data.

    Parameters
    ----------
    data : array_like, complex
        Complex visibility array as a function of frequency, with shape
        (Ntimes, Nfreqs).
    flags : array_like, bool
        Boolean flags with the same shape as data.
    sigma : float or array_like
        Noise standard deviation, in the same units as the data. If float,
        assumed to be homogeneous in frequency. If array_like, must have
        the same shape as the data.

        Note that the choice of sigma will have some bearing on how sensitive
        the fits are to small-scale variations.
    nmax: int
        Max. order of Fourier modes to fit.
A model with complex Fourier modes between [-n, n] will be fitted to the data, where the Fourier basis functions are ~ exp(-i 2 pi n nu / (Delta nu). add_noise : bool, optional Whether to add an unconstrained noise realization to the in-painted areas. This uses sigma to set the noise standard deviation. Default: False. cn_guess : array_like, optional Initial guess for the series coefficients of the first row of the input data. If None, zeros will be used. Default: None. use_linear : bool, optional Whether to use a fast linear least-squares solver to fit the Fourier coefficients, or a slower generalized least-squares solver. Default: True. operator : array_like, optional Fourier basis operator matrix. Must have shape (Nmodes, Nfreq), where Nmodes = 2*nmax + 1. A complex Fourier basis will be used by default. fundamental_period : int, optional, default = None fundamental period of Fourier modes to fit too. if none, default to ndata. Returns ------- model : array_like Best-fit model, composed of a sum of Fourier modes. Same shape as the data. model_coeffs : array_like Coefficients of Fourier modes, ordered from modes [-n, +n]. data_out : array_like In-painted data. 
""" if isinstance(nmax, tuple) or isinstance(nmax, list): nmin = nmax[0] nmax = nmax[1] assert isinstance(nmin, int) and isinstance(nmax, int), "Provide integers for nmax and nmin" elif isinstance(nmax, int): nmin = -nmax # Construct and cache Fourier basis operator (for speed) if operator is None: F = fourier_operator(dsize=data.shape[1], nmax=nmax, nmin=nmin, L=fundamental_period) else: # delay_filter_leastsq_1d will check for correct dimensions F = operator nmodes = nmax - nmin + 1 # Array to store in-painted data inp_data = np.zeros(data.shape, dtype=np.complex) cn_array = np.zeros((data.shape[0], nmodes), dtype=np.complex) mdl_array = np.zeros(data.shape, dtype=np.complex) # Loop over array cn_out = None for i in range(data.shape[0]): bf_model, cn_out, data_out = delay_filter_leastsq_1d( data[i], flags[i], sigma=sigma, nmax=(nmin, nmax), add_noise=add_noise, use_linear=use_linear, cn_guess=cn_out, operator=F, fundamental_period=fundamental_period) inp_data[i, :] = data_out cn_array[i, :] = cn_out mdl_array[i, :] = bf_model return mdl_array, cn_array, inp_data def _fit_basis_1d(x, y, w, filter_centers, filter_half_widths, basis_options, suppression_factors=None, hash_decimal=10, method='leastsq', basis='dft', cache=None): """ A 1d linear-least-squares fitting function for computing models and residuals for fitting of the form y_model = A @ c where A is a design matrix encoding our choice for a basis functions and y_model is a fitted version of the data and c is a set of fitting coefficients determined by c = [A^T w A]^{-1} A^T w y where y is the original data and w is a diagonal matrix of weights for each channel in y. Currently supports fitting of dpss and dft modes. Parameters ---------- x: array-like x-axis of data to fit. y: array-like y-axis of data to fit. w: array-like data weights. 
    filter_centers': array-like
        list of floats specifying the centers of fourier windows with which to fit signals
    filter_half_widths': array-like
        list of floats specifying the half-widths of fourier windows to model.
    suppression_factors: array-like, optional
        list of floats for each basis function denoting the fraction of
        of each basis element that should be present in the fitted model
        If none provided, model will include 100% of each mode.
        It is sometimes useful, for renormalization reversibility
        to only include 1-\epsilon where \epsilon is a small number of each mode in the model.
    hash_decimal: number of decimals to round to for floating point keys.
    basis_options: dictionary
        basis specific options for fitting.
        The two bases currently supported are dft and dpss whose options are as follows:
            * 'dft':
                *'fundamental_period': float or 2-tuple
                    The fundamental_period of dft modes to fit. This is the
                    Fourier resolution of fitted fourier modes equal to
                    1/FP where FP is the fundamental period. For a standard
                    delay DFT FP = B where B is the visibility bandwidth
                    FP also sets the number of
                    modes fit within each window in 'filter_half_widths' will
                    equal fw / fundamental_period where fw is the filter width.
                    if filter2d, must provide a 2-tuple with fundamental_period
                    of each dimension.
            * 'dpss':
                The basis_options must include one and only one of the four options
                for specifying how to terminate the dpss series in each filter window.
                *'eigenval_cutoff': array-like
                    list of sinc_matrix eigenvalue cutoffs to use for included dpss modes.
                *'nterms': array-like
                    list of integers specifying the order of the dpss sequence to use in each
                    filter window.
                *'edge_supression': array-like
                    specifies the degree of supression that must occur to tones at the
                    filter edges to calculate the number of DPSS terms to fit in each sub-window.
                *'avg_suppression': list of floats, optional
                    specifies the average degree of suppression of tones inside of the filter edges
                    to calculate the number of DPSS terms. Similar to edge_supression but instead
                    checks the suppression of a since vector with equal contributions from
                    all tones inside of the filter width instead of a single tone.
    method: string
        specifies the fitting method to use. We currently support.
            *'leastsq' to perform iterative leastsquares fit to derive model.
                using scipy.optimize.leastsq
            *'matrix' derive model by directly calculate the fitting matrix
                [A^T W A]^{-1} A^T W and applying it to the y vector.

    Returns:
        model: array-like
            Ndata array of complex floats equal to interpolated model
        resid: array-like
            Ndata array of complex floats equal to y - model
        info: dictionary containing fitting arguments for reference.
              if 'matrix' method is used. Fields are
            * 'method' : method used to derive fits.
            * 'basis' : basis that the fits are in
            * 'filter_centers' : filtering centers argument
            * 'filter_half_widths' : filter_half_widths argument
            * 'suppression_factors' : suppression_factors argument
            * 'basis_options' : basis specific options dictionary
                                see dpss_operator and dft_operator.
            * 'amat' : A matrix used for fitting.
            * 'fitting_matrix' : matrix used for fitting (A [ATA]^-1 AT)
                                 if the method == 'matrix'.
    """
    if cache is None:
        cache = {}
    # info starts as a copy of the basis options and is augmented with fit metadata below
    info = copy.deepcopy(basis_options)
    if basis.lower() == 'dft':
        amat = dft_operator(x, filter_centers=filter_centers,
                            filter_half_widths=filter_half_widths,
                            cache=cache, **basis_options)
    elif basis.lower() == 'dpss':
        amat, nterms = dpss_operator(x, filter_centers=filter_centers,
                                     filter_half_widths=filter_half_widths,
                                     cache=cache, **basis_options)
        info['nterms'] = nterms
    else:
        raise ValueError("Specify a fitting basis in supported bases: ['dft', 'dpss']")
    if suppression_factors is None:
        # no suppression requested: keep 100% of every mode
        suppression_vector = np.ones(amat.shape[1])
    else:
        # per-window suppression: each window contributes (1 - sf) for each of its modes
        if basis.lower() == 'dft':
            suppression_vector = np.hstack([1-sf * np.ones(2*int(np.ceil(fw * basis_options['fundamental_period'])))\
                                            for sf,fw in zip(suppression_factors, filter_half_widths)])
        elif basis.lower() == 'dpss':
            suppression_vector = np.hstack([1-sf * np.ones(nterm) for sf, nterm in zip(suppression_factors, nterms)])
    info['method'] = method
    info['basis'] = basis
    info['filter_centers'] = filter_centers
    info['filter_half_widths'] = filter_half_widths
    info['suppression_factors'] = suppression_factors
    info['basis_options'] = basis_options
    info['amat'] = amat
    info['skipped'] = False
    wmat = np.diag(w)
    if method == 'leastsq':
        # weight the design matrix rows by the per-channel weights
        a = np.atleast_2d(w).T * amat
        try:
            res = lsq_linear(a, w * y)
            cn_out = res.x
        # np.linalg.LinAlgError catches "SVD did not converge."
        # which can happen if the solution is under-constrained.
        # also handle nans and infs in the data here too.
        except (np.linalg.LinAlgError, ValueError, TypeError) as err:
            # best-effort: record the failure in info rather than raising
            warn(f"{err} -- recording skipped integration in info and setting to zero.")
            cn_out = 0.0
            info['skipped'] = True
    elif method == 'matrix':
        # cache key uniquely identifying this fitting-matrix configuration
        fm_key = _fourier_filter_hash(filter_centers=filter_centers, filter_half_widths=filter_half_widths,
                                      filter_factors=suppression_vector, x=x, w=w, hash_decimal=hash_decimal,
                                      label='fitting matrix', basis=basis)
        if basis.lower() == 'dft':
            fm_key = fm_key + (basis_options['fundamental_period'], )
        elif basis.lower() == 'dpss':
            fm_key = fm_key + tuple(nterms)
        fmat = fit_solution_matrix(wmat, amat, cache=cache, fit_mat_key=fm_key)
        info['fitting_matrix'] = fmat
        cn_out = fmat @ y
    else:
        raise ValueError("Provided 'method', '%s', is not in ['leastsq', 'matrix']."%(method))
    model = amat @ (suppression_vector * cn_out)
    resid = (y - model) * (~np.isclose(w, 0, atol=1e-10)).astype(float)  #suppress flagged residuals (such as RFI)
    return model, resid, info


def _clean_filter(x, data, wgts, filter_centers, filter_half_widths,
                  clean2d=False, tol=1e-9, window='none',
                  skip_wgt=0.1, maxiter=100, gain=0.1, filt2d_mode='rect',
                  alpha=0.5, edgecut_low=0, edgecut_hi=0,
                  add_clean_residual=False, zero_residual_flags=True):
    '''
    core cleaning functionality
    Input sanitation not implemented. Should be called through
    fourier_filter and the higher level functions that call fourier_filter.

    Parameters
    ----------
    x : array-like (or 2-tuple/list of arrays for filter2d)
        x-values of data to be cleaned. Each x-axis must be equally spaced.
    data : array-like, complex, 1d or 2d numpy array of data to be filtered.
    wgts : array-like, float, 1d or 2d numpy array of wgts for data.
    filter_centers : list of floats (1d clean) 2-list of lists of floats (2d clean)
        centers of filtering regions in units of 1 / x-units
    filter_half_widths : list of floats (1d clean) 2-list of lists of floats (2d clean)
        half-widths of filtering regions in units of 1 / x-units
    clean2d : bool, optional, specify if 2dclean is to be performed.
        if False, just clean axis -1.
    tol : float, tolerance parameter for clean.
    window : str, apodization to perform on data before cleaning.
    skip_wgt : float, If less then skip_wgt fraction of data is flagged, skip the clean.
    maxiter : int, maximum number of clean iterations.
    gain : float, fraction of detected peak to subtract on each clean iteration.
    filt2d_mode : str, only applies if clean2d == True. options = ['rect', 'plus']
        If 'rect', a 2D rectangular filter is constructed in fourier space (default).
        If 'plus', the 'rect' filter is first constructed, but only the plus-shaped
        slice along 0 delay and fringe-rate is kept.
    edgecut_low : int, number of bins to consider zero-padded at low-side of the FFT axis,
        such that the windowing function smoothly approaches zero. For 2D cleaning, can
        be fed as a tuple specifying edgecut_low for first and second FFT axis.
    edgecut_hi : int, number of bins to consider zero-padded at high-side of the FFT axis,
        such that the windowing function smoothly approaches zero. For 2D cleaning, can
        be fed as a tuple specifying edgecut_hi for first and second FFT axis.
    add_clean_residual : bool, if True, adds the CLEAN residual within the CLEAN bounds
        in fourier space to the CLEAN model. Note that the residual actually returned is
        not the CLEAN residual, but the residual in input data space.
    zero_residual_flags : bool, optional.
        If true, set flagged channels in the residual equal to zero.
        Default is True.

    Returns:
        d_mdl: CLEAN model -- best fit low-pass filter components (CLEAN model) in real space
        d_res: CLEAN residual -- difference of data and d_mdl, nulled at flagged channels
        info: dictionary (1D case) or list of dictionaries (2D case) with CLEAN metadata
    '''
    # NOTE(review): aipy is a third-party dependency imported lazily here — confirm it
    # is declared as an install requirement for the CLEAN mode.
    import aipy
    if not clean2d:
        # promote the 1d problem to the 2d data layout: axis 0 is a dummy axis
        #pad = [0, pad]
        _x = [np.zeros(data.shape[0]), np.fft.fftfreq(len(x), x[1]-x[0])]
        x = [np.zeros(data.shape[0]), x]
        edgecut_hi = [ 0, edgecut_hi ]
        edgecut_low = [ 0, edgecut_low ]
        filter_centers = [[0.], copy.deepcopy(filter_centers)]
        filter_half_widths = [[np.inf], copy.deepcopy(filter_half_widths)]
        window = ['none', window]
    else:
        if not np.all(np.isclose(np.diff(x[1]), np.mean(np.diff(x[1])))):
            raise ValueError("Data must be equally spaced for CLEAN mode!")
        _x = [np.fft.fftfreq(len(x[m]), x[m][1]-x[m][0]) for m in range(2)]
        #window_opt = window
        for m in range(2):
            if not np.all(np.isclose(np.diff(x[m]), np.mean(np.diff(x[m])))):
                raise ValueError("Data must be equally spaced for CLEAN mode!")
    # build apodization windows per axis; axis-0 window becomes a column vector
    window = [gen_window(window[m], data.shape[m], alpha=alpha,
                         edgecut_low=edgecut_low[m], edgecut_hi=edgecut_hi[m]) for m in range(2)]
    window[0] = np.atleast_2d(window[0]).T
    area_vecs = [ np.zeros(len(_x[m])) for m in range(2) ]
    #set area equal to one inside of filtering regions
    info = {}
    info['filter_params'] = {'axis_0':{}, 'axis_1':{}}
    info['clean_status'] = {'axis_0':{}, 'axis_1':{}}
    info['status'] = {'axis_0':{}, 'axis_1':{}}
    if filt2d_mode == 'rect' or not clean2d:
        for m in range(2):
            for fc, fw in zip(filter_centers[m], filter_half_widths[m]):
                # NOTE(review): this assignment overwrites area_vecs[m] on every
                # iteration, so only the LAST (fc, fw) window per axis survives —
                # looks like it should accumulate (|= or +=); confirm intent.
                area_vecs[m] = _get_filter_area(x[m], fc, fw)
        area = np.outer(area_vecs[0], area_vecs[1])
    elif filt2d_mode == 'plus' and clean2d:
        area = np.zeros(data.shape)
        #construct and add a 'plus' for each filtering window pair in each dimension.
        for fc0, fw0 in zip(filter_centers[0], filter_half_widths[0]):
            for fc1, fw1 in zip(filter_centers[1], filter_half_widths[1]):
                area_temp = np.zeros(area.shape)
                if fc0 >= _x[0].min() and fc0 <= _x[0].max():
                    #generate area vector centered at zero
                    av = _get_filter_area(x[1], fc1, fw1)
                    area_temp[np.argmin(np.abs(_x[0]-fc0)), :] = av
                if fc1 >= _x[1].min() and fc1 <= _x[1].max():
                    #generate area vector centered at zero
                    av = _get_filter_area(x[0], fc0, fw0)
                    area_temp[:, np.argmin(np.abs(_x[1]-fc1))] = av
                area += area_temp
        area = (area>0.).astype(int)
    else:
        raise ValueError("%s is not a valid filt2d_mode! choose from ['rect', 'plus']"%(filt2d_mode))
    # transform windowed data/weights to fourier space, where CLEAN operates
    if clean2d:
        _wgts = np.fft.ifft2(window[0] * wgts * window[1])
        _data = np.fft.ifft2(window[0] * data * wgts * window[1])
    else:
        _wgts = np.fft.ifft(window[0] * wgts * window[1], axis=1)
        _data = np.fft.ifft(window[0] * wgts * data * window[1], axis=1)
    _d_cl = np.zeros_like(_data)
    _d_res = np.zeros_like(_data)
    if not clean2d:
        for i, _d, _w, _a in zip(np.arange(_data.shape[0]).astype(int), _data, _wgts, area):
            # we skip steps that might trigger infinite CLEAN loops or divergent behavior.
            # if the weights sum up to a value close to zero (most of the data is flagged)
            # or if the data itself is close to zero.
            # (_w[0] is the DC term of the ifft of wgts, i.e. the mean weight of the row)
            if _w[0] < skip_wgt or np.all(np.isclose(_d, 0.)):
                _d_cl[i] = 0.
                _d_res[i] = _d
                info['status']['axis_1'][i] = 'skipped'
            else:
                _d_cl[i], _info = aipy.deconv.clean(_d, _w, area=_a, tol=tol, stop_if_div=False,
                                                    maxiter=maxiter, gain=gain)
                _d_res[i] = _info['res']
                _info['skipped'] = False
                # drop the bulky residual array from the per-row info dict
                del(_info['res'])
                info['clean_status']['axis_1'][i] = _info
                info['status']['axis_1'][i] = 'success'
    elif clean2d:
        # we skip 2d cleans if all the data is close to zero (which can cause an infinite clean loop)
        # or the weights are all equal to zero which can also lead to a clean loop.
        # the maximum of _wgts should be the average value of all cells in 2d wgts.
        # since it is the 2d fft of wgts.
if not np.all(np.isclose(_data, 0.)) and np.abs(_wgts).max() > skip_wgt: _d_cl, _info = aipy.deconv.clean(_data, _wgts, area=area, tol=tol, stop_if_div=False, maxiter=maxiter, gain=gain) _d_res = _info['res'] del(_info['res']) info['clean_status']['axis_1'] = _info info['clean_status']['axis_0'] = info['clean_status']['axis_1'] info['status']['axis_1'] = {i:'success' for i in range(_data.shape[0])} info['status']['axis_0'] = {i:'success' for i in range(_data.shape[1])} else: info['clean_status']['axis_0'] = {'skipped':True} info['clean_status']['axis_1'] = {'skipped':True} info['status']['axis_1'] = {i:'skipped' for i in range(_data.shape[0])} info['status']['axis_0'] = {i:'skipped' for i in range(_data.shape[1])} _d_cl = np.zeros_like(_data) _d_res = np.zeros_like(_d_cl) if add_clean_residual: _d_cl = _d_cl + _d_res * area if clean2d: model = np.fft.fft2(_d_cl) else: model = np.fft.fft(_d_cl, axis=1) #transpose back if filtering the 0th dimension. residual = (data - model) if zero_residual_flags: windmat = np.outer(window[0], window[1]) residual *= (~np.isclose(wgts * windmat, 0.0, atol=1e-10)).astype(float) return model, residual, info def _fit_basis_2d(x, data, wgts, filter_centers, filter_half_widths, basis_options, suppression_factors=None, method='leastsq', basis='dft', cache=None, filter_dims = 1, skip_wgt=0.1, max_contiguous_edge_flags=5, zero_residual_flags=True): """ A 1d linear-least-squares fitting function for computing models and residuals for fitting of the form y_model = A @ c where A is a design matrix encoding our choice for a basis functions and y_model Parameters ---------- x: array-like or 2-tuple/2-list x-axis of data to fit. if more then one filter_dim, must provide 2-tuple or 2-list with x data: array-like data to fit, should be an Ntimes x Nfreqs array. wgts: array-like data weights. 
filter_centers: array-like
    list of floats specifying the centers of fourier windows with which to fit signals
filter_half_widths: array-like
    list of floats specifying the half-widths of fourier windows to model.
suppression_factors: array-like, optional
    list of floats for each basis function denoting the fraction of each basis
    element that should be present in the fitted model.
    If none provided, model will include 100% of each mode.
    It is sometimes useful, for renormalization reversibility, to only include
    1-\epsilon, where \epsilon is a small number, of each mode in the model.
basis_options: dictionary
    basis specific options for fitting.
    The two bases currently supported are dft and dpss whose options are as follows:
        * 'dft':
            *'fundamental_period': float or 2-list/tuple
                The fundamental_period of dft modes to fit. This is the Fourier
                resolution of fitted fourier modes, equal to 1/FP where FP is the
                fundamental period. For a standard delay DFT, FP = B where B is the
                visibility bandwidth. FP also sets the number of modes fit within
                each window in 'filter_half_widths', which will equal
                fw / fundamental_period where fw is the filter width.
                if filter2d, must provide a 2-tuple with the fundamental_period
                of each dimension.
        * 'dpss':
            The basis_options must include one and only one of the four options
            for specifying how to terminate the dpss series in each filter window.
            *'eigenval_cutoff': array-like
                list of sinc_matrix eigenvalue cutoffs to use for included dpss modes.
                if 2d fit, should be a 2-list with each element giving a list of
                eigenval cutoffs for each dimension.
            *'nterms': array-like
                list of integers specifying the order of the dpss sequence to use in each
                filter window. if 2d fit, should be a 2-list of lists of nterms for each
                delay window in each dimension.
            *'edge_supression': array-like
                specifies the degree of suppression that must occur to tones at the
                filter edges to calculate the number of DPSS terms to fit in each
                sub-window.
if 2d fit, should be a 2-list of lists of edge_suppression thresholds in each dim *'avg_suppression': list of floats, optional specifies the average degree of suppression of tones inside of the filter edges to calculate the number of DPSS terms. Similar to edge_supression but instead checks the suppression of a since vector with equal contributions from all tones inside of the filter width instead of a single tone. if 2d fit, should be a list of lists of avg_suppression thressholds for each. method: string specifies the fitting method to use. We currently support. *'leastsq' to perform iterative leastsquares fit to derive model. using scipy.optimize.leastsq *'matrix' derive model by directly calculate the fitting matrix [A^T W A]^{-1} A^T W and applying it to the y vector. filter_dim, int optional specify dimension to filter. default 1, and if 2d filter, will use both dimensions. skip_wgt: skips filtering rows with very low total weight (unflagged fraction ~< skip_wgt). Model is left as 0s, residual is left as data, and info is {'skipped': True} for that time. Only works properly when all weights are all between 0 and 1. max_contiguous_edge_flags : int, optional if the number of contiguous samples at the edge is greater then this at either side, skip . zero_residual_flags : bool, optional. If true, set flagged channels in the residual equal to zero. Default is True. Returns ------- model: array-like Ndata array of complex floats equal to interpolated model resid: array-like Ndata array of complex floats equal to y - model info: dictionary with filtering parameters and a list of skipped_times and skipped_channels has the following fields * 'status': dict holding two sub-dicts status of filtering on each time/frequency step. - 'axis_0'/'axis_1': dict holding the status of time filtering for each time/freq step. Keys are integer index of each step and values are a string that is either 'success' or 'skipped'. 
* 'filter_params': dict holding the filtering parameters for each axis with the following sub-dicts. - 'axis_0'/'axis_1': dict holding filtering parameters for filtering over each respective axis. - 'filter_centers': centers of filtering windows. - 'filter_half_widths': half-widths of filtering regions for each axis. - 'suppression_factors': amount of suppression for each filtering region. - 'x': vector of x-values used to generate the filter. - 'basis': (if using dpss/dft) gives the filtering basis. - 'basis_options': the basis options used for dpss/dft mode. See dft_operator and dpss_operator for more details. """ if isinstance(filter_dims, (int, np.integer)): filter_dims = [filter_dims] if cache is None: cache={} info = {'status':{'axis_0':{}, 'axis_1':{}}} residual = np.zeros_like(data) filter2d = (0 in filter_dims and 1 in filter_dims) filter_dims = sorted(filter_dims)[::-1] #this will only happen if filter_dims is only zero! if filter_dims[0] == 0: data = data.T wgts = wgts.T if not filter2d: x = [np.zeros_like(x), x] filter_centers = [[], copy.deepcopy(filter_centers)] filter_half_widths = [[], copy.deepcopy(filter_half_widths)] suppression_factors = [[], copy.deepcopy(suppression_factors)] basis_options=[{}, basis_options] else: for k in basis_options: if not isinstance(basis_options[k], (tuple,list)) or not len(basis_options[k]) == 2: raise ValueError("basis_options values must be 2-tuple or 2-list for 2d filtering.") basis_options = [{k:basis_options[k][0] for k in basis_options}, {k:basis_options[k][1] for k in basis_options}] #filter -1 dimension model = np.zeros_like(data) for i, _y, _w, in zip(range(data.shape[0]), data, wgts): if np.count_nonzero(_w)/len(_w) >= skip_wgt and np.count_nonzero(_w[:max_contiguous_edge_flags]) > 0 \ and np.count_nonzero(_w[-max_contiguous_edge_flags:]) >0: model[i], _, info_t = _fit_basis_1d(x=x[1], y=_y, w=_w, filter_centers=filter_centers[1], filter_half_widths=filter_half_widths[1], 
suppression_factors=suppression_factors[1], basis_options=basis_options[1], method=method, basis=basis, cache=cache) if info_t['skipped']: info['status']['axis_1'][i] = 'skipped' else: info['status']['axis_1'][i] = 'success' else: info['status']['axis_1'][i] = 'skipped' #and if filter2d, filter the 0 dimension. Note that we feed in the 'model' #set wgts for time filtering to happen on skipped rows info['filter_params'] = {'axis_0':{}, 'axis_1':{}} if np.any([info['status']['axis_1'][i] == 'success' for i in info['status']['axis_1']]): info['filter_params']['axis_1']['method'] = info_t['method'] info['filter_params']['axis_1']['basis'] = info_t['basis'] info['filter_params']['axis_1']['filter_centers'] = info_t['filter_centers'] info['filter_params']['axis_1']['filter_half_widths'] = info_t['filter_half_widths'] info['filter_params']['axis_1']['suppression_factors'] = info_t['suppression_factors'] info['filter_params']['axis_1']['basis_options'] = info_t['basis_options'] info['filter_params']['axis_1']['mode'] = info_t['basis'] + '_' + method if filter2d: wgts_time = np.ones_like(wgts) for i in range(data.shape[0]): if info['status']['axis_1'][i] == 'skipped': wgts_time[i] = 0. 
for i, _y, _w, in zip(range(model.shape[1]), model.T, wgts_time.T): if np.count_nonzero(_w)/len(_w) >= skip_wgt and np.count_nonzero(_w[:max_contiguous_edge_flags]) > 0 \ and np.count_nonzero(_w[-max_contiguous_edge_flags:]) >0: model.T[i], _, info_t = _fit_basis_1d(x=x[0], y=_y, w=_w, filter_centers=filter_centers[0], filter_half_widths=filter_half_widths[0], suppression_factors=suppression_factors[0], basis_options=basis_options[0], method=method, basis=basis, cache=cache) if info_t['skipped']: info['status']['axis_0'][i] = 'skipped' else: info['status']['axis_0'][i] = 'success' else: info['status']['axis_0'][i] = 'skipped' if np.any([info['status']['axis_0'][i] == 'success' for i in info['status']['axis_0']]): info['filter_params']['axis_0']['method'] = info_t['method'] info['filter_params']['axis_0']['basis'] = info_t['basis'] info['filter_params']['axis_0']['filter_centers'] = info_t['filter_centers'] info['filter_params']['axis_0']['filter_half_widths'] = info_t['filter_half_widths'] info['filter_params']['axis_0']['suppression_factors'] = info_t['suppression_factors'] info['filter_params']['axis_0']['basis_options'] = info_t['basis_options'] residual = (data - model) * (np.abs(wgts) > 0).astype(float) #this will only happen if filter_dims is only zero! if filter_dims[0] == 0: data = data.T wgts = wgts.T model = model.T residual = residual.T for k in info: info[k]['axis_0'] = copy.deepcopy(info[k]['axis_1']) info[k]['axis_1'] = {} if zero_residual_flags: residual = residual * (~np.isclose(wgts, 0., atol=1e-10)).astype(float) # set residual to zero in flags. 
return model, residual, info def fit_solution_matrix(weights, design_matrix, cache=None, hash_decimal=10, fit_mat_key=None): """ Calculate the linear least squares solution matrix from a design matrix, A and a weights matrix W S = [A^T W A]^{-1} A^T W Parameters ---------- weights: array-like ndata x ndata matrix of data weights design_matrx: array-like ndata x n_fit_params matrix transforming fit_parameters to data cache: optional dictionary optional dictionary storing pre-computed fitting matrix. hash_decimal: int optional the number of decimals to use in hash for caching. default is 10 fit_mat_key: optional hashable variable optional key. If none is used, hash fit matrix against design and weighting matrix. Returns ----------- array-like n_fit_params x n_fit_params matrix S = [A^T W A]^{-1} A ^T W """ if cache is None: cache = {} ndata = weights.shape[0] if not weights.shape[0] == weights.shape[1]: raise ValueError("weights must be a square matrix") if not design_matrix.shape[0] == ndata: raise ValueError("weights matrix incompatible with design_matrix!") if fit_mat_key is None: opkey = ('fitting_matrix',) + tuple(np.round(weights.flatten(), hash_decimal))\ +tuple(np.round(design_matrix.flatten(), hash_decimal)) else: opkey = fit_mat_key if not opkey in cache: #check condition number cmat = np.conj(design_matrix.T) @ weights @ design_matrix #should there be a conjugation!?! if np.linalg.cond(cmat)>=1e9: warn('Warning!!!!: Poorly conditioned matrix! 
Your linear inpainting IS WRONG!') cache[opkey] = np.linalg.pinv(cmat) @ np.conj(design_matrix.T) @ weights else: try: cache[opkey] = np.linalg.inv(cmat) @ np.conj(design_matrix.T) @ weights except np.linalg.LinAlgError as error: print(error) cache[opkey] = None return cache[opkey] def dpss_operator(x, filter_centers, filter_half_widths, cache=None, eigenval_cutoff=None, edge_suppression=None, nterms=None, avg_suppression=None, xc=None, hash_decimal=10, xtol=1e-3): """ Calculates DPSS operator with multiple delay windows to fit data. Frequencies must be equally spaced (unlike Fourier operator). Users can specify how the DPSS series fits are cutoff in each delay-filtering window with one (and only one) of three conditions: eigenvalues in sinc matrix fall below a thresshold (eigenval_cutoff), user specified number of DPSS terms (nterms), xor the suppression of fourier tones at the filter edge by a user specified amount (edge_supression). Parameters ---------- x: array-like x values to evaluate operator at filter_centers: array-like list of floats of centers of delay filter windows in nanosec filter_half_widths: array-like list of floats of half-widths of delay filter windows in nanosec cache: dictionary, optional dictionary for storing operator matrices with keys tuple(x) + tuple(filter_centers) + tuple(filter_half_widths)\ + (series_cutoff_name,) = tuple(series_cutoff_values) eigenval_cutoff: list of floats, optional list of sinc matrix eigenvalue cutoffs to use for included dpss modes. nterms: list of integers, optional integer specifying number of dpss terms to include in each delay fitting block. edge_suppression: list of floats, optional specifies the degree of supression that must occur to tones at the filter edges to calculate the number of DPSS terms to fit in each sub-window. avg_suppression: list of floats, optional specifies the average degree of suppression of tones inside of the filter edges to calculate the number of DPSS terms. 
Similar to edge_suppression but instead checks the suppression of a sinc vector with equal contributions from all tones inside of the filter width instead of a single tone. xc: float optional hash_decimal: number of decimals to round for floating point dict keys. xtol: fraction of average diff that the diff between all x-values must be within the average diff to be considered equally spaced. Default is 1e-3 Returns ---------- 2-tuple First element: Design matrix for DPSS fitting. Ndata x (Nfilter_window * nterm) transforming from DPSS modes to data. Second element: list of integers with number of terms for each fourier window specified by filter_centers and filter_half_widths """ if cache is None: cache = {} #conditions for halting. crit_labels = ['eigenval_cutoff', 'nterms', 'edge_suppression', 'avg_suppression'] crit_list = [eigenval_cutoff, nterms, edge_suppression, avg_suppression] crit_provided = np.asarray([not crit is None for crit in crit_list]).astype(bool) #only allow the user to specify a single condition for cutting off DPSS modes to fit. crit_provided_name = [ label for m,label in enumerate(crit_labels) if crit_provided[m] ] crit_provided_value = [ crit for m,crit in enumerate(crit_list) if crit_provided[m] ] if np.count_nonzero(crit_provided) != 1: raise ValueError('Must only provide a single series cutoff condition. %d were provided: %s '%(np.count_nonzero(crit_provided), str(crit_provided_name))) opkey = _fourier_filter_hash(filter_centers=filter_centers, filter_half_widths=filter_half_widths, filter_factors=[0.], crit_name=crit_provided_name[0], x=x, w=None, hash_decimal=hash_decimal, label='dpss_operator', crit_val=tuple(crit_provided_value[0])) if not opkey in cache: # try placing x on a uniform grid. # x is a version of x with the in-between grid values filled in and inserted is a boolean vector # set to True wherever a value for x was inserted and False otherwise. 
x, _, _, inserted = place_data_on_uniform_grid(x, np.zeros(len(x)), np.ones(len(x))) # if this is not successful, then throw a value error.. if not np.allclose(np.diff(x), np.median(np.diff(x)), rtol=0., atol=np.abs(xtol * np.median(np.diff(x)))): #for now, don't support DPSS iterpolation unless x is equally spaced. #In principal, I should be able to compute off-grid DPSS points using #the fourier integral of the DPSWF raise ValueError('x values must be equally spaced for DPSS operator!') nf = len(x) df = np.abs(x[1]-x[0]) xg, yg = np.meshgrid(x,x) if xc is None: xc = x[nf//2] #determine cutoffs if nterms is None: nterms = [] for fn,fw in enumerate(filter_half_widths): dpss_vectors = windows.dpss(nf, nf * df * fw, nf) if not eigenval_cutoff is None: smat = np.sinc(2 * fw * (xg-yg)) * 2 * df * fw eigvals = np.sum((smat @ dpss_vectors.T) * dpss_vectors.T, axis=0) nterms.append(np.max(np.where(eigvals>=eigenval_cutoff[fn]))) if not edge_suppression is None: z0=fw * df edge_tone=np.exp(-2j*np.pi*np.arange(nf)*z0) fit_components = dpss_vectors * (dpss_vectors @ edge_tone) #this is a vector of RMS residuals of a tone at the edge of the delay window being fitted between 0 to nf DPSS components. rms_residuals = np.asarray([ np.sqrt(np.mean(np.abs(edge_tone - np.sum(fit_components[:k],axis=0))**2.)) for k in range(nf)]) nterms.append(np.max(np.where(rms_residuals>=edge_suppression[fn]))) if not avg_suppression is None: sinc_vector=np.sinc(2 * fw * df * (np.arange(nf)-nf/2.)) sinc_vector = sinc_vector / np.sqrt(np.mean(sinc_vector**2.)) fit_components = dpss_vectors * (dpss_vectors @ sinc_vector) #this is a vector of RMS residuals of vector with equal contributions from all tones within -fw and fw. rms_residuals = np.asarray([ np.sqrt(np.mean(np.abs(sinc_vector - np.sum(fit_components[:k],axis=0))**2.)) for k in range(nf)]) nterms.append(np.max(np.where(rms_residuals>=avg_suppression[fn]))) #next, construct A matrix. 
amat = [] for fc, fw, nt in zip(filter_centers,filter_half_widths, nterms): amat.append(np.exp(2j * np.pi * (yg[:,:nt]-xc) * fc ) * windows.dpss(nf, nf * df * fw, nt).T ) if len(amat) > 1: amat = np.hstack(amat) else: amat = amat[0] # we used the regularly spaced inserted grid to generate our fitting basis vectors # but we dont need them for the actual fit. # so here we keep only the non-inserted rows of the design matrix. amat = amat[~inserted, :] cache[opkey] = (amat, nterms) return cache[opkey] def dft_operator(x, filter_centers, filter_half_widths, cache=None, fundamental_period=None, xc=None, hash_decimal=10): """ Discrete Fourier operator with multiple flexible delay windows to fit data, potentially with arbitrary user provided frequencies. A_{nu tau} = e^{- 2 * pi * i * nu * tau / B} for a set of taus contained within delay regions centered at filter_centers and with half widths of filter_half_widths separated by 1/B where B is provided by fundamental_period. Parameters ---------- x: array-like floats. x values to evaluate operator at filter_centers: float or list float or list of floats of centers of delay filter windows in nanosec filter_half_widths: float or list float or list of floats of half-widths of delay filter windows in nanosec cache: dictionary, optional dictionary storing operator matrices with keys (x) + (filter_centers) + (filter_half_widths) + \ hash_decimal: int, optional number of decimals to use for floating point keys. Returns -------- Ndata x (Nfilter_window * nterm) design matrix transforming DFT coefficients to data. """ if cache is None: cache = {} #if no fundamental fourier period is provided, set fundamental period equal to measurement #bandwidth. 
if fundamental_period is None: fundamental_period = np.median(np.diff(x)) * len(x) if xc is None: xc = x[int(np.round(len(x)/2))] if isinstance(filter_centers, float): filter_centers = [filter_centers] if isinstance(filter_half_widths, float): filter_half_widths = [filter_half_widths] #each column is a fixed delay opkey = _fourier_filter_hash(filter_centers=filter_centers, filter_half_widths=filter_half_widths, filter_factors=[0.], x=x, w=None, hash_decimal=hash_decimal, label='dft_operator', fperiod=fundamental_period) if not opkey in cache: amat = [] for fc, fw in zip(filter_centers,filter_half_widths): bs = np.ceil(fw * fundamental_period) dlys = fc + np.arange(-bs, bs) / fundamental_period xg, dg = np.meshgrid(x-xc, dlys, indexing='ij') fblock = np.exp(2j * np.pi * dg * xg) amat.append(fblock) cache[opkey] = np.hstack(amat) return cache[opkey] def delay_interpolation_matrix(nchan, ndelay, wgts, fundamental_period=None, cache=None, window='none'): """ Compute a foreground interpolation matrix. Computes a foreground interpolation matrix that, when applied to data, interpolates over flagged channels with delays between -ndelay / fundamental_period, ndelay / fundamental_period The computed Matrix is equal to F = A @ [ A^T @ W @ A]^{-1} @ A^T W where A is an nchan \times 2ndelay design matrix y = A \tilde{y} y is the frequency representation of data and \tilde{y} is a 2xndelay vector holding the data's fourier coefficients. W is a diagonal matrix of frequency-data weights. The net effect of F, when applied to flagged data, is to solve for the fourier coefficients fitting unflagged channels ([ A^T @ W @ A]^{-1} @ A^T W solves the linear least squares problem) and then return the unflagged Fourier transform by apply A @ to the fitted coefficients, resulting in data that is linearly interpolated. !!! THIS FUNCTION WILL BE DEPRECATED BY fit_solution_matrix !!! Parameters ---------- nchan: int Number of frequency channels to interpolate over. 
ndelay: int number of delays to use in interpolation. wgts: float array wgts to be applied to each frequency channel. must have length equal to nchan. in addition, wgts should have more nonezero values then there are degrees of freedom (delay modes) to solve for. fundamental_period: float, optional fundamental period of Fourier modes to fit too. this sets the resolution in Fourier space. A standard DFT has a resolution of 1/N_{FP} = 1/N between fourier modes so that the DFT operator is D_{mn} = e^{-2 \pi i m n / N_{FP}}. fg_deconv_fundamental_period is N_{FP}. cache: dict, optional optional cache holding pre-computed matrices window: string, optional use a window to fit. Returns ---------- (nchan, nchan) numpy array that can be used to interpolate over channel gaps. """ if cache is None: cache = {} if not len(wgts) == nchan: raise ValueError("nchan must equal length of wgts") if fundamental_period is None: #recommend 2 x nchan or nchan. fundamental_period = 2*nchan #this tends to give well conditioned matrices. if not np.sum((np.abs(wgts) > 0.).astype(float)) >= 2*ndelay: raise ValueError("number of unflagged channels must be greater then or equal to number of delays") matkey = (nchan, ndelay, fundamental_period) + tuple(wgts) amat = dft_operator(x=np.arange(nchan)-nchan/2., filter_centers=[0.], filter_half_widths=[ndelay/fundamental_period], cache=cache, fundamental_period=fundamental_period) wmat = np.diag(wgts * gen_window(window, nchan)).astype(complex) fs = fit_solution_matrix(wmat, amat) if fs is not None: return amat @ fs else: return np.nan * np.ones((nchan, nchan)) def dayenu_mat_inv(x, filter_centers, filter_half_widths, filter_factors, cache=None, wrap=False, wrap_interval=1, nwraps=1000, no_regularization=False, hash_decimal=10): """ Computes the inverse of sinc weights for a baseline. This form of weighting is diagonal in delay-space and down-weights tophat regions. Parameters ---------- x: array like array-like list of arbitrary frequencies. 
If this is supplied, evaluate sinc_downweight_mat at these frequencies instead of linear array of nchan. filter_centers: float or list float or list of floats of centers of delay filter windows in nanosec filter_half_widths: float or list float or list of floats of half-widths of delay filter windows in nanosec filter_factors: float or list float or list of floats of filtering factors. cache: dictionary, optional dictionary storing filter matrices with keys hash_decimal int, number of decimals to consider when hashing x tuple(x) + (filter_centers) + (filter_half_widths) + \ (filter_factors) !!!------------- WARNING: The following parameters are intended for theoretical studies of how inverse sinc-weighting functions but should not be changed from defaults in practical data analysis! !!!------------ wrap: bool, If true, add a wrap around, equivalent to situation where we want sinc weights to be the IDFT of a diagonal matrix wrap_interval: integer, interval of wrap around in units of nf * df (bandwidth) nwraps: number of wraps to include. no_regularization: bool, if True, do not include diagonal regularization. 
Returns ---------- (nchan, nchan) complex inverse of the tophat filtering matrix assuming that the delay-space covariance is diagonal and zero outside of the horizon """ if cache is None: cache = {} if isinstance(filter_factors,(float,int, np.int, np.float)): filter_factors = [filter_factors] if isinstance(filter_centers, (float, int, np.int, np.float)): filter_centers = [filter_centers] if isinstance(filter_half_widths, (float, int, np.int, np.float)): filter_half_widths = [filter_half_widths] nchan = len(x) filter_key = _fourier_filter_hash(filter_centers=filter_centers, filter_half_widths=filter_half_widths, filter_factors=filter_factors, x=x, w=None, hash_decimal=hash_decimal, label='dayenu_matrix_inverse', wrap=wrap, wrap_interval=wrap_interval, nwraps=nwraps, no_regularization=no_regularization) if not filter_key in cache: fx, fy = np.meshgrid(x,x) sdwi_mat = np.identity(fx.shape[0]).astype(np.complex128) if no_regularization: sdwi_mat *= 0. for fc, fw, ff in zip(filter_centers, filter_half_widths, filter_factors): if not ff == 0: if not wrap: sdwi_mat = sdwi_mat + np.sinc( 2. * (fx-fy) * fw ).astype(np.complex128)\ * np.exp(-2j * np.pi * (fx-fy) * fc) / ff else: bwidth = x[-1] - x[0] + (x[1]-x[0]) for wnum in np.arange(-nwraps//2, nwraps//2): offset = bwidth * wnum * wrap_interval sdwi_mat = sdwi_mat + \ np.sinc( 2. * (fx-fy - offset) * fw ).astype(np.complex128)\ * np.exp(-2j * np.pi * (fx-fy - offset) * fc) / ff cache[filter_key] = sdwi_mat else: sdwi_mat = cache[filter_key] return sdwi_mat
{ "alphanum_fraction": 0.598524137, "author": null, "avg_line_length": 54.1273701566, "converted": null, "ext": "py", "file": null, "hexsha": "7ba16ef166e7b8cd6ca09b8a2dd9c70c59b53a0d", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2018-01-27T06:58:54.000Z", "max_forks_repo_forks_event_min_datetime": "2018-01-27T06:58:54.000Z", "max_forks_repo_head_hexsha": "b1bbe5fd8cff06354bed6ca4ab195bf82b8db976", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "LBJ-Wade/uvtools", "max_forks_repo_path": "uvtools/dspec.py", "max_issues_count": 122, "max_issues_repo_head_hexsha": "b1bbe5fd8cff06354bed6ca4ab195bf82b8db976", "max_issues_repo_issues_event_max_datetime": "2022-03-29T17:36:09.000Z", "max_issues_repo_issues_event_min_datetime": "2017-06-26T21:09:41.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "LBJ-Wade/uvtools", "max_issues_repo_path": "uvtools/dspec.py", "max_line_length": 165, "max_stars_count": null, "max_stars_repo_head_hexsha": "b1bbe5fd8cff06354bed6ca4ab195bf82b8db976", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "LBJ-Wade/uvtools", "max_stars_repo_path": "uvtools/dspec.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 29981, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 131313 }
import ast
import collections
import itertools
import numpy as np
from operator import itemgetter
from scipy.spatial import distance
import re

DEBUG = False


# --- Vector helpers ---

def v_same_orientation(v1, v2):
    """Return True if v1 and v2 point into the same half-plane (positive dot product)."""
    return np.dot(v1, v2) > 0


def v_angle(v1, v2):
    """Return the angle between v1 and v2 in whole degrees (rounded).

    Returns 0 if either vector has zero length, so callers never divide by zero.
    """
    length_v1 = np.linalg.norm(v1)
    length_v2 = np.linalg.norm(v2)
    if length_v1 == 0 or length_v2 == 0:
        return 0
    # BUGFIX: the denominator must be the *product* of both lengths.  The previous
    # expression `dot / length_v1 * length_v2` divided by length_v1 only and then
    # multiplied by length_v2, giving wrong angles whenever both vectors are
    # non-unit.  (Existing callers always passed one unit axis vector, which
    # masked the bug.)
    cos_angle = np.dot(v1, v2) / (length_v1 * length_v2)
    # Clip to the valid arccos domain to guard against floating-point drift.
    return np.round(np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0))))


def v_perpendicular(v1, v2, tolerance_deg=5):
    """Return True if v1 and v2 are perpendicular within tolerance_deg degrees.

    BUGFIX: tolerance_deg was previously ignored (a hard-coded 5 was used).  The
    default is now 5, so calls without an explicit tolerance behave as before.
    """
    return in_range(v_angle(v1, v2), 90, tolerance_deg)


def v_parallel(v1, v2, tolerance_deg=5):
    """Return True if v1 and v2 are parallel (0 or 180 degrees) within tolerance_deg.

    BUGFIX: tolerance_deg was previously ignored (a hard-coded 5 was used).  The
    default is now 5, so calls without an explicit tolerance behave as before.
    """
    angle = v_angle(v1, v2)
    return in_range(angle, 0, tolerance_deg) or in_range(angle, 180, tolerance_deg)


def in_range(value, target, tolerance):
    """Return True if value lies within [target - tolerance, target + tolerance]."""
    return target - tolerance <= value <= target + tolerance


def v_rotate(matrix, angle):
    """Rotate the given vector/matrix by angle in degrees, counter clockwise."""
    angle = np.radians(angle)
    rot_matrix = np.array(
        [
            [np.cos(angle), -np.sin(angle)],
            [np.sin(angle), np.cos(angle)]
        ]
    )
    return np.dot(rot_matrix, matrix)


# --- Helpers ---

def log(message):
    """Print a message only if DEBUG = True, so that all printing to stdout
    can be easily disabled."""
    if DEBUG:
        print(message)


def are_same(reference, value, percentage):
    """Return True if value lies strictly within +/- percentage (a fraction,
    e.g. 0.1 for 10%) of reference.

    NOTE(review): for a negative reference the bounds invert and this is always
    False; all current callers pass positive distances, so behavior is kept.
    """
    min_value = reference - reference * percentage
    max_value = reference + reference * percentage
    return min_value < value < max_value


# --- Heavy stuff ---

def string_to_coords(coord_string):
    """
    Check and decode a coordinates string (that is passed to the API on the
    command line) into a list of (x, y) integer tuples.

    Returns an empty list if the input is not a well-formed coordinate string.
    """
    if not isinstance(coord_string, str):
        return []
    coord_string = re.sub(r'\s+', '', coord_string, flags=re.UNICODE)
    # fullmatch (instead of match) rejects trailing garbage such as "[(1,2)]x",
    # which previously slipped past the check and crashed during evaluation.
    is_well_formed = re.fullmatch(r'\[(\(\d+,\d+\),){0,}(\(\d+,\d+\))\]', coord_string)
    # SECURITY FIX: ast.literal_eval only parses Python literals; the previous
    # eval() executed arbitrary expressions from an externally supplied string.
    return ast.literal_eval(coord_string) if is_well_formed else []


def approximates(ref_point, point, max_deviation):
    """Return True if point lies within max_deviation of ref_point in both x and y."""
    x_ok = ref_point[0] - max_deviation <= point[0] <= ref_point[0] + max_deviation
    y_ok = ref_point[1] - max_deviation <= point[1] <= ref_point[1] + max_deviation
    return x_ok and y_ok


def get_orientation_marks(points):
    """
    Extract the reference system (o, vx, vy) from a set of points.

    Returns None if no reference system is found.
    """
    p_threshold = 0.10

    # no touchcode if there are not enough points
    if points is None or len(points) < 3:
        return None

    # calculate the distances between all pairs of points
    vectors = [(p1, p2, distance.euclidean(p1, p2))
               for p1, p2 in itertools.combinations(points, 2)]

    # the two points with the longest distance between them are vx and vy
    v1, v2, longest_distance = max(vectors, key=itemgetter(2))
    log("v1: {0}, v2: {1}, dst(v1, v2): {2}]".format(v1, v2, longest_distance))

    # origin candidates: endpoints of segments whose length is
    # longest_distance / sqrt(2) and whose other end is one of the corner marks
    candidates = []
    for point_a, point_b, dist in vectors:
        if are_same(longest_distance / np.sqrt(2), dist, p_threshold):
            if np.array_equal(point_a, v1) or np.array_equal(point_a, v2):
                candidates.append((point_b[0], point_b[1]))
            if np.array_equal(point_b, v1) or np.array_equal(point_b, v2):
                candidates.append((point_a[0], point_a[1]))

    # the origin is the candidate that occurred twice (once for each corner mark)
    try:
        origin = np.array([k for k, v in collections.Counter(candidates).items() if v == 2])[0]
    except IndexError:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only the empty-candidate indexing can fail here.
        return None

    return find_vx_vy_new(np.array([origin, v1, v2]))


def find_vx_vy_new(m):
    """
    Given three points (origin, v1, v2), find out which of v1, v2 is vx and vy.

    Input:  a 3x2 matrix (origin, v1, v2)
    Output: a 3x2 matrix (origin, vx, vy)
    """
    # The standard coordinate system
    positive_x = np.array([1, 0])
    positive_y = np.array([0, 1])
    real_origin = np.array([0, 0])

    # The origin of our touchcode system
    origin = m[0]

    # Translate the touchcode coordinate system so its origin sits at (0,0)
    translation_vec = real_origin - origin
    mt = m + translation_vec
    v1, v2 = mt[1], mt[2]
    log("v1 is {0}".format(v1))

    # Pick v1 as a pivot: rotate both vectors by the angle between v1 and the
    # positive y-axis (counter clockwise; use the complementary angle when v1
    # points away from positive x).  After the rotation, v2's orientation
    # relative to the positive x-axis tells us which input vector is vx.
    if v_same_orientation(v1, positive_x):
        log("v1 is oriented with positive_x")
        angle = v_angle(v1, positive_y)
        log("angle: {0}".format(angle))
    else:
        log("v1 is NOT oriented with positive_x")
        angle = 360 - v_angle(v1, positive_y)
    v1 = v_rotate(v1, angle)
    v2 = v_rotate(v2, angle)
    log(v_same_orientation(v2, positive_x))
    log("after rot: v1 = {0} and v2 = {1}".format(v1, v2))

    if v_same_orientation(v2, positive_x):
        # v2 is vx: swap the last two rows
        return np.array([m[0], m[2], m[1]])
    return m


def norm(reference, point):
    """Given a reference system (o, vx, vy), map a point into normalized
    touchcode grid coordinates, rounded to one decimal."""
    o = reference[0]
    x = reference[1]
    y = reference[2]
    s = point

    # direction vectors along the edges of the reference system
    vx = x - o
    vy = y - o

    # position vector of the point relative to the new origin o
    so = s - o

    # scale the direction vectors so a full reference edge maps onto 3 grid units
    vx = (vx / np.linalg.norm(vx)) / np.linalg.norm(vx) * 3
    vy = (vy / np.linalg.norm(vy)) / np.linalg.norm(vy) * 3

    xcor = np.dot(vx, so)
    ycor = np.dot(vy, so)
    log("s.x: {0}, s.y: {1}".format(xcor, ycor))

    return (round(xcor, 1), round(ycor, 1))


def touchcode_from_points(points):
    """Generate the touchcode for a set of normalized touchpoints."""
    # grid position -> touchcode bit
    touchpoint_map = {
        (1, 3): 0x001, (2, 3): 0x002,
        (0, 2): 0x004, (1, 2): 0x008, (2, 2): 0x010, (3, 2): 0x020,
        (0, 1): 0x040, (1, 1): 0x080, (2, 1): 0x100, (3, 1): 0x200,
        (1, 0): 0x400, (2, 0): 0x800,
    }
    touchcode = 0
    for touchpoint, tc_bit in touchpoint_map.items():
        # a bit is set when any input point approximates the grid position
        if any(approximates(touchpoint, point, 0.2) for point in points):
            touchcode |= tc_bit
    return touchcode


def xmirror(points, max_y):
    """Mirror every point across the horizontal axis located at max_y."""
    return [(point[0], max_y - point[1]) for point in points]


def check_touchcode(points, x_mirror=True, max_y=1080):
    """Main API function.

    Takes a list of points, finds the reference system in it and tries to
    decode the corresponding touchcode.

    Returns:
        A touchcode from 0 to 4095 (12 bit) or -1 if no touchcode could be decoded.
    """
    no_result = -1

    if points is None or not isinstance(points, list):
        return no_result

    if x_mirror:
        points = xmirror(points, max_y)

    reference_system = get_orientation_marks(points)
    if reference_system is None:
        return no_result

    touchpoints = [norm(reference_system, point) for point in points]
    return touchcode_from_points(touchpoints)


def check_touchcode_str(coord_string, x_mirror=True):
    """
    Wrapper around check_touchcode to make it externally callable with a
    string of coordinates.
    """
    return check_touchcode(string_to_coords(coord_string), x_mirror)
{ "alphanum_fraction": 0.6248890016, "author": null, "avg_line_length": 31.406374502, "converted": null, "ext": "py", "file": null, "hexsha": "b572b17106649afa59317c3864caf225a4581831", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "26e9a8a555036beb17f8eeb06e83b6608b3995a2", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "informatik-mannheim/sysplace_demo_orderstatus", "max_forks_repo_path": "python/touchcode.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "26e9a8a555036beb17f8eeb06e83b6608b3995a2", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "informatik-mannheim/sysplace_demo_orderstatus", "max_issues_repo_path": "python/touchcode.py", "max_line_length": 109, "max_stars_count": null, "max_stars_repo_head_hexsha": "26e9a8a555036beb17f8eeb06e83b6608b3995a2", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "informatik-mannheim/sysplace_demo_orderstatus", "max_stars_repo_path": "python/touchcode.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2208, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 7883 }
/*#ifndef MODEL_MARKET_H #define MODEL_MARKET_H #endif // MODEL_MARKET_H */ #pragma once #include <graphene/chain/protocol/operations.hpp> #include <graphene/db/generic_index.hpp> #include <boost/multi_index/composite_key.hpp> #include <vector> namespace graphene { namespace chain { using namespace std; class database; class model_market_category_object : public graphene::db::abstract_object<model_market_category_object> { public: static const uint8_t space_id = protocol_ids; static const uint8_t type_id = model_market_category_object_type; string category_name; uint8_t model_market_type; uint32_t order_num; account_id_type issuer; uint8_t status =0; time_point_sec create_date_time; }; struct by_id_model{}; struct by_issuer_model{}; struct by_model_market_type{}; using model_market_category_multi_index_type = multi_index_container< model_market_category_object, indexed_by< ordered_unique< tag<by_id_model> ,member<object,object_id_type,&object::id> >, ordered_non_unique< tag<by_issuer_model>,member<model_market_category_object,account_id_type,&model_market_category::issuer> >, ordered_non_unique< tag<by_model_market_type>, composite_key< model_market_category_object, member<model_market_category_object,uint8_t,&model_market_category_object::model_market_type> > > > >; using model_market_category_index = generic_index<model_market_category_object,model_market_category_multi_index_type>; } } FC_REFLECT_DERIVED( graphene::chain::model_market_category_object, (graphene::db::object), (category_name) (model_market_type) (order_num) (issuer) (status) (create_date_time) )
{ "alphanum_fraction": 0.5753660637, "author": null, "avg_line_length": 36.28125, "converted": null, "ext": "hpp", "file": null, "hexsha": "3e8ac6e06de49d1d03c74b858d5eb5adcc86f854", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "282638a1b83d0f2987939915c1818e59d07e2943", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "noachain/noa-core", "max_forks_repo_path": "libraries/chain/include/graphene/chain/model_market_object.hpp", "max_issues_count": null, "max_issues_repo_head_hexsha": "282638a1b83d0f2987939915c1818e59d07e2943", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "noachain/noa-core", "max_issues_repo_path": "libraries/chain/include/graphene/chain/model_market_object.hpp", "max_line_length": 143, "max_stars_count": null, "max_stars_repo_head_hexsha": "282638a1b83d0f2987939915c1818e59d07e2943", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "noachain/noa-core", "max_stars_repo_path": "libraries/chain/include/graphene/chain/model_market_object.hpp", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 414, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 2322 }
MODULE in_out_manager !!====================================================================== !! *** MODULE in_out_manager *** !! I/O manager utilities : Defines run parameters together with logical units !!===================================================================== !! History : 1.0 ! 2002-06 (G. Madec) original code !! 2.0 ! 2006-07 (S. Masson) iom, add ctl_stop, ctl_warn !! 3.0 ! 2008-06 (G. Madec) add ctmp4 to ctmp10 !! 3.2 ! 2009-08 (S. MAsson) add new ctl_opn !! 3.3 ! 2010-10 (A. Coward) add NetCDF4 usage !!---------------------------------------------------------------------- !!---------------------------------------------------------------------- USE par_oce ! ocean parameter USE nc4interface ! NetCDF4 interface IMPLICIT NONE PUBLIC ! !!---------------------------------------------------------------------- !! namrun namelist parameters !!---------------------------------------------------------------------- CHARACTER(lc) :: cn_exp !: experiment name used for output filename CHARACTER(lc) :: cn_ocerst_in !: suffix of ocean restart name (input) CHARACTER(lc) :: cn_ocerst_indir !: restart input directory CHARACTER(lc) :: cn_ocerst_out !: suffix of ocean restart name (output) CHARACTER(lc) :: cn_ocerst_outdir !: restart output directory LOGICAL :: ln_rstart !: start from (F) rest or (T) a restart file LOGICAL :: ln_rst_list !: output restarts at list of times (T) or by frequency (F) INTEGER :: nn_no !: job number INTEGER :: nn_rstctl !: control of the time step (0, 1 or 2) INTEGER :: nn_rstssh = 0 !: hand made initilization of ssh or not (1/0) INTEGER :: nn_it000 !: index of the first time step INTEGER :: nn_itend !: index of the last time step INTEGER :: nn_date0 !: initial calendar date aammjj INTEGER :: nn_time0 !: initial time of day in hhmm INTEGER :: nn_leapy !: Leap year calendar flag (0/1 or 30) INTEGER :: nn_istate !: initial state output flag (0/1) INTEGER :: nn_write !: model standard output frequency INTEGER :: nn_stock !: restart file 
frequency INTEGER, DIMENSION(10) :: nn_stocklist !: restart dump times LOGICAL :: ln_mskland !: mask land points in NetCDF outputs (costly: + ~15%) LOGICAL :: ln_cfmeta !: output additional data to netCDF files required for compliance with the CF metadata standard LOGICAL :: ln_clobber !: clobber (overwrite) an existing file INTEGER :: nn_chunksz !: chunksize (bytes) for NetCDF file (works only with iom_nf90 routines) !$AGRIF_DO_NOT_TREAT TYPE(snc4_ctl) :: snc4set !: netcdf4 chunking control structure (always needed for decision making) !$AGRIF_END_DO_NOT_TREAT !! conversion of DOCTOR norm namelist name into model name !! (this should disappear in a near futur) CHARACTER(lc) :: cexper !: experiment name used for output filename INTEGER :: no !: job number INTEGER :: nrstdt !: control of the time step (0, 1 or 2) INTEGER :: nit000 !: index of the first time step INTEGER :: nitend !: index of the last time step INTEGER :: ndate0 !: initial calendar date aammjj INTEGER :: nleapy !: Leap year calendar flag (0/1 or 30) INTEGER :: ninist !: initial state output flag (0/1) INTEGER :: nwrite !: model standard output frequency INTEGER :: nstock !: restart file frequency INTEGER, DIMENSION(10) :: nstocklist !: restart dump times !!---------------------------------------------------------------------- !! was in restart but moved here because of the OFF line... better solution should be found... !!---------------------------------------------------------------------- INTEGER :: nitrst !: time step at which restart file should be written LOGICAL :: lrst_oce !: logical to control the oce restart write INTEGER :: numror = 0 !: logical unit for ocean restart (read). Init to 0 is needed for SAS (in daymod.F90) INTEGER :: numrow !: logical unit for ocean restart (write) INTEGER :: nrst_lst !: number of restart to output next !!---------------------------------------------------------------------- !! 
output monitoring !!---------------------------------------------------------------------- LOGICAL :: ln_ctl !: run control for debugging INTEGER :: nn_timing !: run control for timing INTEGER :: nn_diacfl !: flag whether to create CFL diagnostics INTEGER :: nn_print !: level of print (0 no print) INTEGER :: nn_ictls !: Start i indice for the SUM control INTEGER :: nn_ictle !: End i indice for the SUM control INTEGER :: nn_jctls !: Start j indice for the SUM control INTEGER :: nn_jctle !: End j indice for the SUM control INTEGER :: nn_isplt !: number of processors following i INTEGER :: nn_jsplt !: number of processors following j INTEGER :: nn_bench !: benchmark parameter (0/1) INTEGER :: nn_bit_cmp = 0 !: bit reproducibility (0/1) ! INTEGER :: nprint, nictls, nictle, njctls, njctle, isplt, jsplt, nbench !: OLD namelist names INTEGER :: ijsplt = 1 !: nb of local domain = nb of processors !!---------------------------------------------------------------------- !! logical units !!---------------------------------------------------------------------- INTEGER :: numstp = -1 !: logical unit for time step INTEGER :: numtime = -1 !: logical unit for timing INTEGER :: numout = 6 !: logical unit for output print; Set to stdout to ensure any early ! output can be collected; do not change INTEGER :: numnam_ref = -1 !: logical unit for reference namelist INTEGER :: numnam_cfg = -1 !: logical unit for configuration specific namelist INTEGER :: numond = -1 !: logical unit for Output Namelist Dynamics INTEGER :: numnam_ice_ref = -1 !: logical unit for ice reference namelist INTEGER :: numnam_ice_cfg = -1 !: logical unit for ice reference namelist INTEGER :: numoni = -1 !: logical unit for Output Namelist Ice INTEGER :: numevo_ice = -1 !: logical unit for ice variables (temp. 
evolution) INTEGER :: numsol = -1 !: logical unit for solver statistics INTEGER :: numdct_in = -1 !: logical unit for transports computing INTEGER :: numdct_vol = -1 !: logical unit for voulume transports output INTEGER :: numdct_heat = -1 !: logical unit for heat transports output INTEGER :: numdct_salt = -1 !: logical unit for salt transports output INTEGER :: numfl = -1 !: logical unit for floats ascii output INTEGER :: numflo = -1 !: logical unit for floats ascii output !!---------------------------------------------------------------------- !! Run control !!---------------------------------------------------------------------- INTEGER :: nstop = 0 !: error flag (=number of reason for a premature stop run) INTEGER :: nwarn = 0 !: warning flag (=number of warning found during the run) CHARACTER(lc) :: ctmp1, ctmp2, ctmp3 !: temporary characters 1 to 3 CHARACTER(lc) :: ctmp4, ctmp5, ctmp6 !: temporary characters 4 to 6 CHARACTER(lc) :: ctmp7, ctmp8, ctmp9 !: temporary characters 7 to 9 CHARACTER(lc) :: ctmp10 !: temporary character 10 CHARACTER(lc) :: cform_err = "(/,' ===>>> : E R R O R', /,' ===========',/)" !: CHARACTER(lc) :: cform_war = "(/,' ===>>> : W A R N I N G', /,' ===============',/)" !: LOGICAL :: lwm = .FALSE. !: boolean : true on the 1st processor only (always) LOGICAL :: lwp = .FALSE. !: boolean : true on the 1st processor only .OR. ln_ctl LOGICAL :: lsp_area = .TRUE. !: to make a control print over a specific area !!---------------------------------------------------------------------- !! NEMO/OCE 4.0 , NEMO Consortium (2018) !! $Id: in_out_manager.F90 6140 2015-12-21 11:35:23Z timgraham $ !! Software governed by the CeCILL licence (./LICENSE) !!===================================================================== END MODULE in_out_manager
{ "alphanum_fraction": 0.4884915962, "author": null, "avg_line_length": 66.2482269504, "converted": null, "ext": "f90", "file": null, "hexsha": "e8d8e5b82716c5c586c6569fb0fd8902fb824385", "include": null, "lang": "FORTRAN", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-04-09T16:25:26.000Z", "max_forks_repo_forks_event_min_datetime": "2021-04-09T16:25:26.000Z", "max_forks_repo_head_hexsha": "c25fa7092ef0ae895d0fe3accdec74254cd85c55", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "yumengch/nemo4_pdafomi", "max_forks_repo_path": "src/nemo_r4.0.4/tools/DOMAINcfg/src/in_out_manager.f90", "max_issues_count": 1, "max_issues_repo_head_hexsha": "c25fa7092ef0ae895d0fe3accdec74254cd85c55", "max_issues_repo_issues_event_max_datetime": "2021-04-24T18:56:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-04-24T18:56:09.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "yumengch/nemo4_pdafomi", "max_issues_repo_path": "src/nemo_r4.0.4/tools/DOMAINcfg/src/in_out_manager.f90", "max_line_length": 134, "max_stars_count": 2, "max_stars_repo_head_hexsha": "c25fa7092ef0ae895d0fe3accdec74254cd85c55", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "yumengch/nemo4_pdafomi", "max_stars_repo_path": "src/nemo_r4.0.4/tools/DOMAINcfg/src/in_out_manager.f90", "max_stars_repo_stars_event_max_datetime": "2021-10-05T12:32:14.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-24T11:08:31.000Z", "num_tokens": 2270, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 9341 }
[STATEMENT] lemma R_therm_dyn: assumes "a > 0" and "0 \<le> \<tau>" and "0 < Tmin" and "Tmax < L" shows "rel_R \<lceil>\<lambda>s. I Tmin Tmax s \<and> s$2 = 0 \<and> s$3 = s$1\<rceil> \<lceil>I Tmin Tmax\<rceil> \<ge> (IF (\<lambda>s. s$4 = 0) THEN (x\<acute>= (\<lambda>t. f a 0) & G Tmin Tmax a 0 on (\<lambda>s. {0..\<tau>}) UNIV @ 0) ELSE (x\<acute>= (\<lambda>t. f a L) & G Tmin Tmax a L on (\<lambda>s. {0..\<tau>}) UNIV @ 0))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. IF (\<lambda>s. s $ 4 = 0) THEN x\<acute>=\<lambda>t. f a 0 & G Tmin Tmax a 0 on \<lambda>s. {0..\<tau>} UNIV @ 0 ELSE x\<acute>=\<lambda>t. f a L & G Tmin Tmax a L on \<lambda>s. {0..\<tau>} UNIV @ 0 \<subseteq> rel_R \<lceil>\<lambda>s. I Tmin Tmax s \<and> s $ 2 = 0 \<and> s $ 3 = s $ 1\<rceil> \<lceil>I Tmin Tmax\<rceil> [PROOF STEP] apply(rule order_trans, rule R_cond_mono) [PROOF STATE] proof (prove) goal (3 subgoals): 1. x\<acute>=\<lambda>t. f a 0 & G Tmin Tmax a 0 on \<lambda>s. {0..\<tau>} UNIV @ 0 \<subseteq> ?X'3 2. x\<acute>=\<lambda>t. f a L & G Tmin Tmax a L on \<lambda>s. {0..\<tau>} UNIV @ 0 \<subseteq> ?Y'3 3. IF (\<lambda>s. s $ 4 = 0) THEN ?X'3 ELSE ?Y'3 \<subseteq> rel_R \<lceil>\<lambda>s. I Tmin Tmax s \<and> s $ 2 = 0 \<and> s $ 3 = s $ 1\<rceil> \<lceil>I Tmin Tmax\<rceil> [PROOF STEP] apply(rule R_therm_dyn_down[OF assms]) [PROOF STATE] proof (prove) goal (2 subgoals): 1. x\<acute>=\<lambda>t. f a L & G Tmin Tmax a L on \<lambda>s. {0..\<tau>} UNIV @ 0 \<subseteq> ?Y'3 2. IF (\<lambda>s. s $ 4 = 0) THEN rel_R \<lceil>\<lambda>s. s $ 4 = 0 \<and> I Tmin Tmax s \<and> s $ 2 = 0 \<and> s $ 3 = s $ 1\<rceil> \<lceil>I Tmin Tmax\<rceil> ELSE ?Y'3 \<subseteq> rel_R \<lceil>\<lambda>s. I Tmin Tmax s \<and> s $ 2 = 0 \<and> s $ 3 = s $ 1\<rceil> \<lceil>I Tmin Tmax\<rceil> [PROOF STEP] using R_therm_dyn_down[OF assms] R_therm_dyn_up[OF assms] [PROOF STATE] proof (prove) using this: x\<acute>=\<lambda>t. f a 0 & G Tmin Tmax a 0 on \<lambda>s. 
{0..\<tau>} UNIV @ 0 \<subseteq> rel_R \<lceil>\<lambda>s. s $ 4 = 0 \<and> I Tmin Tmax s \<and> s $ 2 = 0 \<and> s $ 3 = s $ 1\<rceil> \<lceil>I Tmin Tmax\<rceil> x\<acute>=\<lambda>t. f a L & G Tmin Tmax a L on \<lambda>s. {0..\<tau>} UNIV @ 0 \<subseteq> rel_R \<lceil>\<lambda>s. s $ 4 \<noteq> 0 \<and> I Tmin Tmax s \<and> s $ 2 = 0 \<and> s $ 3 = s $ 1\<rceil> \<lceil>I Tmin Tmax\<rceil> goal (2 subgoals): 1. x\<acute>=\<lambda>t. f a L & G Tmin Tmax a L on \<lambda>s. {0..\<tau>} UNIV @ 0 \<subseteq> ?Y'3 2. IF (\<lambda>s. s $ 4 = 0) THEN rel_R \<lceil>\<lambda>s. s $ 4 = 0 \<and> I Tmin Tmax s \<and> s $ 2 = 0 \<and> s $ 3 = s $ 1\<rceil> \<lceil>I Tmin Tmax\<rceil> ELSE ?Y'3 \<subseteq> rel_R \<lceil>\<lambda>s. I Tmin Tmax s \<and> s $ 2 = 0 \<and> s $ 3 = s $ 1\<rceil> \<lceil>I Tmin Tmax\<rceil> [PROOF STEP] by (auto intro!: R_cond)
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Hybrid_Systems_VCs_KleeneAlgebraTests_HS_VC_KAT_Examples_rel", "hexsha": null, "include": null, "lang": null, "length": 4, "llama_tokens": 1393, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
import keras
import numpy as np
import os
from keras.preprocessing.image import ImageDataGenerator , array_to_img, img_to_array, load_img
from Tkinter import Tk
from tkFileDialog import askdirectory
import GetFilePathFromDir as getfdir

# Hide the root Tk window; only the directory-chooser dialogs are used.
Tk().withdraw()
PathMana = getfdir.GetFileinDir()

# Augmentation pipeline: small random affine jitter plus horizontal/vertical flips.
datagen = ImageDataGenerator(
    rotation_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,
    vertical_flip=True,
    dim_ordering='th',
    fill_mode='nearest')

dirpath = askdirectory()        # folder containing the source images
outputdirpath = askdirectory()  # folder that receives the augmented samples
FilenameList = PathMana.GetPathFilesinFolder(dirpath)

arrimglist = []
namenum = 0
for imgpath in FilenameList:
    # Load the image and prepend a batch axis so datagen.flow() accepts it.
    arrimg = img_to_array(load_img(imgpath))
    arrimg = np.reshape(arrimg, (1,) + arrimg.shape)

    count = 0
    tempname = format(namenum, "03d")  # zero-padded filename prefix for this source image
    # Emit exactly 10 augmented variants of the current image.
    for item in datagen.flow(arrimg, batch_size=1, save_to_dir=outputdirpath,
                             save_prefix=tempname, save_format='bmp'):
        count += 1
        namenum += 1  # NOTE: advances once per generated sample, so prefixes step by 10
        if count == 10:
            break
{ "alphanum_fraction": 0.7164048866, "author": null, "avg_line_length": 22.92, "converted": null, "ext": "py", "file": null, "hexsha": "6a6a0c63b09d51f883f72290b580ecb17a7fbf5a", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1c8f6720afa323712bcd3085c68a13b9c2a1a059", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "pimier15/VisionImgTool", "max_forks_repo_path": "ImgSampleGenerator/ImgSampleGenerator/ImgSampleGenerator.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "1c8f6720afa323712bcd3085c68a13b9c2a1a059", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "pimier15/VisionImgTool", "max_issues_repo_path": "ImgSampleGenerator/ImgSampleGenerator/ImgSampleGenerator.py", "max_line_length": 119, "max_stars_count": null, "max_stars_repo_head_hexsha": "1c8f6720afa323712bcd3085c68a13b9c2a1a059", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "pimier15/VisionImgTool", "max_stars_repo_path": "ImgSampleGenerator/ImgSampleGenerator/ImgSampleGenerator.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 316, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1146 }
Require Import bedrock2.Syntax. Require Import bedrock2.NotationsCustomEntry. Require Import bedrock2.FE310CSemantics. Require Import coqutil.Z.Lia. From bedrock2 Require Import BasicC64Semantics ProgramLogic. From bedrock2 Require Import Array Scalars Separation. From coqutil Require Import Word.Interface Map.Interface. From coqutil.Tactics Require Import letexists. From coqutil.Tactics Require Import syntactic_unify. From coqutil.Macros Require Import symmetry. From coqutil.Tactics Require Import syntactic_unify. From coqutil.Macros Require Import symmetry. Require Import coqutil.Datatypes.List. Section WithParameters. Import Syntax BinInt String List.ListNotations ZArith. Local Open Scope string_scope. Local Open Scope Z_scope. Local Open Scope list_scope. Definition tf : bedrock_func := ("tf", (["buf"; "len"; "i"; "j"], [], bedrock_func_body:( require ( i < len ) else { /*skip*/ }; store1(buf + i, $0); require ( j < len ) else { r = $-1 }; r = load1(buf + j) ))). Local Infix "*" := sep : type_scope. Local Open Scope sep_scope. Local Notation "a [ i ]" := (List.hd _ (List.skipn i a)) (at level 10, left associativity, format "a [ i ]"). Local Notation "a [: i ]" := (List.firstn i a) (at level 10, left associativity, format "a [: i ]"). Local Notation "a [ i :]" := (List.skipn i a) (at level 10, left associativity, format "a [ i :]"). Local Notation bytes := (array ptsto (word.of_Z 1)). (* Local Notation word_to_nat x := (Z.to_nat (word.unsigned x)). *) Local Infix "+" := word.add. Local Infix "+" := word.add. Local Instance spec_of_tf : spec_of "tf". refine (fun functions => forall t m buf len bs i j R, (sep (array ptsto (word.of_Z 1) buf bs) R) m -> word.unsigned len = Z.of_nat (List.length bs) -> WeakestPrecondition.call functions "tf" t m [buf; len; i; j] (fun T M rets => True)). 
(* word.unsigned i < word.unsigned len -> word.unsigned j < word.unsigned len -> rets = [word.of_Z (word.unsigned ((bs[:word_to_nat i]++ word.of_Z 0 :: List.tl (bs[word_to_nat i:]))[word_to_nat j]))] *) Defined. Import SeparationLogic Lift1Prop. Goal program_logic_goal_for_function! tf. Proof. repeat straightline. letexists. split; [solve[repeat straightline] |]. split; [|solve [repeat straightline]]; repeat straightline. eapply Properties.word.if_nonzero in H1; rewrite word.unsigned_ltu in H1; eapply Z.ltb_lt in H1. simple refine (store_one_of_sep _ _ _ _ _ _ (Lift1Prop.subrelation_iff1_impl1 _ _ _ _ _ H) _); shelve_unifiable. 1: (etransitivity; [etransitivity|]); cycle -1; [ | | eapply Proper_sep_iff1; [|reflexivity]; eapply bytearray_index_inbounds]; try ecancel; try blia. repeat straightline. intros. seprewrite_in (symmetry! @array_cons) H2. seprewrite_in (@bytearray_index_merge) H2. { pose proof Properties.word.unsigned_range i. rewrite length_firstn_inbounds; blia. } letexists. split; [solve[repeat straightline]|]. split; [|solve [repeat straightline]]. repeat straightline. eapply Properties.word.if_nonzero in H3; rewrite word.unsigned_ltu in H3; eapply Z.ltb_lt in H3. letexists. split. { letexists. split; repeat straightline. letexists; split. { eapply load_one_of_sep. simple refine (Lift1Prop.subrelation_iff1_impl1 _ _ _ _ _ H2). (etransitivity; [|etransitivity]); [ | eapply Proper_sep_iff1; [|reflexivity]; eapply bytearray_index_inbounds | ]. 3: ecancel. 1: ecancel. pose proof Properties.word.unsigned_range i. pose proof Properties.word.unsigned_range j. rewrite List.app_length, length_cons, length_firstn_inbounds, length_skipn. all: blia. } 1: subst v1. exact eq_refl. } repeat (straightline; [|..]). exact I. Qed. (* [eseptract] solves goals of the form [state == needle * ?r] by "subtracting" [needle] from [state] with the help of decomposition hints of the form [a = b * c * d * ...]. 
[?r] will be instantiated with the result of the subtraction "state - needle";
   in terms of the magic wand operator, this tactic simplifies [needle -* state].
   The process is directed by the syntactic form of [needle]:
   1. If [needle] appears syntactically in [state], the equation is solved by cancellation.
   2. If [needle] matches a part of the RHS of a decomposition lemma, the non-matched
      part of the RHS is put into [r] and the LHS is subtracted from the state recursively.
   3. If [needle] is a separating conjunction of multiple clauses, each of them
      will be subtracted separately. *)
(* TODO: should side conditions be solved before or after recursing?
   - before does not work for arrays -- need to know which array before checking bounds
   - after would mean all leaves of every struct would be explored -- uncontrolled search *)
(* better algorithm might use hints containing:
   - needle pattern
   - state pattern = hint lhs
   - condition when to apply this hint (is needle actually inside pattern?)
     this might be a judgementally trivial but syntactically informative precondition
   - hint rhs
   on match, only the hint rhs (plus original frame) would be searched for further matches *)
End WithParameters.
{ "alphanum_fraction": null, "author": "dderjoel", "avg_line_length": null, "converted": null, "ext": null, "file": null, "hexsha": null, "include": null, "lang": null, "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": "github-repos/coq/dderjoel-base/base-2aa122fb618100b7fed3119ea3cef73cec5bf4a5/fiat-crypto/rupicola/bedrock2/bedrock2/src/bedrock2Examples/ArrayLoadStore.v", "reason": null, "repo": "base", "save_path": "github-repos/coq/dderjoel-base", "sha": "2aa122fb618100b7fed3119ea3cef73cec5bf4a5", "size": null }
import os import numpy as np import skimage.data from skimage.io import imsave, imread from skimage import transform from skimage.color import rgb2gray import matplotlib.pyplot as plt img_w = 128 img_h = 128 PATH_NEW_IMGS_FOLDER = 'Resized_images' def load_imgs(): CONDITIONS = lambda img_name: False if '_mask' in img_name or '.json' in img_name or '.py' in img_name else True img_names = [img_name for img_name in os.listdir() if CONDITIONS(img_name) and os.path.isfile(img_name)] imgs = [imread(img_name) for img_name in img_names] return imgs, img_names def resize(imgs): resized_imgs = [transform.resize(img, (img_w, img_h)) for img in imgs] return resized_imgs def save_imgs(imgs, img_names): for img, img_name in zip(imgs, img_names): imsave(os.path.join(PATH_NEW_IMGS_FOLDER, img_name), img) if __name__ == '__main__': imgs, img_names = load_imgs() imgs = resize(imgs) save_imgs(imgs, img_names)
{ "alphanum_fraction": 0.728024819, "author": null, "avg_line_length": 24.7948717949, "converted": null, "ext": "py", "file": null, "hexsha": "167446a2686f057ed148efd699ec630f61ecdb72", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "44d4d7508b8cddb5c3bd78c6b5297de3850baf36", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Tarasila/Mask_RCNN_defect_detection", "max_forks_repo_path": "script_to_resize_imgs.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "44d4d7508b8cddb5c3bd78c6b5297de3850baf36", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Tarasila/Mask_RCNN_defect_detection", "max_issues_repo_path": "script_to_resize_imgs.py", "max_line_length": 114, "max_stars_count": null, "max_stars_repo_head_hexsha": "44d4d7508b8cddb5c3bd78c6b5297de3850baf36", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Tarasila/Mask_RCNN_defect_detection", "max_stars_repo_path": "script_to_resize_imgs.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 250, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 967 }
import numpy as np
import os
from scanorama import *
from scipy.sparse import vstack
from sklearn.preprocessing import LabelEncoder, scale

from experiments import *
from process import load_names
from utils import *

NAMESPACE = 'mono_macro'
METHOD = 'svd'
DIMRED = 100

data_names = [
    'data/macrophage/monocytes_1',
    'data/macrophage/monocytes_2',
    'data/macrophage/monocytes_3',
    'data/macrophage/monocytes_4',
    'data/pbmc/10x/cd14_monocytes',
    'data/macrophage/mcsf_day3_1',
    'data/macrophage/mcsf_day3_2',
    'data/macrophage/mcsf_day6_1',
    'data/macrophage/mcsf_day6_2',
    'data/macrophage/mcsf_day6_3',
    'data/macrophage/mcsf_day6_4',
]

def write_table(dataset, genes, name):
    """Write a genes-by-cells tab-separated expression table.

    dataset: cells-by-genes matrix (indexable as dataset[cell, gene]).
    genes: gene names, one per column of `dataset`.
    name: output path prefix; the table goes to `name + '_table.txt'` and
        cell columns are labelled `<basename(name)>0`, `<basename(name)>1`, ...
    Values are truncated to int before writing.
    """
    prefix = name.split('/')[-1]
    with open(name + '_table.txt', 'w') as f:
        header = '\t'.join([ prefix + str(i) for i in range(dataset.shape[0]) ])
        f.write(header + '\n')
        # One row per gene: gene name followed by its value in every cell.
        for i in range(dataset.shape[1]):
            line = '\t'.join([ str(int(j)) for j in dataset[:, i] ])
            f.write(genes[i] + '\t' + line + '\n')

def keep_valid():
    """Match UMI-table cells against the metadata file.

    Returns (valid_idx, cell_names): the column indices of cells present in
    the metadata, and their corresponding type labels.
    """
    with open('data/pseudotime/GSE72857_umitab.txt') as f:
        all_cells = f.readline().rstrip().split()[1:]

    with open('data/pseudotime/meta.txt') as f:
        # BUG FIX: this was `cell_to_type = []`, so the string-keyed
        # assignment below raised TypeError. It must be a dict.
        cell_to_type = {}
        for line in f:
            fields = line.rstrip().split()
            cell_to_type[fields[0]] = fields[1]

    valid_idx = []
    cell_names = []
    for c_idx, cell in enumerate(all_cells):
        if cell in cell_to_type:
            valid_idx.append(c_idx)
            cell_names.append(cell_to_type[cell])
    return valid_idx, cell_names

if __name__ == '__main__':
    datasets, genes_list, n_cells = load_names(data_names, norm=False)
    datasets, genes = merge_datasets(datasets, genes_list)
    X = vstack(datasets)

    # Reuse a cached dimensionality reduction when one exists on disk.
    if not os.path.isfile('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE)):
        log('Dimension reduction with {}...'.format(METHOD))
        X_dimred = reduce_dimensionality(
            normalize(X), method=METHOD, dimred=DIMRED
        )
        log('Dimensionality = {}'.format(X_dimred.shape[1]))
        np.savetxt('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE), X_dimred)
    else:
        X_dimred = np.loadtxt('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE))

    # Label each cell by the dataset it came from.
    cell_names = []
    for i, a in enumerate(datasets):
        if 'monocyte' in data_names[i]:
            cell_names += [ 'monocyte' ] * a.shape[0]
        elif 'day3' in data_names[i]:
            cell_names += [ 'mcsf_day3' ] * a.shape[0]
        elif 'day6' in data_names[i]:
            cell_names += [ 'mcsf_day6' ] * a.shape[0]
        else:
            assert(False)

    le = LabelEncoder().fit(cell_names)
    cell_labels = le.transform(cell_names)

    write_table(X.toarray(), genes, 'data/pseudotime/' + NAMESPACE)

    with open('data/pseudotime/mono_macro_meta.txt', 'w') as of:
        of.write('Label\n')
        for idx in range(X.shape[0]):
            # BUG FIX: the original row write had no trailing newline,
            # unlike the _gs/_uni writers below, producing a one-line file.
            of.write('mono_macro{}\t{}\n'.format(idx, cell_names[idx]))

    from geosketch import gs, gs_gap, uniform

    # Geometric sketch of 110 cells.
    gs_idx = gs(X_dimred, 110, replace=False)
    write_table(X[gs_idx, :].toarray(), genes,
                'data/pseudotime/' + NAMESPACE + '_gs')
    report_cluster_counts(cell_labels[gs_idx])

    with open('data/pseudotime/mono_macro_meta_gs.txt', 'w') as of:
        of.write('Label\n')
        i = 0
        for idx in range(X.shape[0]):
            if idx not in gs_idx:
                continue
            of.write('mono_macro_gs{}\t{}\n'.format(i, cell_names[idx]))
            i += 1

    # Uniform subsample of the same size, for comparison.
    uni_idx = uniform(X_dimred, 110, replace=False)
    write_table(X[uni_idx, :].toarray(), genes,
                'data/pseudotime/' + NAMESPACE + '_uni')
    report_cluster_counts(cell_labels[uni_idx])

    with open('data/pseudotime/mono_macro_meta_uni.txt', 'w') as of:
        of.write('Label\n')
        i = 0
        for idx in range(X.shape[0]):
            if idx not in uni_idx:
                continue
            of.write('mono_macro_uni{}\t{}\n'.format(i, cell_names[idx]))
            i += 1

    with open('data/pseudotime/mono_macro_genes.txt', 'w') as of:
        of.write('gene_short_name\n')
        for gene in genes:
            of.write('{}\t{}\n'.format(gene, gene))
{ "alphanum_fraction": 0.6035489143, "author": null, "avg_line_length": 34.264, "converted": null, "ext": "py", "file": null, "hexsha": "fe0bd4dd46af3144212d5c9bafba24a07f657346", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 6, "max_forks_repo_forks_event_max_datetime": "2022-01-25T08:49:58.000Z", "max_forks_repo_forks_event_min_datetime": "2019-02-01T01:10:56.000Z", "max_forks_repo_head_hexsha": "8902dd2c165fbdcb1b387fd5389157ec1ded0b03", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "prete/geosketch", "max_forks_repo_path": "bin/monocyte_macrophage.py", "max_issues_count": 9, "max_issues_repo_head_hexsha": "8902dd2c165fbdcb1b387fd5389157ec1ded0b03", "max_issues_repo_issues_event_max_datetime": "2021-05-21T22:51:45.000Z", "max_issues_repo_issues_event_min_datetime": "2019-02-11T10:42:08.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "brianhie/ample", "max_issues_repo_path": "bin/monocyte_macrophage.py", "max_line_length": 88, "max_stars_count": 45, "max_stars_repo_head_hexsha": "8902dd2c165fbdcb1b387fd5389157ec1ded0b03", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "brianhie/ample", "max_stars_repo_path": "bin/monocyte_macrophage.py", "max_stars_repo_stars_event_max_datetime": "2022-02-02T10:03:15.000Z", "max_stars_repo_stars_event_min_datetime": "2019-02-04T21:48:50.000Z", "num_tokens": 1172, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 4283 }
"""------------------------------------------------------- Licensed under The MIT License [see LICENSE for details] Written by Kyungjun Lee -------------------------------------------------------""" import subprocess as sp import numpy as np # global variables ACCEPTABLE_AVAILABLE_MEMORY = 10000 # https://github.com/yselivonchyk/TensorFlow_DCIGN/blob/master/utils.py def _output_to_list(output): return output.decode('ascii').split('\n')[:-1] def get_idle_gpu(leave_unmasked=1, random=True): try: command = "nvidia-smi --query-gpu=memory.free --format=csv" memory_free_info = _output_to_list(sp.check_output(command.split()))[1:] memory_free_values = [int(x.split()[0]) for i, x in enumerate(memory_free_info)] available_gpus = [i for i, x in enumerate(memory_free_values) if x > ACCEPTABLE_AVAILABLE_MEMORY] if len(available_gpus) <= leave_unmasked: print('Found only %d usable GPUs in the system' % len(available_gpus)) return -1 if random: available_gpus = np.asarray(available_gpus) np.random.shuffle(available_gpus) gpu_to_use = available_gpus[0] print("Using GPU: ", gpu_to_use) return int(gpu_to_use) """ # update CUDA variable gpus = available_gpus[:leave_unmasked] setting = ','.join(map(str, gpus)) os.environ["CUDA_VISIBLE_DEVICES"] = setting print('Left next %d GPU(s) unmasked: [%s] (from %s available)' % (leave_unmasked, setting, str(available_gpus))) """ except FileNotFoundError as e: print('"nvidia-smi" is probably not installed. GPUs are not masked') print(e) return -1 except sp.CalledProcessError as e: print("Error on GPU masking:\n", e.output) return -1
{ "alphanum_fraction": 0.6532634033, "author": null, "avg_line_length": 35.75, "converted": null, "ext": "py", "file": null, "hexsha": "d3e1ec5281558f540c8af6352141710864d8c709", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9e389039da374d3301f47ea93340c1cd1f397999", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "kyungjunlee/Faster-RCNN-TensorFlow-Python3", "max_forks_repo_path": "lib/utils/util.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "9e389039da374d3301f47ea93340c1cd1f397999", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "kyungjunlee/Faster-RCNN-TensorFlow-Python3", "max_issues_repo_path": "lib/utils/util.py", "max_line_length": 101, "max_stars_count": null, "max_stars_repo_head_hexsha": "9e389039da374d3301f47ea93340c1cd1f397999", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "kyungjunlee/Faster-RCNN-TensorFlow-Python3", "max_stars_repo_path": "lib/utils/util.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 427, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1716 }
include("1.jl") include("11.jl") include("14.jl") function mark_allzicper(r::Robot) angleofsquare1111(r::Robot) putmarker!(r) snake11per(r) if (isborder(r,Ost)) putmarkholeper(r,West) else putmarkholeper(r,inverse(West)) end end function snake11per(r::Robot) while !isborder(r,Nord) if (isborder(r,Ost)) putmarkholeper(r,West) else putmarkholeper(r,inverse(West)) end if (isborder(r,Ost) || isborder(r,West)) disexit(r,Nord) putmarker!(r) end end end function angleofsquare1111(r::Robot) while ((!isborder(r,Sud)) || (!isborder(r,West))) if (!isborder(r,Sud)) disexit(r,Sud) end if (!isborder(r,West)) disexit(r,West) end end end
{ "alphanum_fraction": 0.5260770975, "author": null, "avg_line_length": 22.05, "converted": null, "ext": "jl", "file": null, "hexsha": "aba4c82a42f953efc86415979f708e15b928c5a8", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e8f1215b98e2f88b9374c89b1fedbfa1ad06b7b4", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "bubasooo/proggggggggg", "max_forks_repo_path": "16.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "e8f1215b98e2f88b9374c89b1fedbfa1ad06b7b4", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "bubasooo/proggggggggg", "max_issues_repo_path": "16.jl", "max_line_length": 54, "max_stars_count": null, "max_stars_repo_head_hexsha": "e8f1215b98e2f88b9374c89b1fedbfa1ad06b7b4", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "bubasooo/proggggggggg", "max_stars_repo_path": "16.jl", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 247, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 882 }
from .version import __version__ from .sr import ( pysr, PySRRegressor, best, best_tex, best_callable, best_row, ) from .julia_helpers import install from .feynman_problems import Problem, FeynmanProblem from .export_jax import sympy2jax from .export_torch import sympy2torch
{ "alphanum_fraction": 0.7633333333, "author": null, "avg_line_length": 21.4285714286, "converted": null, "ext": "py", "file": null, "hexsha": "e303becb2dddf32fd0d921f2d35a188568c65c49", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2020-09-18T00:53:04.000Z", "max_forks_repo_forks_event_min_datetime": "2020-09-17T11:57:37.000Z", "max_forks_repo_head_hexsha": "3c8d9b9cde8299d0f9411944a5d730986f03d583", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "MilesCranmer/Eureqa.jl", "max_forks_repo_path": "pysr/__init__.py", "max_issues_count": 1, "max_issues_repo_head_hexsha": "3c8d9b9cde8299d0f9411944a5d730986f03d583", "max_issues_repo_issues_event_max_datetime": "2020-09-19T02:41:59.000Z", "max_issues_repo_issues_event_min_datetime": "2020-09-19T02:41:59.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "MilesCranmer/Eureqa.jl", "max_issues_repo_path": "pysr/__init__.py", "max_line_length": 53, "max_stars_count": 35, "max_stars_repo_head_hexsha": "3c8d9b9cde8299d0f9411944a5d730986f03d583", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "MilesCranmer/Eureqa.jl", "max_stars_repo_path": "pysr/__init__.py", "max_stars_repo_stars_event_max_datetime": "2020-09-18T18:40:21.000Z", "max_stars_repo_stars_event_min_datetime": "2020-09-14T12:40:07.000Z", "num_tokens": 83, "path": null, "reason": "import sympy", "repo": null, "save_path": null, "sha": null, "size": 300 }
Hills Drive is located in West Davis and leads into the parking lot for the Western Center for Agricultural Equipment. It is a very confused street (see Davis Isnt Flat). Image(hills.jpg, thumbnail, 500, right)
{ "alphanum_fraction": 0.7922705314, "author": null, "avg_line_length": 69, "converted": null, "ext": "f", "file": null, "hexsha": "dd2b0e670dd3302eead3a3b64d137640842cd7f9", "include": null, "lang": "FORTRAN", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "voflo/Search", "max_forks_repo_path": "lab/davisWiki/Hills_Drive.f", "max_issues_count": null, "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "voflo/Search", "max_issues_repo_path": "lab/davisWiki/Hills_Drive.f", "max_line_length": 205, "max_stars_count": null, "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "voflo/Search", "max_stars_repo_path": "lab/davisWiki/Hills_Drive.f", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 46, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 207 }
using ZStd
using Base.Test

# Unit tests for the ZStd.jl helper layer around libzstd.
@testset "helpers" begin
    # MAX_COMPRESSION is exposed as a positive Int.
    @test typeof(ZStd.MAX_COMPRESSION) == Int
    @test ZStd.MAX_COMPRESSION > 0

    # check_zstd_error passes a non-error code (0) through unchanged...
    @test ZStd.check_zstd_error(UInt64(0)) == UInt64(0)
    # ...and raises ZStdError for an error code (typemax(UInt64)).
    @test_throws ZStd.ZStdError ZStd.check_zstd_error(typemax(UInt64))

    # Capture the thrown error and check its rendered message prefix.
    let err = try
            ZStd.check_zstd_error(typemax(UInt64))
        catch ex
            ex
        end
        err_txt = sprint(showerror, err)
        @test startswith(err_txt, "ZStd: Error (generic)")
    end

    # maxcompressedsize returns a positive value and preserves the
    # integer type of its argument (Int in, Int out; UInt in, UInt out).
    @test typeof(ZStd.maxcompressedsize(1)) == Int
    @test ZStd.maxcompressedsize(1) > 0
    @test typeof(ZStd.maxcompressedsize(UInt(1))) == UInt
    @test ZStd.maxcompressedsize(UInt(1)) > UInt(0)

    # Pin the bundled zstd library version.
    @test ZStd.ZSTD_VERSION == v"1.3.3"
end
{ "alphanum_fraction": 0.6427604871, "author": null, "avg_line_length": 26.3928571429, "converted": null, "ext": "jl", "file": null, "hexsha": "4fea09fb5024c3206f1278f9745d0730f27a2df2", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "ab35fc259958da4c6074c1811cb5ecdac73823eb", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "nkemnitz/ZStd.jl", "max_forks_repo_path": "test/runtests.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "ab35fc259958da4c6074c1811cb5ecdac73823eb", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "nkemnitz/ZStd.jl", "max_issues_repo_path": "test/runtests.jl", "max_line_length": 70, "max_stars_count": null, "max_stars_repo_head_hexsha": "ab35fc259958da4c6074c1811cb5ecdac73823eb", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "nkemnitz/ZStd.jl", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 222, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 739 }
#include <boost/mpl/aux_/preprocessed/dmc/equal_to.hpp>
{ "alphanum_fraction": 0.8035714286, "author": null, "avg_line_length": 28, "converted": null, "ext": "hpp", "file": null, "hexsha": "43ed648f21ed0b9ef73d8970cd9a7bcf2c8d96c7", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 4, "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": [ "BSL-1.0" ], "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_path": "src/boost_mpl_aux__preprocessed_dmc_equal_to.hpp", "max_issues_count": 2, "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_licenses": [ "BSL-1.0" ], "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_path": "src/boost_mpl_aux__preprocessed_dmc_equal_to.hpp", "max_line_length": 55, "max_stars_count": 10, "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": [ "BSL-1.0" ], "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_path": "src/boost_mpl_aux__preprocessed_dmc_equal_to.hpp", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "num_tokens": 17, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 56 }
#!/usr/bin/python import threading import time from Monsoon import HVPM import struct from datetime import datetime import time import math from Monsoon.calibrationData import calibrationData from Monsoon import Operations as ops from copy import deepcopy import numpy as np import signal import sys import usb import os import scipy as sp import scipy.stats class channels: timeStamp = 0 MainCurrent = 1 USBCurrent = 2 AuxCurrent = 3 MainVoltage = 4 USBVoltage = 5 class triggers: SAMPLECOUNT_INFINITE = 0xFFFFFFFF @staticmethod def GREATER_THAN(x, y): if (x > y): return True else: return False @staticmethod def LESS_THAN(x, y): if (x < y): return True else: return False class ErrorHandlingModes: off = 0 # No error checking. Use if you're seeing a large number of dropped samples full = 1 # Automatically handle errors debug = 2 # Handle errors + output logging data. Not fully implemented yet. class SampleEngine: def __init__(self, Monsoon, bulkProcessRate=128, errorMode=ErrorHandlingModes.full, calsToKeep=5): """Declares global variables. During testing, we found the garbage collector would slow down sampling enough to cause a lot of dropped samples. 
We've tried to combat this by allocating as much as possible in advance.""" self.monsoon = Monsoon self.__errorMode = errorMode if (errorMode == ErrorHandlingModes.debug): os.environ['PYUSB_DEBUG'] = 'debug' os.environ['PYUSB_LOG_FILENAME'] = 'pyusb.log' usb._setup_log() self.__mainCal = calibrationData(calsToKeep) self.__usbCal = calibrationData(calsToKeep) self.__auxCal = calibrationData(calsToKeep) self.__padding = np.zeros((64)) self.__fineThreshold = Monsoon.fineThreshold self.__auxFineThreshold = Monsoon.auxFineThreshold self.__ADCRatio = (float)(62.5 / 1e6); # Each tick of the ADC represents this much voltage self.__mainVoltageScale = Monsoon.mainvoltageScale self.__usbVoltageScale = Monsoon.usbVoltageScale self.dropped = 0 self.bulkProcessRate = bulkProcessRate self.__packetSize = 64 self.__startTime = time.time() # Indices self.__mainCoarseIndex = 0 self.__mainFineIndex = 1 self.__usbCoarseIndex = 2 self.__usbFineIndex = 3 self.__auxCoarseIndex = 4 self.__auxFineIndex = 5 self.__mainVoltageIndex = 6 self.__usbVoltageIndex = 7 self.__timestampIndex = 10 # Output lists self.__mainCurrent = [] self.__usbCurrent = [] self.__auxCurrent = [] self.__usbVoltage = [] self.__mainVoltage = [] self.__timeStamps = [] # Output controls self.__outputConsoleMeasurements = True self.__outputTimeStamp = True self.__collectMainMeasurements = True self.__collectUSBMeasurements = False self.__collectAuxMeasurements = False self.__collectMainVoltage = True self.__collectUSBVoltage = False self.__channels = [self.__outputTimeStamp, self.__collectMainMeasurements, self.__collectUSBMeasurements, self.__collectAuxMeasurements, self.__collectMainVoltage, self.__collectUSBVoltage] self.__channelnames = ["Time(ms)", "Main(mA)", "USB(mA)", "Aux(mA)", "Main Voltage(V)", "USB Voltage(V)"] self.__channelOutputs = [self.__mainCurrent, self.__usbCurrent, self.__auxCurrent, self.__mainVoltage, self.__usbVoltage] self.__sampleCount = 0 self.__CSVOutEnable = False # Trigger Settings 
self.__startTriggerSet = False self.__stopTriggerSet = False self.__triggerChannel = channels.timeStamp self.__startTriggerLevel = 0 self.__startTriggerStyle = np.vectorize(triggers.GREATER_THAN) self.__stopTriggerLevel = triggers.SAMPLECOUNT_INFINITE self.__stopTriggerStyle = np.vectorize(triggers.GREATER_THAN) self.__sampleLimit = 50000 # output writer self.__f = None pass def setStartTrigger(self, triggerStyle, triggerLevel): """Controls the conditions when the sampleEngine starts recording measurements.""" """triggerLevel: threshold for trigger start.""" """triggerStyle: GreaterThan or Lessthan.""" self.__startTriggerLevel = triggerLevel self.__startTriggerStyle = np.vectorize(triggerStyle) pass def setStopTrigger(self, triggerstyle, triggerlevel): """Controls the conditions when the sampleEngine stops recording measurements.""" """triggerLevel: threshold for trigger stop.""" """triggerStyle: GreaterThan or Lessthan.""" self.__stopTriggerLevel = triggerlevel self.__stopTriggerStyle = np.vectorize(triggerstyle) def setTriggerChannel(self, triggerChannel): """Sets channel that controls the trigger. triggerChannel: selected from sampleEngine.channels""" self.__triggerChannel = triggerChannel def ConsoleOutput(self, boolValue): """Enables or disables the display of realtime measurements boolValue: True == Enable, False == Disable""" self.__outputConsoleMeasurements = boolValue def enableChannel(self, channel): """Enables a channel. Takes sampleEngine.channel class value as input. channel: selected from sampleEngine.channels""" self.__channels[channel] = True def disableChannel(self, channel): """Disables a channel. Takes sampleEngine.channel class value as input. 
channel: selected from sampleEngine.channels""" self.__channels[channel] = False def enableCSVOutput(self, filename): """Opens a file and causes the sampleEngine to periodically output samples when taking measurements filename: The file measurements will be output to.""" self.__outputFilename = filename self.__f = open(filename, "w") self.__CSVOutEnable = True def disableCSVOutput(self): """Closes the CSV file if open and disables CSV output.""" if (self.__f is not None): self.__f.close() self.__f = None self.__CSVOutEnable = False def __Reset(self): self.__startTriggerSet = False self.__stopTriggerSet = False; self.__sampleCount = 0 self.__mainCal.clear() self.__usbCal.clear() self.__auxCal.clear() self.__ClearOutput() def __ClearOutput(self): """Wipes away all of the old output data.""" self.__mainCurrent = [] self.__usbCurrent = [] self.__auxCurrent = [] self.__usbVoltage = [] self.__mainVoltage = [] self.__timeStamps = [] def __isCalibrated(self): """Returns true if every channel has sufficient calibration samples.""" A = self.__mainCal.calibrated() B = self.__usbCal.calibrated() C = self.__auxCal.calibrated() return A and B and C def __addMeasurement(self, channel, measurement): """Adds measurements to the global list of measurements. channel: selected from sampleEngine.channels measurement: An 1xn array of measurements. 
""" if (channel == self.__triggerChannel and not self.__startTriggerSet): self.__evalStartTrigger(measurement) elif (channel == self.__triggerChannel): self.__evalStopTrigger(measurement[::self.__granularity]) measurements = self.__getMeasurement(measurement) if (channel == channels.MainCurrent and not self.__stopTriggerSet): self.__mainCurrent.append(measurements) if (channel == channels.USBCurrent): self.__usbCurrent.append(measurements) if (channel == channels.AuxCurrent): self.__auxCurrent.append(measurements) if (channel == channels.USBVoltage): self.__usbVoltage.append(measurements) if (channel == channels.MainVoltage): self.__mainVoltage.append(measurements) if (channel == channels.timeStamp): self.__timeStamps.append(measurements) self.__sampleCount += len(measurements) def __getMeasurement(self, measurement): measurements = [] if ((self.__sampleCount + len(measurement[::self.__granularity])) > self.__sampleLimit): counter = self.__sampleCount for sample in measurement[::self.__granularity]: if (counter >= self.__sampleLimit): break measurements.append(sample) counter += 1 else: measurements = measurement return measurements def __evalStartTrigger(self, measurement): """ See if any of the measurements meet the conditions to start recording samples. measurement: a 1xn array. """ self.__startTriggerStyle(measurement, self.__startTriggerLevel) self.__startTriggerSet = np.any(self.__startTriggerStyle(measurement, self.__startTriggerLevel)) def __evalStopTrigger(self, measurement): """ See if any of the measurements meet the conditions to stop recording samples. measurement: a 1xn array of measurements. 
""" if (self.__sampleCount >= self.__sampleLimit and self.__sampleLimit is not triggers.SAMPLECOUNT_INFINITE): self.__stopTriggerSet = True if (self.__stopTriggerLevel is not triggers.SAMPLECOUNT_INFINITE): test = self.__stopTriggerStyle(measurement, self.__stopTriggerLevel) if (np.any(test)): self.__stopTriggerSet = True def __vectorProcess(self, measurements): """Translates raw ADC measurements into current values. measurements: An nxm array of integers indexed by the global channel index scheme. """ # Currents if (self.__isCalibrated()): measurements = np.array(measurements) sDebug = "" if (self.__channels[channels.MainCurrent]): # Main Coarse scale = self.monsoon.statusPacket.mainCoarseScale zeroOffset = self.monsoon.statusPacket.mainCoarseZeroOffset calRef = self.__mainCal.getRefCal(True) calZero = self.__mainCal.getZeroCal(True) zeroOffset += calZero if (calRef - zeroOffset != 0): slope = scale / (calRef - zeroOffset) else: slope = 0 Raw = measurements[:, self.__mainCoarseIndex] - zeroOffset mainCoarseCurrents = Raw * slope # Main Fine scale = self.monsoon.statusPacket.mainFineScale zeroOffset = self.monsoon.statusPacket.mainFineZeroOffset calRef = self.__mainCal.getRefCal(False) calZero = self.__mainCal.getZeroCal(False) zeroOffset += calZero if (calRef - zeroOffset != 0): slope = scale / (calRef - zeroOffset) else: slope = 0 Raw = measurements[:, self.__mainFineIndex] - zeroOffset mainFinecurrents = Raw * slope / 1000 mainCurrent = np.where(measurements[:, self.__mainFineIndex] < self.__fineThreshold, mainFinecurrents, mainCoarseCurrents) self.__addMeasurement(channels.MainCurrent, mainCurrent) # self.__mainCurrent.append(mainCurrent) sDebug = "Main Current: {:6.2f} mA".format(mainCurrent[0]) if (self.__channels[channels.USBCurrent]): # USB Coarse scale = self.monsoon.statusPacket.usbCoarseScale zeroOffset = self.monsoon.statusPacket.usbCoarseZeroOffset calRef = self.__usbCal.getRefCal(True) calZero = self.__usbCal.getZeroCal(True) zeroOffset += calZero if 
(calRef - zeroOffset != 0): slope = scale / (calRef - zeroOffset) else: slope = 0 Raw = measurements[:, self.__usbCoarseIndex] - zeroOffset usbCoarseCurrents = Raw * slope # USB Fine scale = self.monsoon.statusPacket.usbFineScale zeroOffset = self.monsoon.statusPacket.usbFineZeroOffset calRef = self.__usbCal.getRefCal(False) calZero = self.__usbCal.getZeroCal(False) zeroOffset += calZero if (calRef - zeroOffset != 0): slope = scale / (calRef - zeroOffset) else: slope = 0 Raw = measurements[:, self.__usbFineIndex] - zeroOffset usbFineCurrents = Raw * slope / 1000 usbCurrent = np.where(measurements[:, self.__usbFineIndex] < self.__fineThreshold, usbFineCurrents, usbCoarseCurrents) self.__addMeasurement(channels.USBCurrent, usbCurrent) # self.__usbCurrent.append(usbCurrent) sDebug = sDebug + ", USB Current: {:6.2f} mA".format(usbCurrent[0]) if (self.__channels[channels.AuxCurrent]): # Aux Coarse scale = self.monsoon.statusPacket.auxCoarseScale zeroOffset = 0 calRef = self.__auxCal.getRefCal(True) calZero = self.__auxCal.getZeroCal(True) zeroOffset += calZero if (calRef - zeroOffset != 0): slope = scale / (calRef - zeroOffset) else: slope = 0 Raw = measurements[:, self.__auxCoarseIndex] - zeroOffset auxCoarseCurrents = Raw * slope # Aux Fine scale = self.monsoon.statusPacket.auxFineScale zeroOffset = 0 calRef = self.__auxCal.getRefCal(False) calZero = self.__auxCal.getZeroCal(False) zeroOffset += calZero if (calRef - zeroOffset != 0): slope = scale / (calRef - zeroOffset) else: slope = 0 Raw = measurements[:, self.__auxFineIndex] - zeroOffset auxFineCurrents = Raw * slope / 1000 auxCurrent = np.where(measurements[:, self.__auxFineIndex] < self.__auxFineThreshold, auxFineCurrents, auxCoarseCurrents) self.__addMeasurement(channels.AuxCurrent, auxCurrent) # self.__auxCurrent.append(auxCurrent) sDebug = sDebug + ", Aux Current: {:6.2f} mA".format(auxCurrent[0]) # Voltages if (self.__channels[channels.MainVoltage]): mainVoltages = measurements[:, self.__mainVoltageIndex] * 
self.__ADCRatio * self.__mainVoltageScale self.__addMeasurement(channels.MainVoltage, mainVoltages) # self.__mainVoltage.append(mainVoltages) sDebug = sDebug + ", Main Voltage: {:4.2f} V".format(mainVoltages[0]) if (self.__channels[channels.USBVoltage]): usbVoltages = measurements[:, self.__usbVoltageIndex] * self.__ADCRatio * self.__usbVoltageScale self.__addMeasurement(channels.USBVoltage, usbVoltages) # self.__usbVoltage.append(usbVoltages) sDebug = sDebug + ", USB Voltage: {:4.2f} V".format(usbVoltages[0]) timeStamp = measurements[:, self.__timestampIndex] self.__addMeasurement(channels.timeStamp, timeStamp) # self.__timeStamps.append(timeStamp) sDebug = sDebug + ", Dropped: {:6d}".format(self.dropped) sDebug = sDebug + ", Total Sample Count: " + repr(self.__sampleCount) if (self.__outputConsoleMeasurements): time_str = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f') print(time_str + ' ' + sDebug) if not self.__startTriggerSet: self.__ClearOutput() def __processPacket(self, measurements): """Separates received packets into ZeroCal, RefCal, and measurement samples. 
measurements: an nxm array of swizzled packets from the Power Monitor """ Samples = [] for measurement in measurements: self.dropped = measurement[0] flags = measurement[1] numObs = measurement[2] offset = 3 for _ in range(0, numObs): sample = measurement[offset:offset + 10] sample.append(measurement[len(measurement) - 1]) sampletype = sample[8] & 0x30 if (sampletype == ops.SampleType.ZeroCal): self.__processZeroCal(sample) elif (sampletype == ops.SampleType.refCal): self.__processRefCal(sample) elif (sampletype == ops.SampleType.Measurement): Samples.append(sample) offset += 10 return Samples def __startupCheck(self, verbose=False): """Verify the sample engine is setup to start.""" if (verbose): print("Verifying ready to start up") print("Calibrating...") Samples = [[0 for _ in range(self.__packetSize + 1)] for _ in range(self.bulkProcessRate)] while (not self.__isCalibrated() and self.__sampleCount < 20000): self.__sampleLoop(0, Samples, 1) self.getSamples() if not self.__isCalibrated(): print("Connection error, failed to calibrate after 20,000 samples") return False if not self.__channels[self.__triggerChannel]: print("Error: Trigger channel not enabled.") return False return True def __processZeroCal(self, meas): """Adds raw measurement data to the zeroCal tracker meas: Zerocal measurements indexed by the global channel index scheme. """ self.__mainCal.addZeroCal(meas[self.__mainCoarseIndex], True) self.__mainCal.addZeroCal(meas[self.__mainFineIndex], False) self.__usbCal.addZeroCal(meas[self.__usbCoarseIndex], True) self.__usbCal.addZeroCal(meas[self.__usbFineIndex], False) self.__auxCal.addZeroCal(meas[self.__auxCoarseIndex], True) self.__auxCal.addZeroCal(meas[self.__auxFineIndex], False) return True def __processRefCal(self, meas): """Adds raw measurement data to the refcal tracker meas: RefCal measurements indexed by the global channel index scheme. 
""" self.__mainCal.addRefCal(meas[self.__mainCoarseIndex], True) self.__mainCal.addRefCal(meas[self.__mainFineIndex], False) self.__usbCal.addRefCal(meas[self.__usbCoarseIndex], True) self.__usbCal.addRefCal(meas[self.__usbFineIndex], False) self.__auxCal.addRefCal(meas[self.__auxCoarseIndex], True) self.__auxCal.addRefCal(meas[self.__auxFineIndex], False) return True def getSamples(self): """Returns samples in a Python list. Format is: [timestamp, main, usb, aux,mainVolts,usbVolts].""" result = self.__arrangeSamples(True) return result def __outputToCSV(self, callback=None): """This is intended to be called periodically during sampling. The alternative is to store measurements in an array or queue, which will overflow allocated memory within a few hours depending on system settings. Writes measurements to a CSV file""" output = self.__arrangeSamples() if callback: callback(output) for i in range(len(output[0])): sOut = "" for j in range(len(output)): sOut = sOut + repr(output[j][i]) + "," sOut = sOut + "\n" self.__f.write(sOut) def __outputToCSV2(self, output): """This is intended to be called periodically during sampling. The alternative is to store measurements in an array or queue, which will overflow allocated memory within a few hours depending on system settings. Writes measurements to a CSV file""" for i in range(len(output[0])): sOut = "" for j in range(len(output)): sOut = sOut + repr(output[j][i]) + "," sOut = sOut + "\n" self.__f.write(sOut) def __arrangeSamples(self, exportAllIndices=False): """Arranges output lists so they're a bit easier to process. exportAllIndices: Populates the list with every channel, even if no measurements are stored for that channel. 
Useful for making sure the indices in sampleEngine.channels match the output from this function.""" output = [] times = [] for data in self.__timeStamps: for measurement in data: times.append(measurement) output.append(times) self.__timeStamps = [] if (self.__channels[channels.MainCurrent] or exportAllIndices): main = [] for data in self.__mainCurrent: for measurement in data: main.append(measurement) output.append(main) self.__mainCurrent = [] if (self.__channels[channels.USBCurrent] or exportAllIndices): usb = [] for data in self.__usbCurrent: for measurement in data: usb.append(measurement) output.append(usb) self.__usbCurrent = [] if (self.__channels[channels.AuxCurrent] or exportAllIndices): Aux = [] for data in self.__auxCurrent: for measurement in data: Aux.append(measurement) output.append(Aux) self.__auxCurrent = [] if (self.__channels[channels.MainVoltage] or exportAllIndices): volts = [] for data in self.__mainVoltage: for measurement in data: volts.append(measurement) output.append(volts) self.__mainVoltage = [] if (self.__channels[channels.USBVoltage] or exportAllIndices): volts = [] for data in self.__usbVoltage: for measurement in data: volts.append(measurement) output.append(volts) self.__usbVoltage = [] return output def outputCSVHeaders(self): """Creates column headers in the CSV output file for each enabled channel.""" for i in range(len(self.__channelnames)): if (self.__channels[i]): self.__f.write((self.__channelnames[i] + ",")) self.__f.write("\n") def __sampleLoop(self, S, Samples, ProcessRate, legacy_timestamp=False): """ Collects and processes samples in batches. Numpy makes processing large numbers of samples in batches much faster than processing them as they're received. Useful in avoiding dropped samples. S: The number of samples in the current batch. Samples: An array that will be populated with samples. ProcessRate: Number of samples per batch. Should be a power of 2 for best results. 
legacy_timestamp: if true, use time.time() for timestamp instead of currentTime - startTime """ buffer = self.monsoon.BulkRead() for start in range(0, len(buffer), 64): if (self.__stopTriggerSet): break buf = buffer[start:start + 64] Sample = self.monsoon.swizzlePacket(buf) numSamples = Sample[2] if (legacy_timestamp): Sample.append(int(time.time())) else: Sample.append(time.time() - self.__startTime) Samples[S] = Sample S += numSamples if (S >= ProcessRate): bulkPackets = self.__processPacket(Samples) if (len(bulkPackets) > 0): self.__vectorProcess(bulkPackets) S = 0 return S def startSampling(self, samples=5000, granularity=1, legacy_timestamp=False, calTime=1250, output_callback=None): """Handle setup for sample collection. samples: Number of samples to collect, independent of the stop trigger. sampleEngine.triggers.SAMPLECOUNT_INFINITE to function solely through triggers. granularity: Samples to store. 1 = 1:1, 10 = store 1 out of every 10 samples, etc. legacy_timestamp: if true, use time.time() for timestamp instead of currentTime - startTime """ try: self.__Reset() self.__granularity = granularity self.__sampleLimit = samples Samples = [[0 for _ in range(self.__packetSize + 1)] for _ in range(self.bulkProcessRate)] S = 0 debugcount = 0 minutes = 0 granularity_index = 0 csvOutRateLimit = True csvOutThreshold = self.bulkProcessRate / 2 self.__startTime = time.time() if (self.__CSVOutEnable): self.outputCSVHeaders() self.monsoon.StartSampling(calTime, triggers.SAMPLECOUNT_INFINITE) if not self.__startupCheck(False): print("Error: startCheck failed") self.monsoon.stopSampling() while not self.__stopTriggerSet: S = self.__sampleLoop(S, Samples, self.bulkProcessRate, legacy_timestamp) if self.__startTriggerSet: output = self.__arrangeSamples() if S >= csvOutThreshold and self.__CSVOutEnable: self.__outputToCSV2(output) output_callback(output) if (S == 0): csvOutRateLimit = True Samples = [[0 for _ in range(self.__packetSize + 1)] for _ in 
range(self.bulkProcessRate)] self.monsoon.stopSampling() if (self.__CSVOutEnable): self.__outputToCSV() self.disableCSVOutput() except KeyboardInterrupt: print("Caught keyboard interrupt, test ending adruptly.") self.monsoon.stopSampling() if (self.__CSVOutEnable): self.__outputToCSV() self.disableCSVOutput() except usb.core.USBError: print("Caught disconnection event. Test restarting with default parameters") self.monsoon.Reconnect() self.monsoon.stopSampling() if (self.__CSVOutEnable): self.__outputToCSV() self.disableCSVOutput() self.enableCSVOutput(self.__outputFilename) self.startSampling(samples, granularity, legacy_timestamp, calTime, output_callback) except Exception as e: print("Error: Unknown exception caught. Test failed.") self.monsoon.stopSampling() if (self.__CSVOutEnable): self.__outputToCSV() self.disableCSVOutput() raise Exception(e) def periodicStartSampling(self, calTime=1250): """Causes the Power Monitor to enter sample mode, but doesn't actively collect samples. Call periodicCollectSamples() periodically get measurements. """ self.__Reset() self.__sampleLimit = triggers.SAMPLECOUNT_INFINITE self.__granularity = 1 if (self.__CSVOutEnable): self.outputCSVHeaders() Samples = [[0 for _ in range(self.__packetSize + 1)] for _ in range(self.bulkProcessRate)] self.__startTime = time.time() self.monsoon.StartSampling(calTime, triggers.SAMPLECOUNT_INFINITE) if not self.__startupCheck(): self.monsoon.stopSampling() return False result = self.getSamples() return result def periodicCollectSamples(self, samples=100, legacy_timestamp=False): """Start sampling with periodicStartSampling(), then call this to collect samples. Returns the most recent measurements made by the Power Monitor. samples: Number of samples to collect. legacy_timestamp: if true, use time.time() for timestamp instead of currentTime - startTime""" # TODO: This normally returns 3-5 samples over the requested number of samples. 
self.__sampleCount = 0 self.__sampleLimit = samples self.__stopTriggerSet = False self.monsoon.BulkRead() # Clear out stale buffer Samples = [[0 for _ in range(self.__packetSize + 1)] for _ in range(1)] while not self.__stopTriggerSet: S = self.__sampleLoop(0, Samples, 1, legacy_timestamp) if (self.__CSVOutEnable and self.__startTriggerSet): self.__outputToCSV() # Note that this will cause the script to return nothing. result = self.getSamples() return result def periodicStopSampling(self, closeCSV=False): """Performs cleanup tasks when finished sampling. closeCSV: Closes the CSV file along with exiting sample mode.""" if (self.__CSVOutEnable and self.__startTriggerSet): self.__outputToCSV() if (closeCSV): self.disableCSVOutput() self.monsoon.stopSampling()
{ "alphanum_fraction": 0.6107328526, "author": null, "avg_line_length": 43.5891812865, "converted": null, "ext": "py", "file": null, "hexsha": "ed36880f7cecc2cd7b4b9782ee538817bf3c1170", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5e1b40cb2332073da6cd8a52bbba2712ae30f7bd", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "csarron/MobileAccelerator", "max_forks_repo_path": "energy/Monsoon/sampleEngine.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "5e1b40cb2332073da6cd8a52bbba2712ae30f7bd", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "csarron/MobileAccelerator", "max_issues_repo_path": "energy/Monsoon/sampleEngine.py", "max_line_length": 161, "max_stars_count": 2, "max_stars_repo_head_hexsha": "5e1b40cb2332073da6cd8a52bbba2712ae30f7bd", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "csarron/MobileAccelerator", "max_stars_repo_path": "energy/Monsoon/sampleEngine.py", "max_stars_repo_stars_event_max_datetime": "2021-10-29T22:49:09.000Z", "max_stars_repo_stars_event_min_datetime": "2021-07-25T01:10:03.000Z", "num_tokens": 6409, "path": null, "reason": "import numpy,import scipy", "repo": null, "save_path": null, "sha": null, "size": 29815 }
/* * Copyright 2016 The Cartographer Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cmath> #include <iostream> #include <string> #include <Eigen/Core> #include <Eigen/Geometry> #include "icp_localization/transform/math.hpp" #include "icp_localization/common/time.hpp" #include "icp_localization/common/output.hpp" #include "icp_localization/common/math.hpp" namespace icp_loco { template <typename FloatType> class Rigid3 { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW using Vector = Eigen::Matrix<FloatType, 3, 1>; using Quaternion = Eigen::Quaternion<FloatType>; using AngleAxis = Eigen::AngleAxis<FloatType>; Rigid3() : translation_(Vector::Zero()), rotation_(Quaternion::Identity()) {} Rigid3(const Vector& translation, const Quaternion& rotation) : translation_(translation), rotation_(rotation) {} Rigid3(const Vector& translation, const AngleAxis& rotation) : translation_(translation), rotation_(rotation) {} static Rigid3 Rotation(const AngleAxis& angle_axis) { return Rigid3(Vector::Zero(), Quaternion(angle_axis)); } static Rigid3 Rotation(const Quaternion& rotation) { return Rigid3(Vector::Zero(), rotation); } static Rigid3 Translation(const Vector& vector) { return Rigid3(vector, Quaternion::Identity()); } static Rigid3 FromArrays(const std::array<FloatType, 4>& rotation, const std::array<FloatType, 3>& translation) { return Rigid3(Eigen::Map<const Vector>(translation.data()), Eigen::Quaternion<FloatType>(rotation[0], rotation[1], rotation[2], 
rotation[3])); } static Rigid3<FloatType> Identity() { return Rigid3<FloatType>(); } template <typename OtherType> Rigid3<OtherType> cast() const { return Rigid3<OtherType>(translation_.template cast<OtherType>(), rotation_.template cast<OtherType>()); } const Vector& translation() const { return translation_; } const Quaternion& rotation() const { return rotation_; } Vector& translation() { return translation_; } Quaternion& rotation() { return rotation_; } Rigid3 inverse() const { const Quaternion rotation = rotation_.conjugate(); const Vector translation = -(rotation * translation_); return Rigid3(translation, rotation); } std::string asString() const { const double kRadToDeg = 180.0 / M_PI; const auto &t = translation(); const auto &q = rotation(); const std::string trans = string_format("t:[%f, %f, %f]", t.x(), t.y(), t.z()); const std::string rot = string_format("q:[%f, %f, %f, %f]",q.x(), q.y(), q.z(), q.w()); const auto rpy = toRPY(q) * kRadToDeg; const std::string rpyString = string_format("rpy (deg):[%f, %f, %f]",rpy.x(), rpy.y(),rpy.z()); return trans + " ; " + rot + " ; " + rpyString; } bool isValid() const { return !std::isnan(translation_.x()) && !std::isnan(translation_.y()) && !std::isnan(translation_.z()) && std::abs(FloatType(1) - rotation_.norm()) < FloatType(1e-3); } private: Vector translation_; Quaternion rotation_; }; template <typename FloatType> Rigid3<FloatType> operator*(const Rigid3<FloatType>& lhs, const Rigid3<FloatType>& rhs) { return Rigid3<FloatType>( lhs.rotation() * rhs.translation() + lhs.translation(), (lhs.rotation() * rhs.rotation()).normalized()); } template <typename FloatType> typename Rigid3<FloatType>::Vector operator*( const Rigid3<FloatType>& rigid, const typename Rigid3<FloatType>::Vector& point) { return rigid.rotation() * point + rigid.translation(); } using Rigid3d = Rigid3<double>; using Rigid3f = Rigid3<float>; // Converts (roll, pitch, yaw) to a unit length quaternion. 
Based on the URDF // specification http://wiki.ros.org/urdf/XML/joint. struct TimestampedTransform { Time time_; Rigid3d transform_; }; TimestampedTransform interpolate(const TimestampedTransform& start, const TimestampedTransform& end, const Time &time); } // namespace icp_loco
{ "alphanum_fraction": 0.6698697416, "author": null, "avg_line_length": 33.2127659574, "converted": null, "ext": "hpp", "file": null, "hexsha": "2743b1c2965fc356e2db5c413a6432a95c7538d7", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 22, "max_forks_repo_forks_event_max_datetime": "2022-03-11T03:14:10.000Z", "max_forks_repo_forks_event_min_datetime": "2021-06-23T09:18:01.000Z", "max_forks_repo_head_hexsha": "271d99c59141fcd293190ec935020213783745e5", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "ibrahimhroob/icp_localization", "max_forks_repo_path": "include/icp_localization/transform/RigidTransform.hpp", "max_issues_count": 3, "max_issues_repo_head_hexsha": "271d99c59141fcd293190ec935020213783745e5", "max_issues_repo_issues_event_max_datetime": "2022-02-16T09:54:42.000Z", "max_issues_repo_issues_event_min_datetime": "2021-06-09T20:06:41.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "ibrahimhroob/icp_localization", "max_issues_repo_path": "include/icp_localization/transform/RigidTransform.hpp", "max_line_length": 99, "max_stars_count": 72, "max_stars_repo_head_hexsha": "271d99c59141fcd293190ec935020213783745e5", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "ibrahimhroob/icp_localization", "max_stars_repo_path": "include/icp_localization/transform/RigidTransform.hpp", "max_stars_repo_stars_event_max_datetime": "2022-03-25T08:21:07.000Z", "max_stars_repo_stars_event_min_datetime": "2021-07-06T09:05:00.000Z", "num_tokens": 1127, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 4683 }
import numpy as np
from scipy.linalg import toeplitz, hankel
# NOTE: the previous astropy / scipy.optimize imports served only a broken,
# dead code path (reading a hard-coded image to locate the PSF center) and
# have been removed along with that code.


def kronDecomp(P, center, BC='zero'):
    """Kronecker (separable) decomposition of a PSF.

    Splits a separable point spread function P into a column filter c and a
    row filter r with P ~= np.outer(c, r) (via the rank-1 SVD truncation),
    then builds the 1-D blurring matrices so that blurring an image X is
    Ac @ X @ Ar.T.

    Parameters
    ----------
    P : (m, n) array_like
        Point spread function.
    center : sequence of two ints
        0-based (row, col) index of the PSF center.
    BC : {'zero', 'reflexive', 'periodic'}
        Boundary condition used to build the blurring matrices.

    Returns
    -------
    (Ar, Ac) : tuple of ndarray
        Row and column blurring matrices.

    Raises
    ------
    ValueError
        If BC is not one of the supported boundary conditions.
    """
    P = np.asarray(P, dtype=float)
    center1, center2 = int(center[0]), int(center[1])

    # numpy returns the singular values as a 1-D array and V transposed
    # compared to MATLAB's svd.
    U, S, Vt = np.linalg.svd(P, full_matrices=True)
    V = Vt.T

    # P is separable iff it is (numerically) rank one.
    if S[1] / S[0] > np.sqrt(np.finfo(float).eps):
        print("Attention, le PSF n'est pas séparable")

    # Fix the SVD sign indeterminacy so c and r come out nonnegative for a
    # nonnegative PSF: flip when the first left singular vector is
    # uniformly nonpositive.
    minU = abs(np.min(U[:, 0]))
    maxU = np.max(np.abs(U[:, 0]))
    if minU == maxU:
        U = -U
        V = -V

    c = np.sqrt(S[0]) * U[:, 0]
    r = np.sqrt(S[0]) * V[:, 0]

    if BC == 'zero':
        Ar = build_Toeplitz(r, center2)
        Ac = build_Toeplitz(c, center1)
    elif BC == 'reflexive':
        Ar = build_Toeplitz(r, center2) + buildHank(r, center2)
        Ac = build_Toeplitz(c, center1) + buildHank(c, center1)
    elif BC == 'periodic':
        Ar = buildCirc(r, center2)
        Ac = buildCirc(c, center1)
    else:
        raise ValueError("Erreur, condition de bord invalide")
    return (Ar, Ac)


def build_Toeplitz(c, k):
    """Toeplitz (zero-boundary) blurring matrix for 1-D filter c centered at index k.

    T[i, j] = c[i - j + k] when that index is in range, else 0.
    """
    c = np.asarray(c, dtype=float).ravel()
    n = c.size
    col = np.zeros(n)
    row = np.zeros(n)
    col[:n - k] = c[k:]
    row[:k + 1] = c[k::-1]
    return toeplitz(col, row)


# Backward-compatible alias for the original (misspelled) helper name.
build_Topelitz = build_Toeplitz


def buildCirc(c, k):
    """Circulant (periodic-boundary) blurring matrix for filter c centered at index k."""
    c = np.asarray(c, dtype=float).ravel()
    # First column / first row wrap the filter around the boundary.
    col = np.concatenate((c[k:], c[:k]))
    row = np.concatenate((c[k::-1], c[:k:-1]))
    return toeplitz(col, row)


def buildHank(c, k):
    """Hankel correction term for reflexive boundary conditions.

    Added to the Toeplitz matrix, it mirrors the filter at the edges.
    """
    c = np.asarray(c, dtype=float).ravel()
    n = c.size
    col = np.zeros(n)
    col[:n - k - 1] = c[k + 1:]
    row = np.zeros(n)
    row[n - k:] = c[:k]
    return hankel(col, row)
{ "alphanum_fraction": 0.5500982318, "author": null, "avg_line_length": 22.1304347826, "converted": null, "ext": "py", "file": null, "hexsha": "769b70ab1787db2c5bf3b51a4bbd903fd745ad5e", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0160dc4858f249655c5d16298ad1d7990600504f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Hermann-web/deconvolution-methods-for-image-debluring", "max_forks_repo_path": "deconvolution-algorithms/Fonction KronDecomp.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "0160dc4858f249655c5d16298ad1d7990600504f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Hermann-web/deconvolution-methods-for-image-debluring", "max_issues_repo_path": "deconvolution-algorithms/Fonction KronDecomp.py", "max_line_length": 67, "max_stars_count": null, "max_stars_repo_head_hexsha": "0160dc4858f249655c5d16298ad1d7990600504f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Hermann-web/deconvolution-methods-for-image-debluring", "max_stars_repo_path": "deconvolution-algorithms/Fonction KronDecomp.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 710, "path": null, "reason": "import numpy,from scipy,from astropy", "repo": null, "save_path": null, "sha": null, "size": 2036 }
import Playground.Data.BinaryTree.Basic
import Playground.Data.List.Is_Sorted
import Mathlib.Data.List.Perm
import Mathlib.Init.Algebra.Order

namespace Data.BinaryTree

section
variable {type} [LinearOrder type]

/-- `tree.IsBST` states that `tree` is a binary search tree: every node's left
subtree is a BST whose elements are `≤` the node's value, and its right
subtree is a BST whose elements are `≥` the node's value. -/
inductive IsBST : BinaryTree type → Prop
| nil : nil.IsBST
| node {value left right}
    (left_IsBST : left.IsBST) (left_le : ∀ x ∈ left, x ≤ value)
    (right_IsBST : right.IsBST) (right_ge : ∀ x ∈ right, value ≤ x)
    : (node value left right).IsBST

/-- Unfolding lemma: a `node` is a BST iff both subtrees are BSTs and their
elements sit on the correct side of the root value. -/
theorem IsBST_iff (value : type) (left right)
    : (node value left right).IsBST ↔
      (left.IsBST ∧ ∀ x ∈ left, x ≤ value) ∧ (right.IsBST ∧ ∀ x ∈ right, value ≤ x) := by
  constructor
  . intro h; cases h with
    | node _ _ _ _ =>
      constructor
      . constructor; assumption; assumption
      . constructor; assumption; assumption
  . intro ⟨⟨_, _⟩, ⟨_, _⟩⟩
    apply IsBST.node; assumption; assumption; assumption; assumption

/-- A tree is a BST iff its in-order traversal is a sorted list. -/
theorem IsBST_iff_infixList_IsSorted (tree : BinaryTree type)
    : tree.IsBST ↔ tree.infixList.IsSorted := by
  induction tree with
  | nil =>
    constructor
    . intro _; constructor
    . intro _; constructor
  | node value left right left_rec right_rec =>
    rw [IsBST_iff]
    -- The in-order list of a node splits as left ++ value :: right.
    show _ ↔ (left.infixList ++ value :: right.infixList).IsSorted
    rw [List.append_cons_IsSorted_iff_left_IsSorted_and_forall_le_and_right_IsSorted_and_forall_ge]
    rw [left_rec, left.forall_iff_forall_infixList]
    rw [right_rec, right.forall_iff_forall_infixList]

end

section
variable {type} [LinearOrder type]

/-- Insert `x` into `tree` using the standard BST insertion: descend left on
`x ≤ value`, right otherwise. Does not require `tree` to be a BST. -/
def insertAsInBST (tree : BinaryTree type) (x : type) : BinaryTree type :=
  match tree with
  | nil => node x nil nil
  | node value left right =>
    if x ≤ value then node value (left.insertAsInBST x) right
    else node value left (right.insertAsInBST x)

/-- Membership after insertion: `x` is in `tree.insertAsInBST v` iff it was
already in `tree` or it equals `v`. -/
theorem Mem_insertAsInBST_iff (tree : BinaryTree type) (v : type) (x : type)
    : x ∈ tree.insertAsInBST v ↔ x ∈ tree ∨ x = v :=
  match tree with
  | nil => ⟨λ (.eq hx) => .inr hx, λ (.inr hx) => .eq hx⟩
  | node value left right =>
    have left_rec : x ∈ _ ↔ x ∈ _ ∨ x = v := left.Mem_insertAsInBST_iff _ _
    have right_rec : x ∈ _ ↔ x ∈ _ ∨ x = v := right.Mem_insertAsInBST_iff _ _
    -- Case on which subtree the insertion descended into.
    show x ∈ ite .. ↔ _ from
    if h_v : v ≤ value then if_pos h_v ▸ ⟨
      λ
      | .eq hx => .inl (Mem.eq hx)
      | .left hx =>
        match left_rec.mp hx with
        | .inl hx => .inl hx.left
        | .inr hx => .inr hx
      | .right hx => .inl hx.right
      , λ
      | .inl hx =>
        match hx with
        | .eq hx => .eq hx
        | .left hx => .left $ left_rec.mpr $ .inl hx
        | .right hx => hx.right
      | .inr hx => .left $ left_rec.mpr $ .inr hx⟩
    else if_neg h_v ▸ ⟨
      λ
      | .eq hx => .inl (Mem.eq hx)
      | .left hx => .inl hx.left
      | .right hx =>
        match right_rec.mp hx with
        | .inl hx => .inl hx.right
        | .inr hx => .inr hx
      , λ
      | .inl hx =>
        match hx with
        | .eq hx => .eq hx
        | .left hx => hx.left
        | .right hx => .right $ right_rec.mpr $ .inl hx
      | .inr hx => .right $ right_rec.mpr $ .inr hx⟩

/-- BST insertion preserves the BST invariant. -/
theorem insertAsInBST_IsBST_of_IsBST {tree : BinaryTree type} (h_tree : tree.IsBST) (x : type)
    : (tree.insertAsInBST x).IsBST :=
  match tree, h_tree with
  | nil, _ => .node .nil (λ _ => λ.) .nil (λ _ => λ.)
  | node value left right, .node left_IsBST left_le right_IsBST right_ge =>
    show IsBST (ite ..) from
    if h_value : x ≤ value then if_pos h_value ▸
      -- Inserted on the left: the new left subtree is a BST and its elements
      -- (old ones, or x itself) are still ≤ value.
      have left_IsBST := left.insertAsInBST_IsBST_of_IsBST left_IsBST x
      have left_le := λ x hx =>
        match (left.Mem_insertAsInBST_iff ..).mp hx with
        | .inl hx => left_le x hx
        | .inr hx => hx ▸ h_value
      .node left_IsBST left_le right_IsBST right_ge
    else if_neg h_value ▸
      -- Inserted on the right: symmetric, using value ≤ x from ¬(x ≤ value).
      have h_value := le_of_lt (lt_of_not_ge h_value)
      have right_IsBST := right.insertAsInBST_IsBST_of_IsBST right_IsBST x
      have right_ge := λ x hx =>
        match (right.Mem_insertAsInBST_iff ..).mp hx with
        | .inl hx => right_ge x hx
        | .inr hx => hx ▸ h_value
      .node left_IsBST left_le right_IsBST right_ge

/-- The in-order traversal after insertion is a permutation of `x` consed onto
the original in-order traversal. -/
theorem insertAsInBST_infixList_Perm_cons_infixList (tree : BinaryTree type) (x : type)
    : (tree.insertAsInBST x).infixList ~ x :: tree.infixList :=
  open List in
  match tree with
  | nil => List.Perm.refl [x]
  | node value _ _ =>
    show infixList (ite ..) ~ _ from
    if h_value : x ≤ value then if_pos h_value ▸
      (insertAsInBST_infixList_Perm_cons_infixList ..).append_right _
    else if_neg h_value ▸
      ((((insertAsInBST_infixList_Perm_cons_infixList ..).cons _).trans
        (List.Perm.swap ..).symm).append_left _).trans List.perm_middle

/-- BST search: compare with the root and descend into the matching subtree.
Only meaningful on trees satisfying `IsBST`. -/
def searchAsInBST (tree : BinaryTree type) (x : type) : Bool :=
  match tree with
  | .nil => false
  | .node value left right =>
    if value = x then true
    else if x < value then left.searchAsInBST x
    else right.searchAsInBST x

/-- On a BST, `searchAsInBST` decides membership. -/
theorem searchAsInBST_eq_decide_Mem_of_IsBST {tree : BinaryTree type} (h_tree : tree.IsBST) (x : type)
    : tree.searchAsInBST x = decide (x ∈ tree) :=
  match tree, h_tree with
  | nil, _ => rfl
  | node value left right, .node left_is_bst left_le right_is_bst right_ge =>
    show ite .. = ite .. from
    if h_e : value = x then if_pos h_e ▸ if_pos (Mem.eq h_e.symm) ▸ rfl
    else if h_l : x < value then
      -- x < value: search went left; the BST bounds rule out the right subtree.
      if_neg h_e ▸ if_pos h_l ▸
      searchAsInBST_eq_decide_Mem_of_IsBST left_is_bst _ ▸
      show ite .. = _ from
      if v_in_left : x ∈ left then if_pos v_in_left ▸ if_pos v_in_left.left ▸ rfl
      else if_neg v_in_left ▸ if_neg (show ¬x ∈ node value left right from λ
        | .eq h => h_e h.symm
        | .left h => v_in_left h
        | .right h => not_le_of_gt h_l $ right_ge x h
        ) ▸ rfl
    else
      -- value < x: search went right; symmetric argument.
      have h_r : value < x := lt_of_le_of_ne (le_of_not_lt h_l) h_e
      if_neg h_e ▸ if_neg h_l ▸
      searchAsInBST_eq_decide_Mem_of_IsBST right_is_bst _ ▸
      show ite .. = _ from
      if v_in_right : x ∈ right then if_pos v_in_right ▸ if_pos v_in_right.right ▸ rfl
      else if_neg v_in_right ▸ if_neg (show ¬x ∈ node value left right from λ
        | .eq h => h_e h.symm
        | .left h => not_le_of_gt h_r $ left_le x h
        | .right h => v_in_right h
        ) ▸ rfl

end

end Data.BinaryTree
{ "alphanum_fraction": null, "author": "michelsol", "avg_line_length": null, "converted": null, "ext": null, "file": null, "hexsha": null, "include": null, "lang": null, "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": "github-repos/lean/michelsol-lean-playground/lean-playground-0bfffb7bd41729fb9f95974e93f6ecbc0b6e59ca/Playground/Data/BinaryTree/IsBST.lean", "reason": null, "repo": "lean-playground", "save_path": "github-repos/lean/michelsol-lean-playground", "sha": "0bfffb7bd41729fb9f95974e93f6ecbc0b6e59ca", "size": null }
\documentclass{article}

\title{Mystery Readme}
\date{\today}
\author{Paul Jones\\ Computer Architecture (01:198:211) \\ School of Arts and Sciences \\ Rutgers University}

\begin{document}

	\maketitle

	\section{My Process}

	The way that I worked out what \texttt{mystery.s} does was by first creating a \texttt{mystery.c} file and commenting in all of \texttt{mystery.s}. This allowed me to interact directly with the assembly code, adding comments as insights came to me and tracing through it.

	I also compiled an executable and gathered information about the function calls and logic by performing an \texttt{objdump} and a \texttt{readelf} on it. This allowed me to see some of the logic flow and the function names. This information, together with my own understanding of the \texttt{mystery.s} file from my commented \texttt{mystery.c} (which has been handed in for reference), allowed me to fully reproduce the assembly code in C (``decompiling'').

	\section{Compiler Optimization}

	While it is impossible to irrefutably ascertain what the original C code was from the compiled assembly, it is highly likely that there were more variables than there were blocks of memory claimed for use by the program. The reason this is possible is that the compiler knows ahead of time how many registers will be required, and can (usually) make the number of registers less than the total number of variables in the program, moving the literals on and off as they are needed. This made the assembly frustrating to read, because registers previously used for one purpose sometimes suddenly become re-commissioned for a new purpose (and then back again).

\end{document}
{ "alphanum_fraction": 0.7887748944, "author": null, "avg_line_length": 42.4871794872, "converted": null, "ext": "tex", "file": null, "hexsha": "5b4d2d664d00447ea3f2fa0f82dfbdc3e2cff650", "include": null, "lang": "TeX", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 14, "max_forks_repo_forks_event_max_datetime": "2018-07-23T15:12:55.000Z", "max_forks_repo_forks_event_min_datetime": "2016-04-28T15:35:11.000Z", "max_forks_repo_head_hexsha": "8c6dfcdcf86b1cb2c3742d8dbe228ae1c13a8377", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "PLJNS/Rutgers-Comp-Arch-Spring-2013", "max_forks_repo_path": "Assembly Language Programming/Mystery/Paul Jones - Mystery Readme/readme.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "8c6dfcdcf86b1cb2c3742d8dbe228ae1c13a8377", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "PLJNS/Rutgers-Comp-Arch-Spring-2013", "max_issues_repo_path": "Assembly Language Programming/Mystery/Paul Jones - Mystery Readme/readme.tex", "max_line_length": 109, "max_stars_count": 3, "max_stars_repo_head_hexsha": "8c6dfcdcf86b1cb2c3742d8dbe228ae1c13a8377", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "PLJNS/Rutgers-Comp-Arch-Spring-2013", "max_stars_repo_path": "Assembly Language Programming/Mystery/Paul Jones - Mystery Readme/readme.tex", "max_stars_repo_stars_event_max_datetime": "2020-10-31T12:07:07.000Z", "max_stars_repo_stars_event_min_datetime": "2016-03-27T21:34:24.000Z", "num_tokens": 379, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 1657 }
#coding:utf-8 import numpy as np from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.datasets import load_iris import matplotlib.pyplot as plt ''' author: heucoder email: 812860165@qq.com date: 2019.6.13 ''' def lda(data, target, n_dim): ''' :param data: (n_samples, n_features) :param target: data class :param n_dim: target dimension :return: (n_samples, n_dims) ''' clusters = np.unique(target) if n_dim > len(clusters)-1: print("K is too much") print("please input again") exit(0) #within_class scatter matrix Sw = np.zeros((data.shape[1],data.shape[1])) for i in clusters: datai = data[target == i] datai = datai-datai.mean(0) Swi = np.mat(datai).T*np.mat(datai) Sw += Swi #between_class scatter matrix SB = np.zeros((data.shape[1],data.shape[1])) u = data.mean(0) #所有样本的平均值 for i in clusters: Ni = data[target == i].shape[0] ui = data[target == i].mean(0) #某个类别的平均值 SBi = Ni*np.mat(ui - u).T*np.mat(ui - u) SB += SBi S = np.linalg.inv(Sw)*SB eigVals,eigVects = np.linalg.eig(S) #求特征值,特征向量 eigValInd = np.argsort(eigVals) eigValInd = eigValInd[:(-n_dim-1):-1] w = eigVects[:,eigValInd] data_ndim = np.dot(data, w) return data_ndim if __name__ == '__main__': iris = load_iris() X = iris.data Y = iris.target data_1 = lda(X, Y, 2) data_2 = LinearDiscriminantAnalysis(n_components=2).fit_transform(X, Y) plt.figure(figsize=(8,4)) plt.subplot(121) plt.title("my_LDA") plt.scatter(data_1[:, 0], data_1[:, 1], c = Y) plt.subplot(122) plt.title("sklearn_LDA") plt.scatter(data_2[:, 0], data_2[:, 1], c = Y) plt.savefig("LDA.png") plt.show()
{ "alphanum_fraction": 0.611784141, "author": null, "avg_line_length": 25.2222222222, "converted": null, "ext": "py", "file": null, "hexsha": "b4ef81d1db0dc72090336114664cf1af538b7764", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 538, "max_forks_repo_forks_event_max_datetime": "2022-03-30T13:25:15.000Z", "max_forks_repo_forks_event_min_datetime": "2019-06-14T00:25:41.000Z", "max_forks_repo_head_hexsha": "59bcabbc9adcb62cec75a3ffe64472fa947e0945", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "paopaoai11/dimensionality_reduction_alo_codes", "max_forks_repo_path": "codes/LDA/LDA.py", "max_issues_count": 3, "max_issues_repo_head_hexsha": "59bcabbc9adcb62cec75a3ffe64472fa947e0945", "max_issues_repo_issues_event_max_datetime": "2022-03-23T12:10:57.000Z", "max_issues_repo_issues_event_min_datetime": "2020-03-07T02:36:49.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "paopaoai11/dimensionality_reduction_alo_codes", "max_issues_repo_path": "codes/LDA/LDA.py", "max_line_length": 75, "max_stars_count": 1729, "max_stars_repo_head_hexsha": "c5611fb96796ca437e8827cbe6c7c5eabd4f7c23", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "lieutenant-Greenhand/dimensionality_reduction_alo_codes", "max_stars_repo_path": "codes/LDA/LDA.py", "max_stars_repo_stars_event_max_datetime": "2022-03-31T05:40:47.000Z", "max_stars_repo_stars_event_min_datetime": "2019-06-16T08:59:39.000Z", "num_tokens": 587, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1816 }
import numpy as np


def get_src_indices_by_row(row_idxs, shape, flat=True):
    """
    Provide the src_indices when connecting a vectorized variable from an
    output to an input.

    The source is treated as an array of per-node entries of size ``shape``;
    the rows named in ``row_idxs`` select which node entries are passed
    through, as done for node indexing in Dymos.

    Parameters
    ----------
    row_idxs : array_like
        The rows/node indices to be connected from the source to the target.
    shape : tuple
        The shape of the variable at each node (ignores the first dimension).
    flat : bool
        If True, return the source indices in flat source indices form.

    Returns
    -------
    array_like
        If flat, a numpy array of shape `(row_idxs,) + shape` where each
        element is the index of the source of that element in the source
        array, in C-order.
    """
    if not flat:
        raise NotImplementedError('Currently get_src_indices_by_row only returns '
                                  'flat source indices.')

    # The source holds rows 0 .. max(row_idxs), each one `shape`-sized entry.
    full_shape = (np.max(row_idxs) + 1,) + shape

    # Number every element of the source in C order.
    flat_ids = np.arange(np.prod(full_shape), dtype=int).reshape(full_shape)

    # Open mesh: the requested rows crossed with every index of each
    # trailing axis, so the selection keeps the per-node shape intact.
    trailing = [np.arange(extent, dtype=int) for extent in shape]
    selector = np.ix_(row_idxs, *trailing)

    return flat_ids[selector]
{ "alphanum_fraction": 0.6621835443, "author": null, "avg_line_length": 35.1111111111, "converted": null, "ext": "py", "file": null, "hexsha": "9176a851c4dedb1b7b2e44a3db85bdd6b64f6aa7", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 46, "max_forks_repo_forks_event_max_datetime": "2021-12-19T07:23:32.000Z", "max_forks_repo_forks_event_min_datetime": "2018-06-27T20:54:07.000Z", "max_forks_repo_head_hexsha": "602109eee4a1b061444dd2b45c7b1ed0ac1aa0f4", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "yonghoonlee/dymos", "max_forks_repo_path": "dymos/utils/indexing.py", "max_issues_count": 628, "max_issues_repo_head_hexsha": "602109eee4a1b061444dd2b45c7b1ed0ac1aa0f4", "max_issues_repo_issues_event_max_datetime": "2022-03-31T19:24:32.000Z", "max_issues_repo_issues_event_min_datetime": "2018-06-27T20:32:59.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "yonghoonlee/dymos", "max_issues_repo_path": "dymos/utils/indexing.py", "max_line_length": 93, "max_stars_count": 104, "max_stars_repo_head_hexsha": "602109eee4a1b061444dd2b45c7b1ed0ac1aa0f4", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "yonghoonlee/dymos", "max_stars_repo_path": "dymos/utils/indexing.py", "max_stars_repo_stars_event_max_datetime": "2022-03-10T23:35:30.000Z", "max_stars_repo_stars_event_min_datetime": "2018-09-08T16:52:27.000Z", "num_tokens": 288, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1264 }
r""" ISGCI: Information System on Graph Classes and their Inclusions This module implements an interface to the `ISGCI <http://www.graphclasses.org/>`_ database in Sage. This database gathers information on graph classes and their inclusions in each other. It also contains information on the complexity of several computational problems. It is available on the `GraphClasses.org <http://www.graphclasses.org/>`_ website maintained by H.N. de Ridder et al. How to use it? -------------- Presently, it is possible to use this database through the variables and methods present in the :obj:`graph_classes <GraphClasses>` object. For instance:: sage: Trees = graph_classes.Tree sage: Chordal = graph_classes.Chordal Inclusions ^^^^^^^^^^ It is then possible to check the inclusion of classes inside of others, if the information is available in the database:: sage: Trees <= Chordal True And indeed, trees are chordal graphs. The ISGCI database is not all-knowing, and so comparing two classes can return ``True``, ``False``, or ``Unknown`` (see the :mod:`documentation of the Unknown truth value <sage.misc.unknown>`). An *unknown* answer to ``A <= B`` only means that ISGCI cannot deduce from the information in its database that ``A`` is a subclass of ``B`` nor that it is not. 
For instance, ISGCI does not know at the moment that some chordal graphs are not trees:: sage: graph_classes.Chordal <= graph_classes.Tree Unknown Descriptions ^^^^^^^^^^^^ Given a graph class, one can obtain its associated information in the ISGCI database with the :meth:`~GraphClass.description` method:: sage: Chordal.description() Class of graphs : Chordal ------------------------- id : gc_32 name : chordal type : base <BLANKLINE> Problems : ----------- 3-Colourability : Linear Clique : Polynomial Clique cover : Polynomial Cliquewidth : Unbounded Cliquewidth expression : NP-complete Colourability : Linear Cutwidth : NP-complete Domination : NP-complete Feedback vertex set : Polynomial Hamiltonian cycle : NP-complete Hamiltonian path : NP-complete Independent set : Linear Maximum bisection : Unknown Maximum cut : NP-complete Minimum bisection : Unknown Recognition : Linear Treewidth : Polynomial Weighted clique : Polynomial Weighted feedback vertex set : Unknown Weighted independent set : Linear It is possible to obtain the complete list of the classes stored in ISGCI by calling the :meth:`~GraphClasses.show_all` method (beware -- long output):: sage: graph_classes.show_all() id | name | type | smallgraph ---------------------------------------------------------------------------------------------------------------------- gc_309 | $K_4$--minor--free | base | gc_541 | $N^*$ | base | gc_215 | $N^*$--perfect | base | gc_5 | $P_4$--bipartite | base | gc_3 | $P_4$--brittle | base | gc_6 | $P_4$--comparability | base | gc_7 | $P_4$--extendible | base | ... Until a proper search method is implemented, this lets one find classes which do not appear in :obj:`graph_classes.* <GraphClasses>`. 
To retrieve a class of graph from its ISGCI ID one may use the :meth:`~GraphClasses.get_class` method:: sage: GC = graph_classes.get_class("gc_5") sage: GC $P_4$--bipartite graphs Recognition of graphs ^^^^^^^^^^^^^^^^^^^^^ The graph classes represented by the ISGCI database can alternatively be used to access recognition algorithms. For instance, in order to check that a given graph is a tree one has the following the options :: sage: graphs.PathGraph(5) in graph_classes.Tree True or:: sage: graphs.PathGraph(5).is_tree() True Furthermore, all ISGCI graph classes which are defined by the exclusion of a finite sequence of induced subgraphs benefit from a generic recognition algorithm. For instance :: sage: g = graphs.PetersenGraph() sage: g in graph_classes.ClawFree False sage: g.line_graph() in graph_classes.ClawFree True Or directly from ISGCI :: sage: gc = graph_classes.get_class("gc_441") sage: gc diamond--free graphs sage: graphs.PetersenGraph() in gc True Predefined classes ------------------ :obj:`graph_classes <GraphClasses>` currently predefines the following graph classes .. 
list-table:: :widths: 20 30 :header-rows: 1 * - Class - Related methods * - Apex - :meth:`~Graph.is_apex()`, :meth:`~Graph.apex_vertices()` * - AT_free - :meth:`~Graph.is_asteroidal_triple_free` * - Biconnected - :meth:`~Graph.is_biconnected`, :meth:`~GenericGraph.blocks_and_cut_vertices`, :meth:`~GenericGraph.blocks_and_cuts_tree` * - BinaryTrees - :meth:`~sage.graphs.graph_generators.GraphGenerators.BalancedTree`, :meth:`~Graph.is_tree` * - Bipartite - :meth:`~sage.graphs.graph_generators.GraphGenerators.BalancedTree`, :meth:`~sage.graphs.graph.Graph.is_bipartite` * - Block - :meth:`~sage.graphs.graph.Graph.is_block_graph`, :meth:`~sage.graphs.generic_graph.GenericGraph.blocks_and_cut_vertices`, :meth:`~sage.graphs.graph_generators.GraphGenerators.RandomBlockGraph` * - Chordal - :meth:`~sage.graphs.generic_graph.GenericGraph.is_chordal` * - Claw-Free - :meth:`~sage.graphs.graph_generators.GraphGenerators.ClawGraph` * - Comparability - * - Gallai - :meth:`~sage.graphs.generic_graph.GenericGraph.is_gallai_tree` * - Grid - :meth:`~sage.graphs.graph_generators.GraphGenerators.Grid2dGraph`, :meth:`~sage.graphs.graph_generators.GraphGenerators.GridGraph` * - Interval - :meth:`~sage.graphs.graph_generators.GraphGenerators.RandomIntervalGraph`, :meth:`~sage.graphs.graph_generators.GraphGenerators.IntervalGraph`, :meth:`~sage.graphs.generic_graph.GenericGraph.is_interval` * - Line - :meth:`~sage.graphs.graph_generators.GraphGenerators.line_graph_forbidden_subgraphs`, :meth:`~sage.graphs.graph.Graph.is_line_graph` * - Modular - :meth:`~sage.graphs.graph.Graph.modular_decomposition` * - Outerplanar - :meth:`~sage.graphs.generic_graph.GenericGraph.is_circular_planar` * - Perfect - :meth:`~sage.graphs.graph.Graph.is_perfect` * - Planar - :meth:`~sage.graphs.generic_graph.GenericGraph.is_planar` * - Polyhedral - :meth:`~sage.graphs.generic_graph.Graph.is_polyhedral` * - Split - :meth:`~sage.graphs.graph.Graph.is_split` * - Tree - 
:meth:`~sage.graphs.graph_generators.GraphGenerators.trees`, :meth:`~Graph.is_tree` * - UnitDisk - :meth:`~sage.graphs.graph_generators.GraphGenerators.IntervalGraph` * - UnitInterval - :meth:`~sage.graphs.generic_graph.GenericGraph.is_interval` Sage's view of ISGCI -------------------- The database is stored by Sage in two ways. **The classes**: the list of all graph classes and their properties is stored in a huge dictionary (see :meth:`~sage.graphs.isgci.GraphClasses.classes`). Below is what Sage knows of ``gc_249``:: sage: graph_classes.classes()['gc_249'] # random {'problem': {'Independent set': 'Polynomial', 'Treewidth': 'Unknown', 'Weighted independent set': 'Polynomial', 'Cliquewidth expression': 'NP-complete', 'Weighted clique': 'Polynomial', 'Clique cover': 'Unknown', 'Domination': 'NP-complete', 'Clique': 'Polynomial', 'Colourability': 'NP-complete', 'Cliquewidth': 'Unbounded', '3-Colourability': 'NP-complete', 'Recognition': 'Linear'}, 'type': 'base', 'id': 'gc_249', 'name': 'line'} **The class inclusion digraph**: Sage remembers the class inclusions through the inclusion digraph (see :meth:`~sage.graphs.isgci.GraphClasses.inclusion_digraph`). Its nodes are ID of ISGCI classes:: sage: d = graph_classes.inclusion_digraph() sage: d.vertices()[-10:] ['gc_990', 'gc_991', 'gc_992', 'gc_993', 'gc_994', 'gc_995', 'gc_996', 'gc_997', 'gc_998', 'gc_999'] An arc from ``gc1`` to ``gc2`` means that ``gc1`` is a superclass of ``gc2``. This being said, not all edges are stored ! To ensure that a given class is included in another one, we have to check whether there is in the digraph a ``path`` from the first one to the other:: sage: bip_id = graph_classes.Bipartite._gc_id sage: perfect_id = graph_classes.Perfect._gc_id sage: d.has_edge(perfect_id, bip_id) False sage: d.distance(perfect_id, bip_id) 2 Hence bipartite graphs are perfect graphs. 
We can see how ISGCI obtains this result :: sage: p = d.shortest_path(perfect_id, bip_id) sage: len(p) - 1 2 sage: print(p) # random ['gc_56', 'gc_76', 'gc_69'] sage: for c in p: ....: print(graph_classes.get_class(c)) perfect graphs ... bipartite graphs What ISGCI knows is that perfect graphs contain unimodular graph which contain bipartite graphs. Therefore bipartite graphs are perfect ! .. note:: The inclusion digraph is **NOT ACYCLIC**. Indeed, several entries exist in the ISGCI database which represent the same graph class, for instance Perfect graphs and Berge graphs:: sage: graph_classes.inclusion_digraph().is_directed_acyclic() False sage: Berge = graph_classes.get_class("gc_274"); Berge Berge graphs sage: Perfect = graph_classes.get_class("gc_56"); Perfect perfect graphs sage: Berge <= Perfect True sage: Perfect <= Berge True sage: Perfect == Berge True Information for developpers ---------------------------- * The database is loaded not *so* large, but it is still preferable to only load it on demand. This is achieved through the cached methods :meth:`~sage.graphs.isgci.GraphClasses.classes` and :meth:`~sage.graphs.isgci.GraphClasses.inclusion_digraph`. * Upon the first access to the database, the information is extracted from the XML file and stored in the cache of three methods: * ``sage.graphs.isgci._classes`` (dictionary) * ``sage.graphs.isgci._inclusions`` (list of dictionaries) * ``sage.graphs.isgci._inclusion_digraph`` (DiGraph) Note that the digraph is only built if necessary (for instance if the user tries to compare two classes). .. TODO:: Technical things: * Query the database for non-inclusion results so that comparisons can return ``False``, and implement strict inclusions. * Implement a proper search method for the classes not listed in :obj:`graph_classes <GraphClasses>` .. SEEALSO:: :func:`sage.graphs.isgci.show_all`. 
* Some of the graph classes appearing in :obj:`graph_classes <GraphClasses>` already have a recognition algorithm implemented in Sage. It would be so nice to be able to write ``g in Trees``, ``g in Perfect``, ``g in Chordal``, ... :-) Long-term stuff: * Implement simple accessors for all the information in the ISGCI database (as can be done from the website) * Implement intersection of graph classes * Write generic recognition algorithms for specific classes (when a graph class is defined by the exclusion of subgraphs, one can write a generic algorithm checking the existence of each of the graphs, and this method already exists in Sage). * Improve the performance of Sage's graph library by letting it take advantage of the properties of graph classes. For example, :meth:`Graph.independent_set` could use the library to detect that a given graph is, say, a tree or a planar graph, and use a specialized algorithm for finding an independent set. AUTHORS: -------- * H.N. de Ridder et al. (ISGCI database) * Nathann Cohen (Sage implementation) Methods ------- """ from __future__ import print_function from six import itervalues from sage.structure.sage_object import SageObject from sage.structure.unique_representation import CachedRepresentation, UniqueRepresentation from sage.misc.unknown import Unknown from sage.env import GRAPHS_DATA_DIR import six #***************************************************************************** # Copyright (C) 2011 Nathann Cohen <nathann.cohen@gmail.com> # # Distributed under the terms of the GNU General Public License (GPL) # http://www.gnu.org/licenses/ #***************************************************************************** _XML_FILE = "isgci_sage.xml" _SMALLGRAPHS_FILE = "smallgraphs.txt" class GraphClass(SageObject, CachedRepresentation): r""" An instance of this class represents a Graph Class, matching some entry in the ISGCI database. 
EXAMPLES: Testing the inclusion of two classes:: sage: Chordal = graph_classes.Chordal sage: Trees = graph_classes.Tree sage: Trees <= Chordal True sage: Chordal <= Trees Unknown TESTS:: sage: Trees >= Chordal Unknown sage: Chordal >= Trees True """ def __init__(self, name, gc_id, recognition_function = None): r""" Class constructor INPUT: - ``gc_id`` -- the ISGCI class ID - ``recognition_function`` -- a function of one argument `g`, which return boolean answers to the question : *does ``g`` belong to the class represented by ``gc_id`` ?* EXAMPLES:: sage: graph_classes.Chordal # indirect doctest Chordal graphs """ self._name = name self._gc_id = gc_id if not recognition_function is None: self._recognition_function = recognition_function def _repr_(self): r""" Returns a short description of the class EXAMPLES:: sage: graph_classes.Chordal # indirect doctest Chordal graphs """ return self._name+" graphs" def __hash__(self): r""" Returns the class' ID hash EXAMPLES:: sage: hash(graph_classes.Chordal) == hash(graph_classes.Chordal) True """ return hash(self._gc_id) def __le__(self, other): r""" <= operator EXAMPLES:: sage: graph_classes.Chordal <= graph_classes.Tree Unknown """ return other >= self def __ge__(self, other): r""" >= operator EXAMPLES:: sage: graph_classes.Chordal >= graph_classes.Tree True """ inclusion_digraph = GraphClasses().inclusion_digraph() if inclusion_digraph.shortest_path(self._gc_id,other._gc_id) != []: return True else: return Unknown def __eq__(self, other): r""" == operator EXAMPLES:: sage: graph_classes.Chordal == graph_classes.Tree Unknown """ return self >= other and other >= self def __lt__(self, other): r""" >, !=, and < operators EXAMPLES:: sage: graph_classes.Chordal > graph_classes.Tree Traceback (most recent call last): ... NotImplementedError sage: graph_classes.Chordal < graph_classes.Tree Traceback (most recent call last): ... 
NotImplementedError sage: graph_classes.Chordal != graph_classes.Tree Traceback (most recent call last): ... NotImplementedError """ raise NotImplementedError __gt__ = __ne__ = __lt__ def forbidden_subgraphs(self): r""" Returns the list of forbidden induced subgraphs defining the class. If the graph class is not defined by a *finite* list of forbidden induced subgraphs, ``None`` is returned instead. EXAMPLES:: sage: graph_classes.Perfect.forbidden_subgraphs() sage: gc = graph_classes.get_class('gc_62') sage: gc claw--free graphs sage: gc.forbidden_subgraphs() [Graph on 4 vertices] sage: gc.forbidden_subgraphs()[0].is_isomorphic(graphs.ClawGraph()) True """ classes = GraphClasses().classes() gc = classes[self._gc_id] if gc.get("type",None) != "forbidden": return None excluded = gc.get("smallgraph", None) if not excluded: return None if not isinstance(excluded,list): excluded = [excluded] smallgraphs = GraphClasses().smallgraphs() if not all(g in smallgraphs for g in excluded): return None return [smallgraphs[g] for g in excluded] def __contains__(self, g): r""" Tests if ``g`` belongs to the graph class represented by ``self``. 
EXAMPLES:: sage: graphs.CompleteBipartiteGraph(3,3) in graph_classes.Bipartite True sage: graphs.CompleteGraph(4) in graph_classes.Chordal True sage: graphs.CompleteGraph(4) in graph_classes.Comparability True sage: graphs.CompleteGraph(4) in graph_classes.Interval True sage: graphs.CompleteGraph(4) in graph_classes.Line True sage: graphs.CompleteGraph(4) in graph_classes.Perfect True sage: graphs.CompleteGraph(4) in graph_classes.Planar True sage: graphs.CompleteGraph(4) in graph_classes.Split True sage: graphs.PathGraph(4) in graph_classes.Tree True """ from sage.graphs.graph import Graph if not isinstance(g, Graph): return False if hasattr(self, "_recognition_function"): return self._recognition_function(g) excluded = self.forbidden_subgraphs() if excluded is None: raise NotImplementedError("No recognition algorithm is available "+ "for this class.") for gg in excluded: if g.subgraph_search(gg, induced = True): return False return True def description(self): r""" Prints the information of ISGCI about the current class. 
EXAMPLES:: sage: graph_classes.Chordal.description() Class of graphs : Chordal ------------------------- id : gc_32 name : chordal type : base <BLANKLINE> Problems : ----------- 3-Colourability : Linear Clique : Polynomial Clique cover : Polynomial Cliquewidth : Unbounded Cliquewidth expression : NP-complete Colourability : Linear Cutwidth : NP-complete Domination : NP-complete Feedback vertex set : Polynomial Hamiltonian cycle : NP-complete Hamiltonian path : NP-complete Independent set : Linear Maximum bisection : Unknown Maximum cut : NP-complete Minimum bisection : Unknown Recognition : Linear Treewidth : Polynomial Weighted clique : Polynomial Weighted feedback vertex set : Unknown Weighted independent set : Linear """ classes = GraphClasses().classes() cls = classes[self._gc_id] print("Class of graphs : "+self._name) print("-" * (len(self._name)+18)) for key, value in sorted(cls.items()): if value != "" and key != "problem": print("{:30} : {}".format(key, value)) print("\nProblems :") print("-" * 11) for pbname,data in sorted(cls["problem"].items()): if "complexity" in data: print("{:30} : {}".format(pbname, data["complexity"])) from sage.misc.cachefunc import cached_method class GraphClasses(UniqueRepresentation): def get_class(self, id): r""" Returns the class corresponding to the given id in the ISGCI database. INPUT: - ``id`` (string) -- the desired class' ID .. SEEALSO:: :meth:`~sage.graphs.isgci.GraphClasses.show_all` EXAMPLES: With an existing id:: sage: Cographs = graph_classes.get_class("gc_151") sage: Cographs cograph graphs With a wrong id:: sage: graph_classes.get_class(-1) Traceback (most recent call last): ... ValueError: The given class id does not exist in the ISGCI database. Is the db too old ? You can update it with graph_classes.update_db(). 
""" classes = self.classes() if id in classes: c = classes[id] if c.get("name",""): name = c["name"] else: name = "class "+str(id) return GraphClass(name, id) else: raise ValueError("The given class id does not exist in the ISGCI database. Is the db too old ? You can update it with graph_classes.update_db().") @cached_method def classes(self): r""" Returns the graph classes, as a dictionary. Upon the first call, this loads the database from the local XML file. Subsequent calls are cached. EXAMPLES:: sage: t = graph_classes.classes() sage: type(t) <... 'dict'> sage: sorted(t["gc_151"].keys()) ['id', 'name', 'problem', 'type'] sage: t["gc_151"]['name'] 'cograph' sage: t["gc_151"]['problem']['Clique'] {'complexity': 'Linear'} """ self._get_ISGCI() return self.classes() @cached_method def inclusions(self): r""" Returns the graph class inclusions OUTPUT: a list of dictionaries Upon the first call, this loads the database from the local XML file. Subsequent calls are cached. EXAMPLES:: sage: t = graph_classes.inclusions() sage: type(t) <... 'list'> sage: t[0] {'sub': 'gc_1', 'super': 'gc_2'} """ self._get_ISGCI() return self.inclusions() @cached_method def smallgraphs(self): r""" Returns a dictionary associating a graph to a graph description string. Upon the first call, this loads the database from the local XML files. Subsequent calls are cached. EXAMPLES:: sage: t = graph_classes.smallgraphs() sage: t {'2C_4': Graph on 8 vertices, '2K_2': Graph on 4 vertices, '2K_3': Graph on 6 vertices, '2K_3 + e': Graph on 6 vertices, '2K_4': Graph on 8 vertices, '2P_3': Graph on 6 vertices, ... sage: t['fish'] Graph on 6 vertices """ self._get_ISGCI() return self.smallgraphs() @cached_method def inclusion_digraph(self): r""" Returns the class inclusion digraph Upon the first call, this loads the database from the local XML file. Subsequent calls are cached. EXAMPLES:: sage: g = graph_classes.inclusion_digraph(); g Digraph on ... 
vertices """ classes = self.classes() inclusions = self.inclusions() from sage.graphs.digraph import DiGraph inclusion_digraph = DiGraph() inclusion_digraph.add_vertices(classes.keys()) for edge in inclusions: if edge.get("confidence","") == "unpublished": continue inclusion_digraph.add_edge(edge['super'], edge['sub']) return inclusion_digraph def _download_db(self): r""" Downloads the current version of the ISGCI db EXAMPLES:: sage: graph_classes._download_db() # Not tested -- requires internet """ # import compatible with py2 and py3 from six.moves.urllib.request import urlopen from sage.misc.misc import SAGE_TMP import os.path u = urlopen('http://www.graphclasses.org/data.zip') localFile = open(os.path.join(SAGE_TMP,'isgci.zip'), 'w') localFile.write(u.read()) localFile.close() import os, zipfile z = zipfile.ZipFile(os.path.join(SAGE_TMP,'isgci.zip')) # Save a systemwide updated copy whenever possible try: z.extract(_XML_FILE, GRAPHS_DATA_DIR) z.extract(_SMALLGRAPHS_FILE, GRAPHS_DATA_DIR) except IOError: z.extract(_XML_FILE, SAGE_TMP) z.extract(_SMALLGRAPHS_FILE, GRAPHS_DATA_DIR) def _parse_db(self, directory): r""" Parses the ISGCI database and stores its content in ``self``. INPUT: - ``directory`` -- the name of the directory containing the latest version of the database. 
EXAMPLES:: sage: from sage.env import GRAPHS_DATA_DIR sage: graph_classes._parse_db(GRAPHS_DATA_DIR) """ import xml.etree.cElementTree as ET import os.path from sage.graphs.graph import Graph xml_file = os.path.join(GRAPHS_DATA_DIR,_XML_FILE) tree = ET.ElementTree(file=xml_file) root = tree.getroot() DB = _XML_to_dict(root) classes = {c['id']:c for c in DB['GraphClasses']["GraphClass"]} for c in itervalues(classes): c["problem"] = { pb.pop("name"):pb for pb in c["problem"]} inclusions = DB['Inclusions']['incl'] # Parses the list of ISGCI small graphs smallgraph_file = open(os.path.join(GRAPHS_DATA_DIR,_SMALLGRAPHS_FILE),'r') smallgraphs = {} for l in smallgraph_file.readlines(): key, string = l.split("\t") smallgraphs[key] = Graph(string) smallgraph_file.close() self.inclusions.set_cache(inclusions) self.classes.set_cache(classes) self.smallgraphs.set_cache(smallgraphs) def update_db(self): r""" Updates the ISGCI database by downloading the latest version from internet. This method downloads the ISGCI database from the website `GraphClasses.org <http://www.graphclasses.org/>`_. It then extracts the zip file and parses its XML content. Depending on the credentials of the user running Sage when this command is run, one attempt is made at saving the result in Sage's directory so that all users can benefit from it. If the credentials are not sufficient, the XML file are saved instead in the user's directory (in the SAGE_DB folder). EXAMPLES:: sage: graph_classes.update_db() # Not tested -- requires internet """ self._download_db() print("Database downloaded") self.classes.clear_cache() self.inclusions.clear_cache() self.inclusion_digraph.clear_cache() def _get_ISGCI(self): r""" Returns the contents of the ISGCI database. This method is mostly for internal use, but often provides useful information during debugging operations. OUTPUT: A pair ``(classes, inclusions)`` where ``classes`` is a dict of dict, and ``inclusions`` is a list of dicts. .. 
NOTE:: This method returns the data contained in the most recent ISGCI database present on the computer. See :meth:`update_db` to update the latter. EXAMPLES:: sage: graph_classes._get_ISGCI() # long time (4s on sage.math, 2012) """ import os.path from sage.misc.misc import SAGE_DB try: open(os.path.join(SAGE_DB,_XML_FILE)) # Which copy is the most recent on the disk ? if (os.path.getmtime(os.path.join(SAGE_DB,_XML_FILE)) > os.path.getmtime(os.path.join(GRAPHS_DATA_DIR,_XML_FILE))): directory = os.path.join(SAGE_DB,_XML_FILE) else: directory = os.path.join(GRAPHS_DATA_DIR,_XML_FILE) except IOError: directory = os.path.join(GRAPHS_DATA_DIR,_XML_FILE) self._parse_db(directory) def show_all(self): r""" Prints all graph classes stored in ISGCI EXAMPLES:: sage: graph_classes.show_all() id | name | type | smallgraph ---------------------------------------------------------------------------------------------------------------------- gc_309 | $K_4$--minor--free | base | gc_541 | $N^*$ | base | gc_215 | $N^*$--perfect | base | gc_5 | $P_4$--bipartite | base | gc_3 | $P_4$--brittle | base | gc_6 | $P_4$--comparability | base | gc_7 | $P_4$--extendible | base | ... """ classes = self.classes() # We want to print the different fields, and this dictionary stores the # maximal number of characters of each field. 
MAX = { "id" : 0, "type" : 0, "smallgraph": 0, "name": 0 } # We sort the classes alphabetically, though we would like to display the # meaningful classes at the top of the list def sort_key(x): name = x.get("name","zzzzz") return "{}{:4}".format(name, int(x["id"].split('_')[1])) classes_list = sorted(classes.values(), key=sort_key) # Maximum width of a field MAX_LEN = 40 # Computing te max of each field with the database for key in MAX: MAX[key] = len(max((str(x.get(key,"")) for x in classes_list), key = len)) # At most MAX characters per field for key, length in six.iteritems(MAX): MAX[key] = min(length, MAX_LEN) # Head of the table print(("{0:"+str(MAX["id"])+"} | {1:"+str(MAX["name"])+"} | {2:"+str(MAX["type"])+"} | {3:"+str(MAX["smallgraph"])+"}").format("id", "name", "type", "smallgraph")) print("-" * (sum(MAX.values())+9)) # Entries for entry in classes_list: ID = entry.get("id","") name = entry.get("name","") type = entry.get("type","") smallgraph = entry.get("smallgraph","") print(("{0:"+str(MAX["id"])+"} | {1:"+str(MAX["name"])+"} | {2:"+str(MAX["type"])+"} | ").format(ID, name[:MAX_LEN], type[:MAX_LEN])+str(smallgraph)[:MAX_LEN]) def _XML_to_dict(root): r""" Returns the XML data as a dictionary INPUT: - ``root`` -- an ``xml.etree.cElementTree.ElementTree`` object. OUTPUT: A dictionary representing the XML data. EXAMPLES:: sage: graph_classes.Perfect.description() # indirect doctest Class of graphs : Perfect ------------------------- id : gc_56 name : perfect type : base ... """ ans = root.attrib.copy() for child in root: if child.tag in ans: if not isinstance(ans[child.tag],list): ans[child.tag] = [ans[child.tag]] ans[child.tag].append(_XML_to_dict(child)) else: ans[child.tag] = _XML_to_dict(child) # If the dictionary is empty, perhaps the only content is a text, and we # return this instead. Useful sometimes in the ISGCI db, for graph names. 
if not ans: return root.text return ans graph_classes = GraphClasses() # Any object added to this list should also appear in the class' documentation, at the top of the file. graph_classes.Apex = GraphClass("Apex", "gc_1181", recognition_function = lambda x:x.is_apex()) graph_classes.AT_free = GraphClass("AT-free", "gc_61", recognition_function = lambda x:x.is_asteroidal_triple_free()) graph_classes.Biconnected = GraphClass("Biconnected", "gc_771", recognition_function = lambda x:x.is_biconnected()) graph_classes.BinaryTrees = GraphClass("BinaryTrees", "gc_847") graph_classes.Bipartite = GraphClass("Bipartite", "gc_69", recognition_function = lambda x:x.is_bipartite()) graph_classes.Block = GraphClass("Block", "gc_93", recognition_function = lambda x:x.is_block_graph()) graph_classes.Cactus = GraphClass("Cactus", "gc_108", recognition_function = lambda x:x.is_cactus()) graph_classes.Chordal = GraphClass("Chordal", "gc_32", recognition_function = lambda x:x.is_chordal()) graph_classes.ClawFree = GraphClass("Claw-free", "gc_62") graph_classes.CoGraph = GraphClass("CoGraph", "gc_151", recognition_function = lambda x:x.is_cograph()) graph_classes.Comparability = GraphClass("Comparability", "gc_72", recognition_function = lambda x: __import__('sage').graphs.comparability.is_comparability) graph_classes.Gallai = GraphClass("Gallai", "gc_73") graph_classes.Grid = GraphClass("Grid", "gc_464") graph_classes.Interval = GraphClass("Interval", "gc_234", recognition_function = lambda x:x.is_interval()) graph_classes.Line = GraphClass("Line", "gc_249", recognition_function = lambda x:x.is_line_graph()) graph_classes.Modular = GraphClass("Modular", "gc_50") graph_classes.Outerplanar = GraphClass("Outerplanar", "gc_110") graph_classes.Perfect = GraphClass("Perfect", "gc_56", recognition_function = lambda x:x.is_perfect()) graph_classes.Planar = GraphClass("Planar", "gc_43", recognition_function = lambda x:x.is_planar()) graph_classes.Polyhedral = GraphClass("Polyhedral", "gc_986", 
recognition_function = lambda x:x.is_polyhedral()) graph_classes.Split = GraphClass("Split", "gc_39", recognition_function = lambda x:x.is_split()) graph_classes.Tree = GraphClass("Tree", "gc_342", recognition_function = lambda x:x.is_tree()) graph_classes.UnitDisk = GraphClass("UnitDisk", "gc_389") graph_classes.UnitInterval = GraphClass("UnitInterval", "gc_299")
{ "alphanum_fraction": 0.5851443219, "author": null, "avg_line_length": 33.110701107, "converted": null, "ext": "py", "file": null, "hexsha": "916c8998891e5aa68734496beab032e3acc96ba9", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-07-23T10:29:58.000Z", "max_forks_repo_forks_event_min_datetime": "2020-07-23T10:29:58.000Z", "max_forks_repo_head_hexsha": "2d495be78e0bdc7a0a635454290b27bb4f5f70f0", "max_forks_repo_licenses": [ "BSL-1.0" ], "max_forks_repo_name": "bopopescu/sage", "max_forks_repo_path": "src/sage/graphs/isgci.py", "max_issues_count": 2, "max_issues_repo_head_hexsha": "2d495be78e0bdc7a0a635454290b27bb4f5f70f0", "max_issues_repo_issues_event_max_datetime": "2020-07-23T12:13:30.000Z", "max_issues_repo_issues_event_min_datetime": "2018-10-30T13:40:20.000Z", "max_issues_repo_licenses": [ "BSL-1.0" ], "max_issues_repo_name": "bopopescu/sage", "max_issues_repo_path": "src/sage/graphs/isgci.py", "max_line_length": 171, "max_stars_count": 3, "max_stars_repo_head_hexsha": "2d495be78e0bdc7a0a635454290b27bb4f5f70f0", "max_stars_repo_licenses": [ "BSL-1.0" ], "max_stars_repo_name": "bopopescu/sage", "max_stars_repo_path": "src/sage/graphs/isgci.py", "max_stars_repo_stars_event_max_datetime": "2019-09-10T15:26:37.000Z", "max_stars_repo_stars_event_min_datetime": "2018-09-11T11:16:26.000Z", "num_tokens": 8227, "path": null, "reason": "from sage", "repo": null, "save_path": null, "sha": null, "size": 35892 }
import argparse
import numpy as np
import glob
import os
import json
import sys
from tqdm import tqdm

from allennlp.commands.elmo import ElmoEmbedder
from allennlp.common.util import lazy_groups_of
from allennlp.data import vocabulary

parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    '--elmo_weights_path',
    type=str,
    default='models/$l_weights.hdf5',
    help="Path to elmo weight file. Can use $l as a placeholder for language argument")
parser.add_argument(
    '--elmo_options_path',
    type=str,
    default='models/options262.json',
    help="Path to elmo options file. n_characters in the file should be 262")
parser.add_argument(
    '-l', '--language', type=str, default='en',
    help="language to be used for paths")
parser.add_argument(
    '--txt_files',
    type=str,
    default='wiki_files/$l/dev*.txt',
    help=
    "Path to files with sentences (one per line). Can use $l as a placeholder for language argument"
)
parser.add_argument(
    '--vocab_file',
    type=str,
    default='vocabs/$l_50k.vocab',
    help=
    "Path to vocab file with tokens (one per line) to include in output. Should also include <UNK> token. Can use $l as a placeholder for language"
)
parser.add_argument(
    '--out_dir',
    type=str,
    default='anchors_output/$l',
    help="Path to output directory. Can use $l as a placeholder for language argument")
parser.add_argument(
    '--layers', type=int, nargs='+', default=[0, 1, 2],
    help="Layers of Elmo to store")
parser.add_argument(
    '-bs', '--batch_size', type=int, default=64, help="Batch size")
parser.add_argument(
    '-d', '--emb_dim', type=int, default=1024, help="Embeddings size")
parser.add_argument(
    '-c', '--cuda_device', type=int, default=-1,
    help="Cuda device. Use -1 for cpu")

args = parser.parse_args()


def parse_config(args):
    '''
    Replace $l with args.language in every string argument and print the
    resulting configuration.

    :param args: argparse.Namespace, mutated in place
    :return: the same namespace, with $l placeholders substituted
    '''
    print('-' * 30)
    for k in vars(args):
        val = getattr(args, k)
        if type(val) is str and "$l" in val:
            val = val.replace("$l", args.language)
            setattr(args, k, val)
        print("{}: {}".format(k, getattr(args, k)))
    print('-' * 30)
    return args


def iter_line_words(f):
    '''
    Iterating over a text file line by line (each line is one sentence).
    Yielding sentence as list of words.
    '''
    for line in f:
        yield line.strip().split()


def run_elmo(txt_files, elmo_options_file, elmo_weights_file, vocab, layers,
             batch_size, cuda_device):
    '''
    Running ELMo to compute anchors (running-mean embeddings per token) and
    average embedding norms per layer.

    txt_files - path to files with sentence per line (* in the path will be expanded)
    elmo_options_file - json for model. n_characters should be 262
    elmo_weights_file - saved model
    vocab - vocabulary; only its tokens will be saved
    layers - what layers to compute for (0 is uncontextualized layer)
    batch_size - batch size
    cuda_device - cuda device

    Returns dicts of anchors and norms (per layer) and the list of
    occurrences per token (indexed by vocab id).

    NOTE(review): reads the module-level `args.emb_dim` for the anchor matrix
    width — consider passing emb_dim explicitly; confirm with callers.
    '''
    print('Loading ELMo Embedder...')
    elmo = ElmoEmbedder(elmo_options_file, elmo_weights_file, cuda_device)

    num_occurrences = [0] * vocab.get_vocab_size()
    anchors = {}
    norms = {}
    total_words = 0
    for l in layers:
        norms[l] = 0.0
        anchors[l] = np.zeros(shape=(vocab.get_vocab_size(), args.emb_dim))

    oov_ind = vocab.get_token_index(vocab._oov_token)
    shards = list(glob.glob(txt_files))
    for i, shard in enumerate(shards, start=1):
        print(' --- Processing file %d out of %d: %s' % (i, len(shards), shard))
        # FIX: the line-counting handle was previously never closed
        # (sum(1 for line in open(shard))); use a context manager.
        with open(shard, 'r', encoding='utf-8', errors='ignore') as fh:
            num_lines = sum(1 for line in fh)
        # FIX: the shard handle is now closed even if embedding raises.
        with open(shard, 'r', encoding='utf-8', newline='\n', errors='ignore') as f:
            for batch in tqdm(
                    lazy_groups_of(iter_line_words(f), batch_size),
                    total=int(num_lines / batch_size)):
                embeds = elmo.embed_batch(batch)
                for sent, em in zip(batch, embeds):
                    for j, w in enumerate(sent):
                        w_id = vocab.get_token_index(w)
                        if w_id == oov_ind:
                            continue
                        n = num_occurrences[w_id]
                        for l in layers:
                            # Incremental mean update of the token anchor.
                            anchors[l][w_id, :] = anchors[l][w_id, :] * (
                                n / (n + 1)) + em[l, j, :] / (n + 1)
                            # Incremental mean of the embedding norm over all words.
                            norm = np.linalg.norm(em[l, j, :])
                            norms[l] = norms[l] * (total_words / (total_words + 1)) \
                                + norm / (total_words + 1)
                        total_words += 1
                        num_occurrences[w_id] += 1
    return anchors, norms, num_occurrences


def save_embeds(file_path, embeds, vocab, num_occurrences, emb_dim):
    '''
    Write embeddings in word2vec text format: a "count dim" header line, then
    one "token v1 v2 ..." line per token that actually occurred in the text.
    '''
    # Don't include words not in the text.
    n_tokens = len(np.nonzero(num_occurrences)[0])
    # FIX: write with an explicit utf-8 encoding — the vocab is multilingual
    # and the platform default encoding may not be able to represent it.
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write('%d %d\n' % (n_tokens, emb_dim))
        for i in range(embeds.shape[0]):
            if num_occurrences[i] == 0:
                continue
            token = vocab.get_token_from_index(i)
            to_dump = token + ' ' + ' '.join([str(v) for v in embeds[i, :]]) + '\n'
            f.write(to_dump)


if __name__ == '__main__':
    args = parse_config(args)

    if os.path.exists(args.out_dir):
        print("Output dir already exists: {}".format(args.out_dir))
        sys.exit(1)

    vocab = vocabulary.Vocabulary()
    vocab.set_from_file(args.vocab_file, oov_token='<UNK>')
    print("Loaded vocabulary of size {}".format(vocab.get_vocab_size()))

    anchors, norms, num_occurrences = run_elmo(
        args.txt_files, args.elmo_options_path, args.elmo_weights_path, vocab,
        args.layers, args.batch_size, args.cuda_device)

    os.makedirs(args.out_dir, exist_ok=True)
    norm_dict = {}
    print('Saving outputs to {}'.format(args.out_dir))
    for l in tqdm(args.layers):
        norm_key = 'avg_norm_layer_{}'.format(l)
        norm_dict[norm_key] = norms[l]
        file_path = os.path.join(args.out_dir, 'avg_embeds_{}.txt'.format(l))
        save_embeds(file_path, anchors[l], vocab, num_occurrences, args.emb_dim)

    # FIX: was 'norms.json'.format(l) — a no-op .format on a string with no
    # placeholder; also close the dump file deterministically.
    file_path = os.path.join(args.out_dir, 'norms.json')
    with open(file_path, 'w') as f:
        json.dump(norm_dict, f)
{ "alphanum_fraction": 0.6134817563, "author": null, "avg_line_length": 33.8638743455, "converted": null, "ext": "py", "file": null, "hexsha": "0b9530dbad6a712c242271f76e5b0d3691c83a72", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 7, "max_forks_repo_forks_event_max_datetime": "2020-01-06T14:12:11.000Z", "max_forks_repo_forks_event_min_datetime": "2019-03-22T03:45:39.000Z", "max_forks_repo_head_hexsha": "1689370ce7d3f4f8c32da4e87b1f5ea51258b27f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "TalSchuster/CrossLingualELMo", "max_forks_repo_path": "gen_anchors.py", "max_issues_count": 10, "max_issues_repo_head_hexsha": "1689370ce7d3f4f8c32da4e87b1f5ea51258b27f", "max_issues_repo_issues_event_max_datetime": "2020-02-12T22:15:35.000Z", "max_issues_repo_issues_event_min_datetime": "2019-03-28T13:35:22.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "TalSchuster/CrossLingualELMo", "max_issues_repo_path": "gen_anchors.py", "max_line_length": 147, "max_stars_count": 63, "max_stars_repo_head_hexsha": "1689370ce7d3f4f8c32da4e87b1f5ea51258b27f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "TalSchuster/CrossLingualELMo", "max_stars_repo_path": "gen_anchors.py", "max_stars_repo_stars_event_max_datetime": "2020-02-18T20:40:47.000Z", "max_stars_repo_stars_event_min_datetime": "2019-02-25T05:31:29.000Z", "num_tokens": 1604, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 6468 }
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 12:57:47 2019

@Title: FrontierLab exchange program - DBPS source code (for Bayesian Logistic Regression)
@Author: Chou I-Ping
@Reference: C. Sherlock and A. H. Thiery, "A discrete bouncy particle sampler," 2017.
"""

import time
import numpy as np
import copy


def ess(z):
    """Kish effective sample size of weights z: (sum z)^2 / sum z^2."""
    return np.sum(z)**2/np.sum(z**2)


def expit(z):
    """Logistic sigmoid exp(z)/(1+exp(z)).

    NOTE(review): this form overflows for large positive z; fine for the
    moderate linear predictors used here, but not numerically robust.
    """
    return np.exp(z) / (1 + np.exp(z))


class DBPS_BLR:
    """Discrete Bouncy Particle Sampler for 2-parameter Bayesian logistic
    regression (intercept beta[0] and slope beta[1], standard normal prior)."""

    def __init__(self, X, Y, delta, store_skip):
        # X: input
        # Y: output (binary labels)
        # delta: time step size
        # store_skip: thinned factor (store every store_skip-th sample)

        # init input; dataX/dataY keep the full data set so that
        # sto_data() can subsample X/Y per iteration
        self.X = X
        self.Y = Y
        self.dataX = X
        self.dataY = Y
        self.delta = delta
        self.store_skip = store_skip

        # storage
        self.all_beta = []
        # FIX: stage2_prob is appended to in DBPS_sampler (subset branch) but
        # was never initialized, raising AttributeError on first use.
        self.stage2_prob = []

        # status counters
        self.count = 0
        self.stage1_accept = 0
        self.stage2_accept = 0

        # init parameters
        # self.beta = np.random.normal(0,1,2)
        self.beta = np.float64([0,0])
        self.v = np.random.normal(0,1,2)

        # burnin time is computer time
        # not BPS clock
        self.burnin_time = 0
        self.burnin_sample = 0

    def storage(self):
        """Store the current beta every store_skip-th iteration (thinning)."""
        if self.count % self.store_skip == 0:
            self.all_beta.append(copy.deepcopy(self.beta))

    def log_pi_cal(self, beta):
        """Log target density at beta: (scaled) Bernoulli log-likelihood plus
        standard normal log-prior (up to an additive constant).

        self.constant rescales the subsampled likelihood to the full data size;
        it is set in DBPS_sampler before any call.
        """
        pred_y = np.multiply(beta[1], self.X) + beta[0]
        target = self.constant * (np.sum(np.log(np.exp(pred_y)/(1+np.exp(pred_y)))*self.Y)
                                  + np.sum(np.log(1/(1+np.exp(pred_y)))*(1-self.Y))) \
            - 1/2*np.sum(beta**2)
        return target

    def grad_cal(self, beta):
        """Return (pred_y, grad) where grad is the gradient of log pi at beta
        (negated accumulation of the negative-log-posterior derivatives)."""
        pred_y = np.multiply(beta[1], self.X) + beta[0]
        b1_temp = np.sum(self.Y * (np.exp(pred_y)*self.X/(1+np.exp(pred_y))-self.X)) \
            + np.sum((1-self.Y) * np.exp(pred_y)*self.X/(1+np.exp(pred_y))) + beta[1]
        b0_temp = np.sum(self.Y * (np.exp(pred_y)/(1+np.exp(pred_y))-1)) \
            + np.sum((1-self.Y) * np.exp(pred_y)/(1+np.exp(pred_y))) + beta[0]
        grad_x = np.array([-b0_temp, -b1_temp])
        return pred_y, grad_x

    def bounce_v(self, beta):
        """Reflect the current velocity in the hyperplane orthogonal to the
        gradient of log pi at beta (standard BPS bounce)."""
        pred_y, grad_x = self.grad_cal(beta)
        temp = -self.v + 2 * np.dot(self.v, grad_x) / np.sum(grad_x**2) * grad_x
        return temp

    def sto_data(self, sto):
        """Replace X, Y with a random subsample of size sto (with replacement)."""
        N = self.dataX.shape[0]
        ind = np.random.randint(0, N, sto)
        self.X = self.dataX[ind]
        self.Y = self.dataY[ind]

    def DBPS_sampler(self, burninIters, iterations, verbose, kappa = 0, subset = 0, sto = 0):
        """Run the discrete BPS chain.

        burninIters - iteration index at which burn-in bookkeeping is recorded
        iterations  - total number of iterations before stopping
        verbose     - print progress every `verbose` iterations
        kappa       - velocity refreshment rate (0 disables the refresh step)
        subset      - 0: standard bounce; nonzero: randomized-direction bounce
        sto         - 0: full data; nonzero: stochastic subsample size
        """
        if sto != 0:
            self.constant = len(self.dataX) / sto
        else:
            self.constant = 1
        while(1):
            if sto != 0:
                self.sto_data(sto)
            self.count += 1
            # stage 1: propose a straight-line move with flipped velocity
            c_beta = copy.deepcopy(self.beta)
            s1_beta, s1_v = copy.deepcopy(self.beta + self.delta * self.v), copy.deepcopy(-self.v)
            log_pi_1, log_pi_2 = self.log_pi_cal(c_beta), self.log_pi_cal(s1_beta)
            stage1_p = np.minimum(1, np.exp(log_pi_2-log_pi_1))
            if np.random.uniform(0,1,1) < stage1_p:
                self.beta, self.v = s1_beta, s1_v
                self.stage1_accept += 1
            else:
                # stage 2 (basic bps): on stage-1 rejection, attempt a bounce move
                if subset == 0:
                    s2_v = self.bounce_v(s1_beta)
                    s2_beta = self.beta + self.delta*(self.v - s2_v)
                    log_pi_3 = self.log_pi_cal(s2_beta)
                    # delayed-acceptance probability for the bounce proposal
                    stage2_p = np.minimum(1, (1-np.minimum(1, np.exp(log_pi_2-log_pi_3)))/(1-np.minimum(1,np.exp(log_pi_2-log_pi_1))) * np.exp(log_pi_3-log_pi_1))
                    if np.random.uniform(0,1,1) < stage2_p:
                        self.beta, self.v = s2_beta, s2_v
                        self.stage2_accept += 1
                else:
                    # randomized bounce direction
                    # NOTE(review): zeta drawn from Uniform(0,1)^2 then normalized
                    # is not uniform on the sphere — confirm against the paper.
                    zeta = np.random.uniform(0,1,2)
                    zeta = zeta / np.linalg.norm(zeta)
                    pred_y, grad_x = self.grad_cal(c_beta)
                    zeta = np.sign(np.dot(zeta, -grad_x))*zeta
                    r = np.dot(zeta, grad_x) / np.dot(self.v, grad_x)
                    a = (r**2-1)/(2*r-2*np.dot(zeta,self.v))
                    b = r-a
                    s2_v = np.linalg.norm(self.v)/b*zeta - a/b*self.v
                    s2_beta = self.beta + self.delta * (self.v-s2_v)
                    log_pi_3 = self.log_pi_cal(s2_beta)
                    # print([pi_1, pi_2, pi_3])
                    stage2_p = np.minimum(1, (1-np.minimum(1, np.exp(log_pi_2-log_pi_3)))/(1-np.minimum(1,np.exp(log_pi_2-log_pi_1))) * np.exp(log_pi_3-log_pi_1))
                    # FIX: the accept test compared against np.random.normal —
                    # a Metropolis-Hastings accept step draws Uniform(0,1), as
                    # in the subset == 0 branch above.
                    if np.random.uniform(0,1,1) < stage2_p:
                        self.beta, self.v = s2_beta, s2_v
                        self.stage2_prob.append(stage2_p)
                # flip velocity after the stage-2 attempt
                self.v = -self.v
            # third step: partial velocity refreshment using kappa
            if kappa > 0:
                zeta = np.random.randn(2)
                zeta -= zeta.dot(self.v) * self.v / np.linalg.norm(self.v)**2
                zeta /= np.linalg.norm(zeta)
                self.v = (self.v + np.sqrt(kappa)*np.sqrt(self.delta)*zeta) / np.sqrt(1+kappa*self.delta)
                del zeta
            # store weight
            self.storage()
            if self.count % verbose == 0:
                print('Current counts:' + str(self.count))
            if self.count == burninIters:
                self.burnin_time = time.time()
                self.burnin_sample = copy.deepcopy(self.count)
            if self.count > iterations:
                print('Current counts:' + str(self.count))
                print(str(iterations) + ' finished')
                break
{ "alphanum_fraction": 0.5043537647, "author": null, "avg_line_length": 39.0466666667, "converted": null, "ext": "py", "file": null, "hexsha": "890723217adad4347dab799ef692495e34bf90c7", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "10bb56525a54e66c5422e350fd40770a94b204f4", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "IPINGCHOU/FrontierLab-BPSs-code", "max_forks_repo_path": "BayesianLogisticRegression/DBPS_BLR_util.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "10bb56525a54e66c5422e350fd40770a94b204f4", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "IPINGCHOU/FrontierLab-BPSs-code", "max_issues_repo_path": "BayesianLogisticRegression/DBPS_BLR_util.py", "max_line_length": 163, "max_stars_count": 1, "max_stars_repo_head_hexsha": "10bb56525a54e66c5422e350fd40770a94b204f4", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "IPINGCHOU/FrontierLab-BPSs-code", "max_stars_repo_path": "BayesianLogisticRegression/DBPS_BLR_util.py", "max_stars_repo_stars_event_max_datetime": "2020-12-22T09:21:59.000Z", "max_stars_repo_stars_event_min_datetime": "2020-12-22T09:21:59.000Z", "num_tokens": 1587, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 5857 }
\chapter{Reduction Recognition}

Figure~\ref{Tutorial:exampleReductionRecognition} shows a translator which finds
the first loop of a main function and recognizes reduction operations and
variables within the loop. A reduction recognition algorithm
(\lstinline{ReductionRecognition()}) is implemented in the SageInterface
namespace and follows the C/C++ reduction restrictions defined in the OpenMP
3.0 specification.

%-------------translator---------------------
\begin{figure}[!h]
{\indent
{\mySmallestFontSize

% Do this when processing latex to generate non-html (not using latex2html)
\begin{latexonly}
\lstinputlisting{\TutorialExampleDirectory/reductionRecognition.C}
\end{latexonly}

% Do this when processing latex to build html (using latex2html)
\begin{htmlonly}
\verbatiminput{\TutorialExampleDirectory/reductionRecognition.C}
\end{htmlonly}

% end of scope in font size
}
% End of scope in indentation
}
\caption{Example source code showing reduction recognition.}
\label{Tutorial:exampleReductionRecognition}
\end{figure}

Using this translator we can compile the code shown in
figure~\ref{Tutorial:exampleInputCode_reductionRecognition}. The output is
shown in figure~\ref{Tutorial:exampleOutput_reductionRecognition}.
%-------------input code---------------------
\begin{figure}[!h]
{\indent
{\mySmallFontSize

% Do this when processing latex to generate non-html (not using latex2html)
\begin{latexonly}
\lstinputlisting{\TutorialExampleDirectory/inputCode_reductionRecognition.C}
\end{latexonly}

% Do this when processing latex to build html (using latex2html)
\begin{htmlonly}
\verbatiminput{\TutorialExampleDirectory/inputCode_reductionRecognition.C}
\end{htmlonly}

% end of scope in font size
}
% End of scope in indentation
}
\caption{Example source code used as input to the loop reduction recognition processor.}
\label{Tutorial:exampleInputCode_reductionRecognition}
\end{figure}

%-------------output---------------------
\begin{figure}[!h]
{\indent
{\mySmallFontSize

% Do this when processing latex to generate non-html (not using latex2html)
\begin{latexonly}
\lstinputlisting{\TutorialExampleBuildDirectory/reductionRecognition.out}
\end{latexonly}

% Do this when processing latex to build html (using latex2html)
\begin{htmlonly}
\verbatiminput{\TutorialExampleBuildDirectory/reductionRecognition.out}
\end{htmlonly}

% end of scope in font size
}
% End of scope in indentation
}
\caption{Output of the reduction recognition processor applied to the example input code.}
\label{Tutorial:exampleOutput_reductionRecognition}
\end{figure}
{ "alphanum_fraction": 0.7711171662, "author": null, "avg_line_length": 29.5287356322, "converted": null, "ext": "tex", "file": null, "hexsha": "e72775f5b9d8249f7ef23aeba2c9f9d21b66bcc0", "include": null, "lang": "TeX", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 146, "max_forks_repo_forks_event_max_datetime": "2022-03-04T07:32:53.000Z", "max_forks_repo_forks_event_min_datetime": "2015-04-27T02:48:34.000Z", "max_forks_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "sujankh/rose-matlab", "max_forks_repo_path": "docs/Rose/Tutorial/reductionRecognition.tex", "max_issues_count": 174, "max_issues_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:51:05.000Z", "max_issues_repo_issues_event_min_datetime": "2015-01-28T18:41:32.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "sujankh/rose-matlab", "max_issues_repo_path": "docs/Rose/Tutorial/reductionRecognition.tex", "max_line_length": 106, "max_stars_count": 488, "max_stars_repo_head_hexsha": "7597292cf14da292bdb9a4ef573001b6c5b9b6c0", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "maurizioabba/rose", "max_stars_repo_path": "docs/Rose/Tutorial/reductionRecognition.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:15:46.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-09T08:54:48.000Z", "num_tokens": 584, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 2569 }
# Dependencies: import Pkg; Pkg.add(["FiniteDifferences", "Plots"])
using Faust, FiniteDifferences, Plots

# Compile a Faust DSP: pm.ks is the physical-modeling string model
# (presumably Karplus-Strong — "ks") tuned to MIDI key 60.
process = compile("""
import("stdfaust.lib");
process = pm.ks(pm.f2l(ba.midikey2hz(60)), 0.1);
""")

# Render one audio block: re-initialize the DSP for the block length,
# feed the input buffer, and return the computed output.
function render(buf)
    init!(process, block_size=size(buf, 1))
    process.inputs = buf
    compute!(process)
end

input = randn(1024, 1)
# Numerically differentiate the DSP output w.r.t. every input sample
# using a 5-point central finite-difference stencil.
J = jacobian(central_fdm(5, 1), render, input)[1]
heatmap(J)
{ "alphanum_fraction": 0.6524064171, "author": null, "avg_line_length": 19.6842105263, "converted": null, "ext": "jl", "file": null, "hexsha": "ee22e7dd675365210ba38d4dcda1a2527bddc971", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-08-14T14:10:38.000Z", "max_forks_repo_forks_event_min_datetime": "2021-08-11T07:38:26.000Z", "max_forks_repo_head_hexsha": "bb12a229af9e80b94defc57ad3f491a5813241cb", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "sletz/Faust.jl", "max_forks_repo_path": "examples/finitediff.jl", "max_issues_count": 1, "max_issues_repo_head_hexsha": "bb12a229af9e80b94defc57ad3f491a5813241cb", "max_issues_repo_issues_event_max_datetime": "2021-06-26T22:09:19.000Z", "max_issues_repo_issues_event_min_datetime": "2021-06-26T22:09:18.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "sletz/Faust.jl", "max_issues_repo_path": "examples/finitediff.jl", "max_line_length": 53, "max_stars_count": 5, "max_stars_repo_head_hexsha": "f52c5ad841b327fd15f05f5920417707cb03917f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "corajr/Faust.jl", "max_stars_repo_path": "examples/finitediff.jl", "max_stars_repo_stars_event_max_datetime": "2021-12-18T22:31:22.000Z", "max_stars_repo_stars_event_min_datetime": "2021-06-23T19:56:30.000Z", "num_tokens": 134, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 374 }
'''
This is the unittest for pinn module
'''
import unittest
import os
import sys

# Make the project root importable so the modules under test resolve.
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)

import numpy as np
import tensorflow as tf

import prep_data
import Logger
import nn
# FIX: the tests below reference `pinn.PhysicsInformedNN`, but only `nn` was
# imported, so every test died with NameError. Import the module under test.
# NOTE(review): if the implementation actually lives in `nn`, drop this import
# and rename the `pinn.` references instead — confirm against the repo layout.
import pinn

dataset = '../data/single_action_2_pendulum_data_L100.npz'


class UnitTests(unittest.TestCase):
    '''Unit tests for pinn.PhysicsInformedNN.'''

    def test_pinn_layer_shape(self):
        '''Fitting data whose width does not match the input layer must raise.'''
        tf_optimizer = tf.keras.optimizers.Adam(learning_rate=0.007,
                                                epsilon=1e-1)
        logger = Logger.Logger(frequency=10)
        g = 10.0  # default gravity value in openAI
        length = float(100)
        N_f = 1500
        N_u = 1000
        X_f, Exact_u, X_u_train, u_train, lb, ub = \
            prep_data.prep_data(dataset, N_u, N_f, noise=0.1)
        layers = [2, 80, 80, 80, 80, 80, 80, 80, 80, 1]
        # FIX: try/except/else with a manual re-raise replaced by
        # assertRaises, which states the intent directly.
        with self.assertRaises(ValueError,
                               msg="Input data must match input layer shape"):
            pinns = pinn.PhysicsInformedNN(layers, tf_optimizer, logger,
                                           X_u_train, ub, lb, g, length)
            pinns.fit(X_u_train, u_train, 100)

    def test_pinn_f_model(self):
        '''f_model should run on freshly prepared training data.'''
        # FIX: this test referenced undefined names (tf_optimizer, logger,
        # layers, g, length and self.dtype); define them locally the same way
        # test_pinn_layer_shape does, and use an explicit tf.float32 dtype.
        tf_optimizer = tf.keras.optimizers.Adam(learning_rate=0.007,
                                                epsilon=1e-1)
        logger = Logger.Logger(frequency=10)
        g = 10.0
        length = float(100)
        layers = [2, 80, 80, 80, 80, 80, 80, 80, 80, 1]
        N_f = 1500
        N_u = 1000
        X_f, Exact_u, X_u_train, u_train, lb, ub = \
            prep_data.prep_data(dataset, N_u, N_f, noise=0.1)
        self.t_f = tf.convert_to_tensor(X_u_train, dtype=tf.float32)
        pinns = pinn.PhysicsInformedNN(layers, tf_optimizer, logger,
                                       X_u_train, ub, lb, g, length)
        solution = pinns.f_model()
        self.assertIsNotNone(solution)


suite = unittest.TestLoader().loadTestsFromTestCase(UnitTests)
_ = unittest.TextTestRunner().run(suite)
{ "alphanum_fraction": 0.6071019473, "author": null, "avg_line_length": 28.1612903226, "converted": null, "ext": "py", "file": null, "hexsha": "c5b544968d44551eff65fdf1c36e9c2d0b4697ef", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "db9eb412e529b07d1460aefd3fb8526e7b27c862", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "weishiyan/Physics-Informed-Reinforcement-Learning", "max_forks_repo_path": "pendulum_SL/tests/test_pinn.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "db9eb412e529b07d1460aefd3fb8526e7b27c862", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "weishiyan/Physics-Informed-Reinforcement-Learning", "max_issues_repo_path": "pendulum_SL/tests/test_pinn.py", "max_line_length": 101, "max_stars_count": 3, "max_stars_repo_head_hexsha": "db9eb412e529b07d1460aefd3fb8526e7b27c862", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "weishiyan/Physics-Informed-Reinforcement-Learning", "max_stars_repo_path": "pendulum_SL/tests/test_pinn.py", "max_stars_repo_stars_event_max_datetime": "2021-11-23T15:39:04.000Z", "max_stars_repo_stars_event_min_datetime": "2021-08-30T06:12:50.000Z", "num_tokens": 467, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1746 }
// FIX: this header defines inline functions, templates and an enum but had no
// include guard; multiple inclusion in one TU would be a redefinition error.
#pragma once

#include <appbase/application.hpp>
#include "chain_state_types.hpp"
#include "state_history.hpp"
#include <abieos.h>
#include <boost/beast/core/flat_buffer.hpp>

using namespace appbase;
using boost::beast::flat_buffer;

namespace chronicle {

  // Channels published by receiver_plugin
  namespace channels {
    using namespace abieos;

    // Why a fork event was emitted.
    enum class fork_reason_val : uint8_t {
      network = 1,   // fork occurred in the EOSIO network
      restart = 2,   // explicit fork on receiver restart
      resync  = 3,   // full resync from the genesis
    };

    inline string to_string(fork_reason_val reason) {
      switch (reason) {
      case fork_reason_val::network: return "network";
      case fork_reason_val::restart: return "restart";
      case fork_reason_val::resync: return "resync";
      }
      return "unknown";
    }

    // Published on the `forks` channel when the chain forks.
    struct fork_event {
      uint32_t        fork_block_num;
      uint32_t        depth;
      fork_reason_val fork_reason;
      uint32_t        last_irreversible;
    };

    template <typename F>
    constexpr void for_each_field(fork_event*, F f) {
      f("block_num", member_ptr<&fork_event::fork_block_num>{});
      f("depth", member_ptr<&fork_event::depth>{});
      f("fork_reason", member_ptr<&fork_event::fork_reason>{});
      f("last_irreversible", member_ptr<&fork_event::last_irreversible>{});
    }

    using forks = channel_decl<struct forks_tag, std::shared_ptr<fork_event>>;

    // A signed block as received from state history.
    // NOTE(review): `buffer` presumably keeps the raw state-history message
    // alive while the decoded views are in flight — confirm; it is not
    // exposed through for_each_field.
    struct block {
      uint32_t                     block_num;
      abieos::checksum256          block_id;
      uint32_t                     last_irreversible;
      state_history::signed_block  block;
      std::shared_ptr<flat_buffer> buffer;
    };

    template <typename F>
    constexpr void for_each_field(block*, F f) {
      f("block_num", member_ptr<&block::block_num>{});
      f("block_id", member_ptr<&block::block_id>{});
      f("last_irreversible", member_ptr<&block::last_irreversible>{});
      f("block", member_ptr<&block::block>{});
    }

    using blocks = channel_decl<struct blocks_tag, std::shared_ptr<block>>;

    // One table delta within a block.
    struct block_table_delta {
      uint32_t                       block_num;
      abieos::block_timestamp        block_timestamp;
      state_history::table_delta_v0  table_delta;
      std::shared_ptr<flat_buffer>   buffer;
    };

    template <typename F>
    constexpr void for_each_field(block_table_delta*, F f) {
      f("block_num", member_ptr<&block_table_delta::block_num>{});
      f("block_timestamp", member_ptr<&block_table_delta::block_timestamp>{});
      f("table_delta", member_ptr<&block_table_delta::table_delta>{});
    }

    using block_table_deltas = channel_decl<struct block_table_deltas_tag, std::shared_ptr<block_table_delta>>;

    // One transaction trace within a block.
    struct transaction_trace {
      uint32_t                        block_num;
      abieos::block_timestamp         block_timestamp;
      state_history::transaction_trace trace;
      std::shared_ptr<flat_buffer>    buffer;
    };

    template <typename F>
    constexpr void for_each_field(transaction_trace*, F f) {
      f("block_num", member_ptr<&transaction_trace::block_num>{});
      f("block_timestamp", member_ptr<&transaction_trace::block_timestamp>{});
      f("trace", member_ptr<&transaction_trace::trace>{});
    }

    using transaction_traces = channel_decl<struct transaction_traces_tag, std::shared_ptr<transaction_trace>>;

    // A contract ABI was set or updated on an account.
    struct abi_update {
      uint32_t                block_num;
      abieos::block_timestamp block_timestamp;
      abieos::name            account;
      abieos::bytes           abi_bytes;
      abieos::abi_def         abi;
    };

    template <typename F>
    constexpr void for_each_field(abi_update*, F f) {
      f("block_num", member_ptr<&abi_update::block_num>{});
      f("block_timestamp", member_ptr<&abi_update::block_timestamp>{});
      f("account", member_ptr<&abi_update::account>{});
      f("abi_bytes", member_ptr<&abi_update::abi_bytes>{});
      f("abi", member_ptr<&abi_update::abi>{});
    }

    using abi_updates = channel_decl<struct abi_updates_tag, std::shared_ptr<abi_update>>;

    // A contract ABI was removed from an account.
    struct abi_removal {
      uint32_t                block_num;
      abieos::block_timestamp block_timestamp;
      abieos::name            account;
    };

    template <typename F>
    constexpr void for_each_field(abi_removal*, F f) {
      f("block_num", member_ptr<&abi_removal::block_num>{});
      f("block_timestamp", member_ptr<&abi_removal::block_timestamp>{});
      f("account", member_ptr<&abi_removal::account>{});
    }

    using abi_removals = channel_decl<struct abi_removals_tag, std::shared_ptr<abi_removal>>;

    // An ABI failed to decode; `error` carries the message.
    struct abi_error {
      uint32_t                block_num;
      abieos::block_timestamp block_timestamp;
      abieos::name            account;
      string                  error;
    };

    template <typename F>
    constexpr void for_each_field(abi_error*, F f) {
      f("block_num", member_ptr<&abi_error::block_num>{});
      f("block_timestamp", member_ptr<&abi_error::block_timestamp>{});
      f("account", member_ptr<&abi_error::account>{});
      f("error", member_ptr<&abi_error::error>{});
    }

    using abi_errors = channel_decl<struct abi_errors_tag, std::shared_ptr<abi_error>>;

    // A contract table row was added/modified (added==true) or removed.
    struct table_row_update {
      uint32_t                      block_num;
      abieos::block_timestamp       block_timestamp;
      bool                          added; // false==removed
      chain_state::key_value_object kvo;
      std::shared_ptr<flat_buffer>  buffer;
    };

    template <typename F>
    constexpr void for_each_field(table_row_update*, F f) {
      f("block_num", member_ptr<&table_row_update::block_num>{});
      f("block_timestamp", member_ptr<&table_row_update::block_timestamp>{});
      f("added", member_ptr<&table_row_update::added>{});
      f("kvo", member_ptr<&table_row_update::kvo>{});
    }

    using table_row_updates = channel_decl<struct table_row_updates_tag, std::shared_ptr<table_row_update>>;

    // An account permission was added (added==true) or removed.
    struct permission_update {
      uint32_t                       block_num;
      abieos::block_timestamp        block_timestamp;
      bool                           added; // false==removed
      chain_state::permission_object permission;
      std::shared_ptr<flat_buffer>   buffer;
    };

    template <typename F>
    constexpr void for_each_field(permission_update*, F f) {
      f("block_num", member_ptr<&permission_update::block_num>{});
      f("block_timestamp", member_ptr<&permission_update::block_timestamp>{});
      f("added", member_ptr<&permission_update::added>{});
      f("permission", member_ptr<&permission_update::permission>{});
    }

    using permission_updates = channel_decl<struct permission_updates_tag, std::shared_ptr<permission_update>>;

    // A permission link was added (added==true) or removed.
    struct permission_link_update {
      uint32_t                            block_num;
      abieos::block_timestamp             block_timestamp;
      bool                                added; // false==removed
      chain_state::permission_link_object permission_link;
      std::shared_ptr<flat_buffer>        buffer;
    };

    template <typename F>
    constexpr void for_each_field(permission_link_update*, F f) {
      f("block_num", member_ptr<&permission_link_update::block_num>{});
      f("block_timestamp", member_ptr<&permission_link_update::block_timestamp>{});
      f("added", member_ptr<&permission_link_update::added>{});
      f("permission_link", member_ptr<&permission_link_update::permission_link>{});
    }

    using permission_link_updates = channel_decl<struct permission_link_updates_tag, std::shared_ptr<permission_link_update>>;

    // Account metadata (e.g. code/auth state) changed.
    struct account_metadata_update {
      uint32_t                             block_num;
      abieos::block_timestamp              block_timestamp;
      chain_state::account_metadata_object account_metadata;
      std::shared_ptr<flat_buffer>         buffer;
    };

    template <typename F>
    constexpr void for_each_field(account_metadata_update*, F f) {
      f("block_num", member_ptr<&account_metadata_update::block_num>{});
      f("block_timestamp", member_ptr<&account_metadata_update::block_timestamp>{});
      f("account_metadata", member_ptr<&account_metadata_update::account_metadata>{});
    }

    using account_metadata_updates = channel_decl<struct account_metadata_updates_tag, std::shared_ptr<account_metadata_update>>;

    // Receiver paused waiting for the exporter to acknowledge blocks.
    struct receiver_pause {
      uint32_t head;
      uint32_t acknowledged;
    };

    template <typename F>
    constexpr void for_each_field(receiver_pause*, F f) {
      f("head", member_ptr<&receiver_pause::head>{});
      f("acknowledged", member_ptr<&receiver_pause::acknowledged>{});
    }

    using receiver_pauses = channel_decl<struct receiver_pauses_tag, std::shared_ptr<receiver_pause>>;

    // End-of-block marker: all events for block_num have been published.
    struct block_finished {
      uint32_t                block_num;
      abieos::block_timestamp block_timestamp;
      uint32_t                last_irreversible;
    };

    template <typename F>
    constexpr void for_each_field(block_finished*, F f) {
      f("block_num", member_ptr<&block_finished::block_num>{});
      f("block_timestamp", member_ptr<&block_finished::block_timestamp>{});
      f("last_irreversible", member_ptr<&block_finished::last_irreversible>{});
    }

    using block_completed = channel_decl<struct block_completed_tag, std::shared_ptr<block_finished>>;

    // Request to re-fetch a block range in interactive mode.
    // NOTE(review): unlike every other channel payload, this struct has no
    // for_each_field — presumably it is never serialized; confirm.
    struct interactive_request {
      uint32_t block_num_start;
      uint32_t block_num_end;
    };

    using interactive_requests = channel_decl<struct interactive_requests_tag, std::shared_ptr<interactive_request>>;
  }
}


// Plugin that receives state-history data and publishes it on the channels above.
class receiver_plugin : public appbase::plugin<receiver_plugin> {
public:
  APPBASE_PLUGIN_REQUIRES();
  receiver_plugin();
  virtual ~receiver_plugin();
  virtual void set_program_options(options_description& cli, options_description& cfg) override;
  void plugin_initialize(const variables_map& options);
  void plugin_startup();
  void plugin_shutdown();
  bool is_interactive();
  void request_block(uint32_t block_num);
  bool is_noexport();
  void exporter_will_ack_blocks(uint32_t max_unconfirmed);
  void ack_block(uint32_t block_num);
  void slowdown(bool pause);
  abieos_context* get_contract_abi_ctxt(abieos::name account);
  void add_dependency(appbase::abstract_plugin* plug, string plugname);
  void abort_receiver();
private:
  std::unique_ptr<class receiver_plugin_impl> my; // pimpl
  std::vector<std::tuple<appbase::abstract_plugin*, std::string>> dependent_plugins;
  void start_after_dependencies();
};


// Global functions.
// NOTE(review): the inline wrappers below dereference receiver_plug without a
// null check; callers are presumably only reached after the plugin is
// registered — confirm.
bool is_noexport_opt(const variables_map& options);

extern receiver_plugin* receiver_plug;

void exporter_initialized();

inline bool is_interactive_mode() {
  return receiver_plug->is_interactive();
}

inline bool is_noexport_mode() {
  return receiver_plug->is_noexport();
}

void exporter_will_ack_blocks(uint32_t max_unconfirmed);

inline void ack_block(uint32_t block_num) {
  receiver_plug->ack_block(block_num);
}

inline void slowdown_receiver(bool pause) {
  receiver_plug->slowdown(pause);
}

void donot_start_receiver_before(appbase::abstract_plugin* plug, string plugname);

void abort_receiver();

inline abieos_context* get_contract_abi_ctxt(abieos::name account) {
  return receiver_plug->get_contract_abi_ctxt(account);
}
{ "alphanum_fraction": 0.6513994911, "author": null, "avg_line_length": 36.1656441718, "converted": null, "ext": "hpp", "file": null, "hexsha": "58357ffae2c109fdbc68e6fb1b987b35ccdbd06b", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c94faba17ac5f39f9e37aaafc3c523c0fc2ec432", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "yakud/eos-chronicle", "max_forks_repo_path": "chronicle-receiver/receiver_plugin.hpp", "max_issues_count": null, "max_issues_repo_head_hexsha": "c94faba17ac5f39f9e37aaafc3c523c0fc2ec432", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "yakud/eos-chronicle", "max_issues_repo_path": "chronicle-receiver/receiver_plugin.hpp", "max_line_length": 117, "max_stars_count": null, "max_stars_repo_head_hexsha": "c94faba17ac5f39f9e37aaafc3c523c0fc2ec432", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "yakud/eos-chronicle", "max_stars_repo_path": "chronicle-receiver/receiver_plugin.hpp", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2571, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 11790 }
#                                                    _
#                                                   | |
#   __ _ _   _  __ _ _ __ ___   ___ _ __ | |_ ___ _ __
#  / _` | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _ \ '__|
# | (_| | |_| | (_| | | | | | | __/ | | | || __/ |
#  \__,_|\__,_|\__, |_| |_| |_|\___|_| |_|\__\___|_|
#               __/ |
#              |___/
"""image_augmenter.py

Holds image augmentation class KaleidoscopeAugmenter
"""
import constants
import cv2
import os
import shutil
import sys
import transformations

from itertools import takewhile
from numpy import ndarray
from random import shuffle
from string import ascii_lowercase

# This environmental variable is set in the Docker Container the script runs in
# It is set when the Pod is created with `kubectl create -f job.yaml` or during Interface().transform()
DESTINATION_S3 = os.environ["DESTINATION_S3"]


class KaleidoscopeAugmenter(object):
    def __init__(self, image, image_name, num_transformations, verbose=True):
        """
        Augments `image` by chaining together transformations. Saves to S3 bucket.

        :param image: Raw original image, 2d 3-channel np.ndarray
        :param image_name: filename (path) of the original image
        :param num_transformations: how many transformations to chain for this image
        :param verbose: when True, progress messages are written to stdout
        :raises ValueError: if num_transformations exceeds constants.NUM_POSSIBLE_TRANSFORMS
        """
        self._vprint = sys.stdout.write if verbose else lambda *a, **k: None
        self._image_name = image_name
        # normalize the original image size before transforming
        self._image = self._resize_raw_image(image)

        # TODO: Pass in a list of transformation functions as an argument instead of using a hardcoded list
        # Only a fixed number of transformations exist, and only one of each type is chained.
        if num_transformations <= constants.NUM_POSSIBLE_TRANSFORMS:
            self._num_transformations = num_transformations
        else:
            raise ValueError(
                "No more than {} transformations possible.".format(
                    constants.NUM_POSSIBLE_TRANSFORMS
                )
            )

        self._transforms = self._generate_shuffled_transforms()
        # _aug_image_names are used as a guide to apply the transformations
        self._aug_image_names = self._generate_augmented_image_names()
        self._images = self._generate_images()
        self._save()

    @staticmethod
    def _resize_raw_image(image):
        """
        Re-sizes the raw image into a randomly cropped square.

        :param image: np.ndarray (or subclass) image
        :return: cropped/resized image
        :raises TypeError: if `image` is not an ndarray
        """
        # isinstance (rather than `type(...) is`) also accepts ndarray subclasses
        if isinstance(image, ndarray):
            return transformations.random_square_crop_with_resize(image)
        else:
            raise TypeError("`image` must be np.ndarray not {}".format(type(image)))

    def _generate_shuffled_transforms(self):
        """
        Shuffles the transformation list so it is different for each original image.

        :return: dict mapping a letter ("a", "b", ...) to a transformation function
        """
        possible_transforms = [
            transformations.rotate_and_zoom,
            transformations.adjust_contrast,
            transformations.adjust_brightness,
            # transformations.adjust_saturation,
            transformations.flip_left_right,
            # `noisy` is listed twice, which makes it twice as likely to be
            # selected -- TODO confirm this duplication is intentional
            transformations.noisy,
            transformations.noisy,
        ]
        shuffle(possible_transforms)

        # keys are letters ("a", "b", etc); values are transformation functions
        return {
            ascii_lowercase[i]: possible_transforms[i]
            for i in range(0, self._num_transformations)
        }

    def _generate_augmented_image_names(self):
        """
        Generates new file names for the augmented images.

        Each name is the original path with a letter combination (from
        _generate_chars_to_append) inserted before the extension.

        :return: list of filenames, sorted shortest first then alphabetical
        """
        # basename: everything after the last '/' (equivalent to the previous
        # reversed-takewhile implementation)
        basename = self._image_name.rsplit("/", 1)[-1]
        prefix = self._image_name[: len(self._image_name) - len(basename)]

        # rsplit(".", 1) tolerates filenames that contain extra dots
        # (e.g. "img.v2.png"); a plain split(".") raised ValueError on those
        name, extension = basename.rsplit(".", 1)

        words_to_append = self._generate_chars_to_append()
        aug_image_names = [
            prefix + name + word + "." + extension for word in words_to_append
        ]

        # sort the names, shortest first, then alphabetical, so every name's
        # predecessor (one fewer transform) is generated before it
        aug_image_names.sort(key=lambda item: (len(item), item))
        return aug_image_names

    def _generate_chars_to_append(self):
        """
        Generates every non-empty subset of the first `_num_transformations`
        letters, encoded as strings.

        :usage: with _num_transformations == 4 the output contains
            ['d', 'c', 'cd', 'b', 'bd', 'bc', 'bcd', 'a', 'ad', ...]

        This list is used to keep track of which images need which transformations.

        :return: list of letter-combination strings
        """
        # Each integer i in [1, 2**n) is a bitmask selecting a subset of letters.
        words = []
        for i in range(1, 1 << self._num_transformations):
            bits = "{0:0{1:d}b}".format(i, self._num_transformations)
            word = ""
            # bit j set -> letter j included ("a" for the first bit, etc.)
            for j in range(0, self._num_transformations):
                if bits[j] == "1":
                    word += ascii_lowercase[j]
            words.append(word)
        return words

    def _generate_images(self):
        """
        Generates the augmented images by applying transformations according
        to the names in _aug_image_names.

        Each augmented name's image is produced by applying the transform
        keyed by the name's last letter to the image of the name without it.

        :return: dict mapping image name -> image
        """
        images = {self._image_name: self._image}

        for augmented_name in self._aug_image_names:
            # rsplit tolerates dots in the base filename (see
            # _generate_augmented_image_names)
            name, extension = augmented_name.rsplit(".", 1)

            # last character of `name` selects the transform; the remainder is
            # the key of the already-generated source image
            transform = self._transforms[name[-1:]]
            key = name[:-1] + "." + extension

            new_image = images[key]
            if new_image is not None:
                images[augmented_name] = transform(new_image)
            else:
                print('new image was none.')
                images[augmented_name] = self._image

        return images

    def _save(self):
        """
        Save the collection of images to S3 (via a local temp dir).

        :return: None
        """
        # NOTE: This would be faster if it skipped the 'save locally' step
        for image_name in self._images:
            os.makedirs(
                os.path.dirname(constants.TMP_SAVE_DIR + image_name), exist_ok=True
            )
            # save locally
            cv2.imwrite(constants.TMP_SAVE_DIR + image_name, self._images[image_name])

        self._vprint(f'saving to {DESTINATION_S3}')

        # TODO: Test using boto3 instead of a call to the command line.
        # NOTE(review): string-built shell command; paths containing spaces or
        # shell metacharacters would break it -- boto3 would also fix that.
        os.system(
            "aws s3 cp "
            + constants.TMP_SAVE_DIR
            + f" s3://{DESTINATION_S3} --recursive --quiet"
        )

        # delete the local files
        shutil.rmtree(constants.TMP_SAVE_DIR)
{ "alphanum_fraction": 0.602147651, "author": null, "avg_line_length": 35.8173076923, "converted": null, "ext": "py", "file": null, "hexsha": "696c8dfb01a801a1c0d5b29bf4853680a42f5f1c", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-12-22T16:54:00.000Z", "max_forks_repo_forks_event_min_datetime": "2020-12-22T16:54:00.000Z", "max_forks_repo_head_hexsha": "a84ffbec9dda98f438b0e94f1350d6c810031c94", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "andrewasheridan/super-duper-chainsaw", "max_forks_repo_path": "kaleidoscope/image_augmenter.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "a84ffbec9dda98f438b0e94f1350d6c810031c94", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "andrewasheridan/super-duper-chainsaw", "max_issues_repo_path": "kaleidoscope/image_augmenter.py", "max_line_length": 110, "max_stars_count": 1, "max_stars_repo_head_hexsha": "a84ffbec9dda98f438b0e94f1350d6c810031c94", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "andrewasheridan/kaleidoscope", "max_stars_repo_path": "kaleidoscope/image_augmenter.py", "max_stars_repo_stars_event_max_datetime": "2020-12-22T16:53:38.000Z", "max_stars_repo_stars_event_min_datetime": "2020-12-22T16:53:38.000Z", "num_tokens": 1653, "path": null, "reason": "from numpy", "repo": null, "save_path": null, "sha": null, "size": 7450 }
#include <vsim/env/environment.hpp>
#include <vsim/env/model.hpp>
#include <vsim/env/geometry.hpp>
#include <vsim/env/node.hpp>
#include <vsim/env/pose.hpp>
#include <vsim/env/drawable.hpp>
#include <vsim/util/format.hpp>
#include <vsim/util/filesystem.hpp>
#include <vsim/util/strings.hpp>

#include <pugixml/pugixml.hpp>

#include <fstream>
#include <Eigen/Geometry>
#include <stack>

using namespace Eigen ;
using namespace std ;

namespace vsim {

using namespace pugi ;

// DOM-based loader: walks a pugixml document rooted at <vsim> and populates
// the Environment passed to the constructor. The *_instances_ maps record
// elements with an "id" attribute so later "#id" uri references
// (<node_instance>, <material_instance>, <geometry_instance>) can resolve.
class EnvLoader {
public:
    EnvLoader(Environment &env): env_(env) {}

    // Parses the file at `fname`; throws EnvironmentLoadException on failure.
    void load(const string &fname) ;

private:
    void parse(const xml_node &root) ;
    void parseWorld(const xml_node &root) ;
    void parseEnvironment(const xml_node &root) ;
    void parseLibrary(const xml_node &root) ;
    NodePtr parseVisual(const xml_node &root) ;
    void parseShape(const xml_node &root) ;
    void parseLink(const xml_node &root) ;
    void parseRigidBody(const xml_node &root) ;
    void parseRigidConstraint(const xml_node &root) ;
    void parseModel(const xml_node &root) ;
    NodePtr parseNode(const xml_node &root) ;
    NodePtr parseNodeInstance(const xml_node &root) ;
    NodePtr parseImportNode(const xml_node &root) ;
    MaterialPtr parseMaterial(const xml_node &root) ;
    MaterialPtr parseMaterialInstance(const xml_node &root) ;
    DrawablePtr parseDrawable(const xml_node &root) ;
    GeometryPtr parseGeometry(const xml_node &root) ;
    GeometryPtr parseGeometryInstance(const xml_node &root) ;
    GeometryPtr parseBox(const xml_node &root) ;
    GeometryPtr parseMesh(const xml_node &root) ;
    void parsePose(const xml_node &root, Pose &p) ;

private:
    Environment &env_ ;
    string root_dir_ ;                              // base directory for relative resource urls
    map<string, NodePtr> node_instances_ ;          // id -> node
    map<string, MaterialPtr> material_instances_ ;  // id -> material
    map<string, GeometryPtr> geometry_instances_ ;  // id -> geometry
};

// Public entry point: delegate to EnvLoader.
void Environment::loadXML(const string &path) {
    EnvLoader loader(*this) ;
    loader.load(path);
}

// Tree walker that locates the first element whose tag equals tag_ and whose
// "id" attribute equals id_; the match (if any) is left in node_.
struct find_by_id_walker: pugi::xml_tree_walker {
    find_by_id_walker(const string &tag, const string &id): tag_(tag), id_(id) {
    }

    virtual bool for_each(pugi::xml_node& node) {
        if ( pugi::node_element != node.type() ) return true ;

        string ename = node.name() ;
        string id = node.attribute("id").as_string() ;

        if ( ename == tag_ && id == id_ ) {
            node_ = node ;
            return false ;  // stop traversal: found
        }

        return true; // continue traversal
    }

    string tag_, id_ ;
    xml_node node_ ;
};

// Loads and parses the XML file. root_dir_ defaults to the file's directory
// unless the <vsim> element carries a "root_dir" attribute.
void EnvLoader::load(const string &file_name) {
    xml_document doc ;

    string dir, base, ext ;
    util::split_path(file_name, dir, base, ext) ;

    xml_parse_result result = doc.load_file(file_name.c_str()) ;

    if ( !result )
        throw EnvironmentLoadException(result.description()) ;

    xml_node root = doc.child("vsim") ;
    root_dir_ = root.attribute("root_dir").as_string(dir.c_str()) ;

    parse(root) ;
}

// Top-level dispatch: optional <library>, mandatory <world>, optional <environment>.
void EnvLoader::parse(const xml_node &root) {
    if ( xml_node library = root.child("library") )
        parseLibrary(library) ;

    if ( xml_node world = root.child("world") )
        parseWorld(world) ;
    else
        throw EnvironmentLoadException("No <world> defined");

    if ( xml_node env = root.child("environment") )
        parseEnvironment(env) ;
}

// <world> may contain <rigid_body> and <model> children only.
void EnvLoader::parseWorld(const xml_node &parent) {
    for( xml_node &child: parent.children() ) {
        string tag_name = child.name() ;
        if ( tag_name == "rigid_body" ) {
            parseRigidBody(child) ;
        } else if ( tag_name == "model" ) {
            parseModel(child) ;
        }
        else
            throw EnvironmentLoadException(util::format("Invalid element <%> inside <world>", tag_name));
    }
}

// Not yet implemented.
void EnvLoader::parseEnvironment(const xml_node &root) {
}

// Not yet implemented.
void EnvLoader::parseLibrary(const xml_node &root) {
}

// <visual> is parsed as a plain node tree.
NodePtr EnvLoader::parseVisual(const xml_node &parent) {
    return parseNode(parent) ;
}

// Builds a Node from a <node> element. Registers it under its "id" (if any)
// for later <node_instance> references. Only one <pose> is allowed.
// NOTE(review): the results of parseImportNode/parseMaterial/parseDrawable
// are discarded here -- presumably they should be attached to the node;
// confirm against Node's definition. A <node_instance> child returns
// immediately, discarding any children parsed so far.
NodePtr EnvLoader::parseNode(const xml_node &parent) {
    NodePtr p(new Node) ;

    string id = parent.attribute("id").as_string() ;
    if ( !id.empty() )
        node_instances_.insert({id, p}) ;

    bool is_first_pose = true ;

    for( xml_node &child: parent.children() ) {
        string tag_name = child.name() ;
        if ( tag_name == "node" ) {
            auto child_node = parseNode(child) ;
            p->children_.push_back(child_node) ;
        } else if ( tag_name == "include" ) {
            parseImportNode(child) ;
        } else if ( tag_name == "material" ) {
            parseMaterial(child) ;
        } else if ( tag_name == "drawable" ) {
            parseDrawable(child) ;
        } else if ( tag_name == "pose" ) {
            if ( is_first_pose ) {
                parsePose(child, p->pose_) ;
                is_first_pose = false ;
            }
            else
                throw EnvironmentLoadException("Only one <pose> expected inside <node>");
        } else if ( tag_name == "node_instance" ) {
            return parseNodeInstance(child) ;
        }
        else
            throw EnvironmentLoadException(util::format("Invalid element <%> inside <node>", tag_name));
    }

    return p;
}

// Resolves a "#id" uri against node_instances_.
NodePtr EnvLoader::parseNodeInstance(const xml_node &parent) {
    string ref = parent.attribute("uri").as_string() ;
    if ( ref.empty() )
        throw EnvironmentLoadException("Attribute \"uri\" is required for <node_instance>");

    string id = ref.substr(1) ;  // strip leading '#'
    auto it = node_instances_.find(id) ;
    if ( it == node_instances_.end() )
        throw EnvironmentLoadException(util::format("Unresolved reference \"%\" of <node_instance>", ref));

    return it->second ;
}

// Resolves a "#id" uri against material_instances_.
MaterialPtr EnvLoader::parseMaterialInstance(const xml_node &parent) {
    string ref = parent.attribute("uri").as_string() ;
    if ( ref.empty() )
        throw EnvironmentLoadException("Attribute \"uri\" is required for <material_instance>");

    string id = ref.substr(1) ;
    auto it = material_instances_.find(id) ;
    if ( it == material_instances_.end() )
        throw EnvironmentLoadException(util::format("Unresolved reference \"%\" of <material_instance>", ref));

    return it->second ;
}

// Not yet implemented: always returns nullptr.
NodePtr EnvLoader::parseImportNode(const xml_node &root) {
    return nullptr;
}

// Not yet implemented: always returns nullptr.
MaterialPtr EnvLoader::parseMaterial(const xml_node &root) {
    return nullptr;
}

// Builds a Drawable from geometry/material children (inline or instance
// references). Only the first geometry and first material are kept; a
// missing geometry is an error.
DrawablePtr EnvLoader::parseDrawable(const xml_node &parent) {
    DrawablePtr drawable(new Drawable) ;

    for( xml_node &child: parent.children() ) {
        string tag_name = child.name() ;
        if ( tag_name == "geometry" ) {
            if ( !drawable->geometry_ )
                drawable->geometry_ = parseGeometry(child) ;
        } else if ( tag_name == "geometry_instance" ) {
            if ( !drawable->geometry_ )
                drawable->geometry_ = parseGeometryInstance(child) ;
        } else if ( tag_name == "material" ) {
            if ( !drawable->material_ )
                drawable->material_ = parseMaterial(child) ;
        } else if ( tag_name == "material_instance" ) {
            if ( !drawable->material_ )
                drawable->material_ = parseMaterialInstance(child) ;
        }
        else
            throw EnvironmentLoadException(util::format("Invalid element <%> inside <node>", tag_name));
    }

    if ( !drawable->geometry_ )
        throw EnvironmentLoadException("No geometry defined inside <drawable>");

    return drawable ;
}

// Dispatches on the concrete geometry child (<box> or <mesh>).
GeometryPtr EnvLoader::parseGeometry(const xml_node &parent) {
    GeometryPtr geom ;

    for( xml_node &child: parent.children() ) {
        string tag_name = child.name() ;
        if ( tag_name == "box" ) {
            geom = parseBox(child) ;
        } else if ( tag_name == "mesh" ) {
            geom = parseMesh(child) ;
        }
        else
            throw EnvironmentLoadException(util::format("Invalid element <%> inside <geometry>", tag_name));
    }

    if ( !geom )
        throw EnvironmentLoadException("Empty <geometry> element");

    return geom ;
}

// Resolves a "#id" uri against geometry_instances_.
GeometryPtr EnvLoader::parseGeometryInstance(const xml_node &parent) {
    string ref = parent.attribute("uri").as_string() ;
    if ( ref.empty() )
        throw EnvironmentLoadException("Attribute \"uri\" is required for <geometry_instance>");

    string id = ref.substr(1) ;
    auto it = geometry_instances_.find(id) ;
    if ( it == geometry_instances_.end() )
        throw EnvironmentLoadException(util::format("Unresolved reference \"%\" of <geometry_instance>", ref));

    return it->second ;
}

// <box> requires an <extents> child with three floats.
// NOTE(review): the values are stored directly into half_extents_ without
// halving -- confirm whether <extents> means full or half extents.
GeometryPtr EnvLoader::parseBox(const xml_node &parent) {
    GeometryPtr geom ;

    if ( xml_node extents = parent.child("extents") ) {
        BoxGeometry *box = new BoxGeometry ;

        istringstream strm(extents.child_value()) ;
        Vector3f &e = box->half_extents_ ;
        strm >> e.x() >> e.y() >> e.z() ;

        geom.reset(box) ;
    }
    else
        throw EnvironmentLoadException("<extents> is missing from <box>");

    return geom ;
}

// Loads a mesh from the "url" attribute, resolved relative to root_dir_.
// NOTE(review): `geom` is never used, and `m` leaks if load() throws -- the
// exception path constructs no GeometryPtr around it.
GeometryPtr EnvLoader::parseMesh(const xml_node &parent) {
    GeometryPtr geom ;

    Mesh *m = new Mesh ;

    string src = parent.attribute("url").as_string() ;

    try {
        m->load(root_dir_ + '/' + src) ;
    }
    catch ( ModelLoaderException &e ) {
        throw EnvironmentLoadException(e.what()) ;
    }

    return GeometryPtr(m) ;
}

// Accumulates transforms into p.mat_ in document order:
// <translate>, <rotate> (axis + degrees), <scale>, <matrix> (row-major 4x4).
void EnvLoader::parsePose(const xml_node &parent, Pose &p) {
    for( xml_node &child: parent.children() ) {
        string tag_name = child.name() ;

        string cont = child.child_value() ;
        istringstream sstrm(cont) ;

        if ( tag_name == "translate" ) {
            float tx, ty, tz ;
            sstrm >> tx >> ty >> tz ;
            p.mat_.translate(Vector3f(tx, ty, tz)) ;
        } else if ( tag_name == "rotate" ) {
            // axis (x y z) followed by an angle in degrees
            float ax, ay, az, angle ;
            sstrm >> ax >> ay >> az >> angle ;
            p.mat_.rotate(AngleAxisf(angle * M_PI/180.0, Vector3f(ax, ay, az))) ;
        } else if ( tag_name == "scale" ) {
            float sx, sy, sz ;
            sstrm >> sx >> sy >> sz ;
            p.mat_.scale(Vector3f(sx, sy, sz)) ;
        } else if ( tag_name == "matrix" ) {
            Matrix4f m ;
            sstrm >> m(0, 0) >> m(0, 1) >> m(0, 2) >> m(0, 3) ;
            sstrm >> m(1, 0) >> m(1, 1) >> m(1, 2) >> m(1, 3) ;
            sstrm >> m(2, 0) >> m(2, 1) >> m(2, 2) >> m(2, 3) ;
            sstrm >> m(3, 0) >> m(3, 1) >> m(3, 2) >> m(3, 3) ;
            p.mat_ *= m ;
        }
        else
            throw EnvironmentLoadException(util::format("Invalid element <%> inside <pose>", tag_name));
    }
}

// Not yet implemented.
void EnvLoader::parseShape(const xml_node &root) {
}

// Not yet implemented.
void EnvLoader::parseLink(const xml_node &root) {
}

// <rigid_body> may contain <visual>, <shape> and <link> children.
// NOTE(review): the node returned by parseVisual is discarded.
void EnvLoader::parseRigidBody(const xml_node &parent) {
    for( xml_node &child: parent.children() ) {
        string tag_name = child.name() ;
        if ( tag_name == "visual" ) {
            parseVisual(child) ;
        } else if ( tag_name == "shape" ) {
            parseShape(child) ;
        } else if ( tag_name == "link" ) {
            parseLink(child) ;
        }
        else
            throw EnvironmentLoadException(util::format("Invalid element <%> inside <rigid_body>", tag_name));
    }
}

// Not yet implemented.
void EnvLoader::parseRigidConstraint(const xml_node &root) {
}

// <model> may contain <rigid_body> and <rigid_constraint> children.
void EnvLoader::parseModel(const xml_node &parent) {
    for( xml_node &child: parent.children() ) {
        string tag_name = child.name() ;
        if ( tag_name == "rigid_body" ) {
            parseRigidBody(child) ;
        } else if ( tag_name == "rigid_constraint" ) {
            parseRigidConstraint(child) ;
        }
        else
            throw EnvironmentLoadException(util::format("Invalid element <%> inside <model>", tag_name));
    }
}

#if 0
// ---------------------------------------------------------------------------
// Disabled legacy implementation (inside the `#if 0` opened above): a
// SAX-based loader superseded by the DOM-based EnvLoader. Kept verbatim for
// reference. NOTE(review): consider deleting once the DOM loader is stable.
// ---------------------------------------------------------------------------
class EnvParser: public util::XMLSAXParser {
public:
    // One entry per recognized element kind; *Reference variants mark
    // elements that were uri-resolved rather than defined inline.
    enum ElementType { ModelElement, ModelElementReference, PoseElement, MeshElement, MeshElementReference,
                       NodeElement, NodeElementReference, RootElement, SceneElement, BodyElement, BodyElementReference,
                       GeometryElement, MaterialElement, MaterialElementReference,
                       TranslationElement, RotationElement, SkewElement, ScaleElement, MatrixElement } ;

    EnvParser(const string &fname, istream &strm, Environment &env): util::XMLSAXParser(strm), fname_(fname), env_(env) {
        string base, ext ;
        util::split_path(fname, dir_, base, ext) ;
    }

    void startElement(const std::string &qname, const util::Dictionary &attr_list) override ;
    void endElement(const std::string &qname) override ;

    // Keeps only the last non-whitespace text run; consumed by the
    // parsePose* handlers on endElement.
    void characters(const std::string &text_data) override {
        string trimmed = util::trimCopy(text_data) ;
        if ( !trimmed.empty() )
            text_ = trimmed ;
    }

    void error(ErrorCode code, uint line, uint column ) {
        throw EnvironmentLoadException(util::format("Error parsing file: %\nXML parsing error near line: %, col: %, code: %", fname_, line, column, code));
    }

    void createModelElement(const Dictionary &attrs) ;
    void createNodeElement(const Dictionary &attrs) ;
    void createMaterialElement(const Dictionary &attrs) ;
    void createPrimitiveElement(const string &type, const Dictionary &attrs) ;
    void createPoseElement(const Dictionary &attrs) ;
    void parsePoseTranslation() ;
    void parsePoseScale() ;
    void parsePoseRotation(const string &type) ;
    void parsePoseMatrix() ;

    Environment &env_ ;
    string fname_, dir_ ;
    stack<ElementType> elements_ ;      // open-element stack, mirrors XML nesting
    stack<ModelPtr> model_stack_ ;
    stack<NodePtr> node_stack_ ;
    ScenePtr scene_ ;
    MeshPtr mesh_ ;
    Pose pose_ ;
    BodyPtr body_ ;
    GeometryPtr geom_ ;
    MaterialPtr material_ ;
    map<string, ModelPtr> model_map_ ;      // id -> model, for uri references
    map<string, MeshPtr> mesh_map_ ;
    map<string, MaterialPtr> material_map_ ;
    map<string, FramePtr> frame_map_ ;
    string text_ ;                          // last character data seen
};

void Environment::loadXML(const string &path) {
    ifstream strm(path) ;
    EnvParser parser(path, strm, *this) ;
    parser.parse() ;
}

// <model>: either a "#id" uri reference, a "src" file to load, or an inline
// model. Registers inline models by id, links them to the enclosing scene,
// body or parent model, and pushes onto model_stack_.
void EnvParser::createModelElement(const Dictionary &attrs) {
    ElementType et = elements_.top() ;

    if ( et != SceneElement && et != BodyElement && et != ModelElement )
        throw EnvironmentLoadException("<model> should be declared only inside <scene> or <body> or <model> element") ;

    string uri = attrs.get("uri") ;

    ModelPtr model ;

    if ( !uri.empty() ) { // this is a reference to a model
        auto it = model_map_.find(uri.substr(1)) ;
        if ( it == model_map_.end() )
            throw EnvironmentLoadException(util::format("Unresolved reference % of <model>", uri)) ;
        else
            model = it->second ;

        elements_.push(ModelElementReference) ;
    }
    else { // this is an inline model or a model loaded from file
        string id = attrs.get("id") ;
        string src = attrs.get("src") ;

        if ( !src.empty() ) {
            try {
                model = Model::load(dir_ + '/' + src) ;
            }
            catch ( ModelLoaderException &e ) {
                throw EnvironmentLoadException(e.what()) ;
            }
        }
        else
            model.reset(new Model) ;

        if ( !id.empty() )
            model_map_.insert({id, model}) ;

        elements_.push(ModelElement) ;

        // NOTE(review): BodyElement and SceneElement take the same branch here.
        if ( et == BodyElement )
            scene_->models_.push_back(model) ;
        else if ( et == SceneElement )
            scene_->models_.push_back(model) ;
        else if ( et == ModelElement ) {
            ModelPtr parent = model_stack_.top() ;
            parent->children_.push_back(model) ;
        }
    }

    if ( et == BodyElement ) {
        body_->model_ = model ;
    }

    model_stack_.push(model) ;
}

// <node>: creates a Node, attaches it to the parent node (if any) and to the
// enclosing model's node list.
void EnvParser::createNodeElement(const Dictionary &attrs) {
    ElementType et = elements_.top() ;

    if ( et != ModelElement && et != NodeElement )
        throw EnvironmentLoadException("<node> should be declared only inside <model> or <node> element") ;

    string id = attrs.get("id") ;

    NodePtr node(new Node) ;

    if ( !node_stack_.empty() ) {
        NodePtr parent = node_stack_.top() ;
        parent->children_.push_back(node) ;
    }

    node_stack_.push(node) ;

    if ( elements_.top() == ModelElement ) {
        model_stack_.top()->nodes_.push_back(node) ;
    }

    elements_.push(NodeElement) ;
}

// <cube>/<cylinder>/<cone>/<sphere>: builds (or uri-resolves) a Mesh and
// attaches it to the enclosing geometry, scene or model.
void EnvParser::createPrimitiveElement(const std::string &pname, const Dictionary &attrs) {
    ElementType et = elements_.top() ;

    if ( et != SceneElement && et != GeometryElement && et != ModelElement )
        throw EnvironmentLoadException(util::format("<%> should be declared only inside <scene> or <geometry> or <model> element", pname)) ;

    string uri = attrs.get("uri") ;

    MeshPtr mesh ;

    if ( !uri.empty() ) {
        string rid = uri.substr(1) ;
        auto it = mesh_map_.find(rid) ;
        // NOTE(review): format string has two '%' placeholders but only
        // `pname` is supplied -- the unresolved id is missing.
        if ( it == mesh_map_.end() )
            throw EnvironmentLoadException(util::format("Unresolved reference \"%\" of <%>", pname)) ;
        else
            mesh = it->second ;

        elements_.push(MeshElementReference) ;
    }
    else {
        string id = attrs.get("id") ;

        if ( pname == "cube" ) {
            float sz = attrs.value<float>("size", 1.0) ;
            mesh = Mesh::createSolidCube(sz) ;
        }
        else if ( pname == "cylinder" ) {
            float radius = attrs.value<float>("radius", 1.0) ;
            float height = attrs.value<float>("height", 1.0) ;
            int slices = attrs.value<int>("slices", 12) ;
            int stacks = attrs.value<int>("stacks", 4) ;
            mesh = Mesh::createSolidCylinder(radius, height, slices, stacks) ;
        }
        else if ( pname == "cone" ) {
            float radius = attrs.value<float>("radius", 1.0) ;
            float height = attrs.value<float>("height", 1.0) ;
            int slices = attrs.value<int>("slices", 12) ;
            int stacks = attrs.value<int>("stacks", 4) ;
            mesh = Mesh::createSolidCone(radius, height, slices, stacks) ;
        }
        else if ( pname == "sphere" ) {
            float radius = attrs.value<float>("radius", 1.0) ;
            int slices = attrs.value<int>("slices", 12) ;
            int stacks = attrs.value<int>("stacks", 12) ;
            mesh = Mesh::createSolidSphere(radius, slices, stacks) ;
        }

        if ( !id.empty() )
            mesh_map_.insert({id, mesh}) ;

        elements_.push(MeshElement) ;

        // NOTE(review): this branch pushes the member `mesh_` (possibly stale),
        // not the local `mesh` -- confirm intent.
        if ( et == GeometryElement )
            geom_->parent_->model_->meshes_.push_back(mesh_) ;
        else if ( et == SceneElement )
            scene_->meshes_.push_back(mesh) ;
        else if ( et == ModelElement )
            model_stack_.top()->meshes_.push_back(mesh) ;
    }

    if ( et == GeometryElement ) {
        if ( geom_->mesh_ )
            throw EnvironmentLoadException(util::format("Only one instance of <mesh> is allowed within <geometry>")) ;
        geom_->mesh_ = mesh ;
    }

    mesh_ = mesh ;
}

// Applies the buffered <translate> text to pose_.
void EnvParser::parsePoseTranslation() {
    istringstream sstrm(text_) ;
    float tx, ty, tz ;
    sstrm >> tx >> ty >> tz ;
    pose_.mat_.translate(Vector3f(tx, ty, tz)) ;
}

// Applies the buffered <rotate> text (axis + angle in degrees) to pose_.
void EnvParser::parsePoseRotation(const string &rtype) {
    if ( rtype == "axis" ) {
        istringstream sstrm(text_) ;
        float ax, ay, az, angle ;
        sstrm >> ax >> ay >> az >> angle ;
        pose_.mat_.rotate(AngleAxisf(angle * M_PI/180.0, Vector3f(ax, ay, az))) ;
    }
}

// Applies the buffered <scale> text to pose_.
void EnvParser::parsePoseScale() {
    istringstream sstrm(text_) ;
    float ax, ay, az ;
    sstrm >> ax >> ay >> az ;
    pose_.mat_.scale(Vector3f(ax, ay, az)) ;
}

// Applies the buffered <matrix> text (row-major 4x4) to pose_.
void EnvParser::parsePoseMatrix() {
    istringstream sstrm(text_) ;
    Matrix4f m ;
    sstrm >> m(0, 0) >> m(0, 1) >> m(0, 2) >> m(0, 3) ;
    sstrm >> m(1, 0) >> m(1, 1) >> m(1, 2) >> m(1, 3) ;
    sstrm >> m(2, 0) >> m(2, 1) >> m(2, 2) >> m(2, 3) ;
    sstrm >> m(3, 0) >> m(3, 1) >> m(3, 2) >> m(3, 3) ;
    pose_.mat_ *= m ;
}

// <material>: either a uri reference or an inline material, attached to the
// enclosing geometry, scene or model.
void EnvParser::createMaterialElement(const Dictionary &attrs) {
    ElementType et = elements_.top() ;

    if ( et != SceneElement && et != GeometryElement && et != ModelElement )
        throw EnvironmentLoadException("<material> should be declared only inside <scene> or <geometry> or <model> element") ;

    string uri = attrs.get("uri") ;

    MaterialPtr material ;

    if ( !uri.empty() ) {
        string rid = uri.substr(1) ;
        auto it = material_map_.find(rid) ;
        if ( it == material_map_.end() )
            throw EnvironmentLoadException(util::format("Unresolved reference \"%\" of <material>", uri)) ;
        else
            material = it->second ;

        elements_.push(MaterialElementReference) ;
    }
    else {
        string id = attrs.get("id") ;

        material.reset(new Material) ;

        if ( !id.empty() )
            material_map_.insert({id, material}) ;

        elements_.push(MaterialElement) ;
    }

    if ( et == GeometryElement ) {
        if ( geom_->material_ )
            throw EnvironmentLoadException(util::format("Only one instance of <material> is allowed within <geometry>")) ;
        geom_->material_ = material ;
    }
    else if ( et == SceneElement )
        scene_->materials_.push_back(material) ;
    else if ( et == ModelElement )
        model_stack_.top()->materials_.push_back(material) ;

    material_ = material ;
}

// <pose>: optionally resolves a parent "frame" reference; the accumulated
// transform is assigned to the owner on endElement.
void EnvParser::createPoseElement(const Dictionary &attrs) {
    ElementType et = elements_.top() ;

    if ( et != BodyElement && et != NodeElement && et != ModelElement )
        throw EnvironmentLoadException("<pose> should be declared only inside <body> or <node> or <model> element") ;

    string parent_id = attrs.get("frame") ;

    if ( !parent_id.empty() ) {
        auto it = frame_map_.find(parent_id.substr(1)) ;
        if ( it == frame_map_.end() )
            throw EnvironmentLoadException(util::format("Unresolved reference \"%\" of \"frame\" in <pose>", parent_id)) ;
        else {
            pose_.frame_ = it->second ;
        }
    }
    else pose_.frame_.reset() ;

    elements_.push(PoseElement) ;
}

// SAX open-tag dispatch: validates nesting and delegates to the create* helpers.
void EnvParser::startElement(const string &qname, const util::Dictionary &attrs) {
    if ( qname == "vsim" ) {
        elements_.push(RootElement) ;
    }
    else if ( qname == "scene" ) {
        if ( elements_.top() != RootElement || scene_ ) {
            throw EnvironmentLoadException("<scene> declared outside of <root> element or already defined") ;
        }
        elements_.push(SceneElement) ;
        scene_.reset(new Scene) ;
        env_.scene_ = scene_ ;
    }
    else if ( qname == "body" ) {
        if ( elements_.top() != SceneElement ) {
            throw EnvironmentLoadException("<body> declared outside of <scene> element") ;
        }
        elements_.push(BodyElement) ;
        body_ = std::make_shared<Body>() ;
        scene_->bodies_.push_back(body_) ;
    }
    else if ( qname == "geometry" ) {
        // NOTE(review): error message says <scene> but the check is for <node>.
        if ( elements_.top() != NodeElement ) {
            throw EnvironmentLoadException("<geometry> declared outside of <scene> element") ;
        }
        elements_.push(GeometryElement) ;
        geom_.reset(new Geometry) ;
        node_stack_.top()->geometries_.push_back(geom_) ;
    }
    else if ( qname == "model" ) {
        createModelElement(attrs);
    }
    else if ( qname == "node" ) {
        createNodeElement(attrs) ;
    }
    else if ( qname == "cube" || qname == "sphere" || qname == "cylinder" || qname == "cone" ) {
        createPrimitiveElement(qname, attrs) ;
    }
    else if ( qname == "material" ) {
        createMaterialElement(attrs) ;
    }
    else if ( qname == "pose" ) {
        createPoseElement(attrs) ;
    }
    else if ( qname == "translate" ) {
        if ( elements_.top() != PoseElement )
            throw EnvironmentLoadException("<translate> declared outside of <pose> element") ;
        elements_.push(TranslationElement) ;
    }
    else if ( qname == "rotate" ) {
        if ( elements_.top() != PoseElement )
            throw EnvironmentLoadException("<rotate> declared outside of <pose> element") ;
        elements_.push(RotationElement) ;
    }
    else if ( qname == "scale" ) {
        if ( elements_.top() != PoseElement )
            throw EnvironmentLoadException("<scale> declared outside of <pose> element") ;
        elements_.push(ScaleElement) ;
    }
    else if ( qname == "matrix" ) {
        // NOTE(review): message says <scale>; copy/paste from the branch above.
        if ( elements_.top() != PoseElement )
            throw EnvironmentLoadException("<scale> declared outside of <pose> element") ;
        elements_.push(MatrixElement) ;
    }
}

// SAX close-tag dispatch: pops the element stack, finalizes buffered pose
// data and assigns the pose to its owner.
void EnvParser::endElement(const string &qname) {
    if ( qname == "vsim" ) {
        elements_.pop() ;
    }
    else if ( qname == "scene" ) {
        elements_.pop() ;
    }
    else if ( qname == "body" ) {
        elements_.pop() ;
        body_.reset() ;
    }
    else if ( qname == "model" ) {
        elements_.pop() ;
        model_stack_.pop() ;
    }
    else if ( qname == "node" ) {
        elements_.pop() ;
        node_stack_.pop() ;
    }
    else if ( qname == "cube" || qname == "sphere" || qname == "cylinder" || qname == "cone") {
        elements_.pop() ;
        mesh_.reset() ;
    }
    else if ( qname == "geometry" ) {
        elements_.pop() ;
        geom_.reset() ;
    }
    else if ( qname == "material" ) {
        elements_.pop() ;
        material_.reset() ;
    }
    else if ( qname == "translate" ) {
        elements_.pop() ;
        parsePoseTranslation();
    }
    else if ( qname == "rotate" ) {
        elements_.pop() ;
        parsePoseRotation("axis");
    }
    else if ( qname == "scale" ) {
        elements_.pop() ;
        parsePoseScale();
    }
    else if ( qname == "matrix" ) {
        elements_.pop() ;
        parsePoseMatrix();
    }
    else if ( qname == "pose" ) {
        elements_.pop() ;
        ElementType et = elements_.top() ;
        if ( et == BodyElement )
            body_->pose_ = pose_ ;
        else if ( et == NodeElement )
            node_stack_.top()->pose_ = pose_ ;
        else if ( et == ModelElement )
            model_stack_.top()->pose_ = pose_ ;
        pose_.frame_.reset() ;
        pose_.mat_.setIdentity() ;
    }
}

#endif

}
{ "alphanum_fraction": 0.5950406849, "author": null, "avg_line_length": 31.2421686747, "converted": null, "ext": "cpp", "file": null, "hexsha": "3bb87182b14ebce426591ca5be2884c3c3c5982b", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "2a69e27364bab29194328af3d050e34f907e226b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "malasiot/vsim", "max_forks_repo_path": "src/env/xml_loader.cpp", "max_issues_count": null, "max_issues_repo_head_hexsha": "2a69e27364bab29194328af3d050e34f907e226b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "malasiot/vsim", "max_issues_repo_path": "src/env/xml_loader.cpp", "max_line_length": 155, "max_stars_count": null, "max_stars_repo_head_hexsha": "2a69e27364bab29194328af3d050e34f907e226b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "malasiot/vsim", "max_stars_repo_path": "src/env/xml_loader.cpp", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 6481, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 25931 }
import os
import cv2
import queue
import random
import threading
import face_recognition
import numpy as np
from sklearn import svm
import joblib

# Frame queue shared between the capture thread (Receive) and the
# recognition/display thread (Display).
q = queue.Queue()


# Load the face images and encode them.
def Encode():
    """Compute a 128-d face encoding for every image in ``image_path``.

    Each encoding is saved to the current working directory as
    ``<image basename>.npy``.
    """
    print("Start Encoding")
    image_path = 'C:\\Users\\Administrator\\Desktop\\face_recognition-master\\examples\\knn_examples\\test\\'
    # NOTE(review): the original code listed the same directory in a nested
    # loop, re-encoding every image once per directory entry; since np.save
    # simply overwrites, encoding each file exactly once yields the same
    # final state with far less work.
    image_list = os.listdir(image_path)
    for image in image_list:
        face = face_recognition.load_image_file(image_path + image)
        face_locations = face_recognition.face_locations(face)
        face_encs = face_recognition.face_encodings(face, face_locations)
        if not face_encs:
            # Skip images without a detectable face instead of crashing
            # with an IndexError on [0].
            print("No face found in " + image)
            continue
        np.save(image.split(".")[0], face_encs[0])


# Train the SVC classifier.
def Train_SVC():
    """Fit an SVC on the saved face encodings and persist model + names.

    Writes the fitted classifier to ``my_model.m`` (via joblib) and the
    ``{label index: file name}`` mapping to ``name.txt``.
    """
    print("Start Training")
    encodings = []
    names = []
    name_dict = {}

    data_path = "C:\\Users\\Administrator\\Desktop\\face_recognition-master\\examples\\knn_examples\\test\\"
    person_list = os.listdir(data_path)
    print(person_list)
    for i, person in enumerate(person_list):
        # NOTE(review): the original inner loop appended the SAME encoding
        # (np.load(data_path + person)) once per directory entry, duplicating
        # every training row len(person_list) times; one row per file is the
        # intended behaviour.
        # NOTE(review): np.load expects .npy files here, while Encode() above
        # saves them into the current working directory — verify the layout.
        print(i, person)
        encodings.append(np.load(data_path + person).tolist())
        names.append(int(i))
        name_dict[i] = person

    clf = svm.SVC(C=20, probability=True)
    clf.fit(encodings, names)
    joblib.dump(clf, "my_model.m")

    # 'with' guarantees the file is closed even if write() raises.
    with open('name.txt', 'w') as f:
        f.write(str(name_dict))


# Thread 1: grab frames from the (network) camera.
def Receive():
    """Capture thread: continuously read camera frames and push them onto q."""
    print("Start Receive")  # fixed typo: was "Start Reveive"
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    # cap = cv2.VideoCapture("rtsp://admin:a123456789@121.248.50.30/h264/ch1/main/av_stream")
    ret, frame = cap.read()
    while ret:
        # Only enqueue valid frames; the original also enqueued the final
        # None frame returned once the stream ended.
        q.put(frame)
        ret, frame = cap.read()
    cap.release()


# Thread 2: detect/recognise faces and display the annotated video.
def Display():
    """Display thread: annotate queued frames with face boxes and names.

    Runs face detection every 5th frame and recognition every 10th frame;
    press 'q' in the video window to stop.
    """
    import ast

    print("Start Displaying")  # fixed typo: was "Start DisPlaying"
    clf = joblib.load("my_model.m")
    with open('name.txt', 'r') as f:
        # ast.literal_eval is a safe drop-in for eval() on the dict repr
        # written by Train_SVC (eval would execute arbitrary code).
        name_dict = ast.literal_eval(f.read())
    face_locations = []
    face_names = []
    count = 0
    # Accept a prediction only when its best class probability clearly
    # beats a uniform guess over the known classes.
    threshold = 1/(0.75 * len(name_dict))
    while True:
        if not q.empty():
            count += 1
            frame = q.get()
            # Detect on a quarter-size frame for speed.
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            # OpenCV frames are BGR; face_recognition expects RGB.
            rgb_small_frame = small_frame[:, :, ::-1]
            # Face *detection* every 5 frames (~0.2 s).
            if count % 5 == 0:
                face_locations = face_recognition.face_locations(rgb_small_frame)
            # Face *recognition* every 10 frames (~0.4 s).
            if count % 10 == 0:
                face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
                face_names = []
                for face_encoding in face_encodings:
                    probs = clf.predict_proba([face_encoding])
                    print(probs)
                    # Restored this condition: it had been commented out in
                    # the original, leaving a dangling 'else:' (SyntaxError).
                    if np.max(probs) > threshold:
                        face_names.append(name_dict[int(clf.predict([face_encoding])[0])])
                    else:
                        face_names.append("Unknown")
            # Draw boxes and name labels; coordinates were computed on the
            # quarter-size frame, so scale them back up by 4.
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)
            cv2.imshow('Video', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break


if __name__ == '__main__':
    #Encode()
    Train_SVC()
    p1 = threading.Thread(target=Receive)
    p2 = threading.Thread(target=Display)
    p1.start()
    p2.start()
{ "alphanum_fraction": 0.5763819095, "author": null, "avg_line_length": 29.0510948905, "converted": null, "ext": "py", "file": null, "hexsha": "7a2215bce70345c5e86711c4ff1147b527f9287c", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b8eebced1e6120583b262e1c4641a0ed140ab409", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "LiangJunYu88/face_recognition", "max_forks_repo_path": "examples/my_face4_knn.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "b8eebced1e6120583b262e1c4641a0ed140ab409", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "LiangJunYu88/face_recognition", "max_issues_repo_path": "examples/my_face4_knn.py", "max_line_length": 114, "max_stars_count": null, "max_stars_repo_head_hexsha": "b8eebced1e6120583b262e1c4641a0ed140ab409", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "LiangJunYu88/face_recognition", "max_stars_repo_path": "examples/my_face4_knn.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1068, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 3980 }
[STATEMENT] lemma FINITE_states: fixes X :: "'a set" shows "finite X \<Longrightarrow> finite {(s :: 'a state). fmdom' s = X}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. finite X \<Longrightarrow> finite {s. fmdom' s = X} [PROOF STEP] proof (induction rule: finite.induct) [PROOF STATE] proof (state) goal (2 subgoals): 1. finite {s. fmdom' s = {}} 2. \<And>A a. \<lbrakk>finite A; finite {s. fmdom' s = A}\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] case emptyI [PROOF STATE] proof (state) this: goal (2 subgoals): 1. finite {s. fmdom' s = {}} 2. \<And>A a. \<lbrakk>finite A; finite {s. fmdom' s = A}\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] then [PROOF STATE] proof (chain) picking this: [PROOF STEP] have "{s. fmdom' s = {}} = {fmempty}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. {s. fmdom' s = {}} = {fmempty} [PROOF STEP] by (simp add: empty_domain_fmap_set) [PROOF STATE] proof (state) this: {s. fmdom' s = {}} = {fmempty} goal (2 subgoals): 1. finite {s. fmdom' s = {}} 2. \<And>A a. \<lbrakk>finite A; finite {s. fmdom' s = A}\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] then [PROOF STATE] proof (chain) picking this: {s. fmdom' s = {}} = {fmempty} [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: {s. fmdom' s = {}} = {fmempty} goal (1 subgoal): 1. finite {s. fmdom' s = {}} [PROOF STEP] by (simp add: \<open>{s. fmdom' s = {}} = {fmempty}\<close>) [PROOF STATE] proof (state) this: finite {s. fmdom' s = {}} goal (1 subgoal): 1. \<And>A a. \<lbrakk>finite A; finite {s. fmdom' s = A}\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>A a. \<lbrakk>finite A; finite {s. fmdom' s = A}\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] case (insertI A a) [PROOF STATE] proof (state) this: finite A finite {s. fmdom' s = A} goal (1 subgoal): 1. \<And>A a. 
\<lbrakk>finite A; finite {s. fmdom' s = A}\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] assume P1: "finite A" and P2: "finite {s. fmdom' s = A}" [PROOF STATE] proof (state) this: finite A finite {s. fmdom' s = A} goal (1 subgoal): 1. \<And>A a. \<lbrakk>finite A; finite {s. fmdom' s = A}\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] then [PROOF STATE] proof (chain) picking this: finite A finite {s. fmdom' s = A} [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: finite A finite {s. fmdom' s = A} goal (1 subgoal): 1. finite {s. fmdom' s = insert a A} [PROOF STEP] proof (cases "a \<in> A") [PROOF STATE] proof (state) goal (2 subgoals): 1. \<lbrakk>finite A; finite {s. fmdom' s = A}; a \<in> A\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} 2. \<lbrakk>finite A; finite {s. fmdom' s = A}; a \<notin> A\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] case True [PROOF STATE] proof (state) this: a \<in> A goal (2 subgoals): 1. \<lbrakk>finite A; finite {s. fmdom' s = A}; a \<in> A\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} 2. \<lbrakk>finite A; finite {s. fmdom' s = A}; a \<notin> A\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] then [PROOF STATE] proof (chain) picking this: a \<in> A [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: a \<in> A goal (1 subgoal): 1. finite {s. fmdom' s = insert a A} [PROOF STEP] using insertI.IH insert_Diff [PROOF STATE] proof (prove) using this: a \<in> A finite {s. fmdom' s = A} ?a \<in> ?A \<Longrightarrow> insert ?a (?A - {?a}) = ?A goal (1 subgoal): 1. finite {s. fmdom' s = insert a A} [PROOF STEP] by fastforce [PROOF STATE] proof (state) this: finite {s. fmdom' s = insert a A} goal (1 subgoal): 1. \<lbrakk>finite A; finite {s. fmdom' s = A}; a \<notin> A\<rbrakk> \<Longrightarrow> finite {s. 
fmdom' s = insert a A} [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<lbrakk>finite A; finite {s. fmdom' s = A}; a \<notin> A\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] case False [PROOF STATE] proof (state) this: a \<notin> A goal (1 subgoal): 1. \<lbrakk>finite A; finite {s. fmdom' s = A}; a \<notin> A\<rbrakk> \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] then [PROOF STATE] proof (chain) picking this: a \<notin> A [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: a \<notin> A goal (1 subgoal): 1. finite {s. fmdom' s = insert a A} [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. a \<notin> A \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] have "finite ( ((\<lambda>s. fmupd a True s) ` {s. fmdom' s = A}) \<union> ((\<lambda>s. fmupd a False s) ` {s. fmdom' s = A}))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. finite (fmupd a True ` {s. fmdom' s = A} \<union> fmupd a False ` {s. fmdom' s = A}) [PROOF STEP] using False construction_of_all_possible_states_lemma insertI.IH [PROOF STATE] proof (prove) using this: a \<notin> A ?v \<notin> ?X \<Longrightarrow> {s. fmdom' s = insert ?v ?X} = fmupd ?v True ` {s. fmdom' s = ?X} \<union> fmupd ?v False ` {s. fmdom' s = ?X} finite {s. fmdom' s = A} goal (1 subgoal): 1. finite (fmupd a True ` {s. fmdom' s = A} \<union> fmupd a False ` {s. fmdom' s = A}) [PROOF STEP] by blast [PROOF STATE] proof (state) this: finite (fmupd a True ` {s. fmdom' s = A} \<union> fmupd a False ` {s. fmdom' s = A}) goal (1 subgoal): 1. a \<notin> A \<Longrightarrow> finite {s. fmdom' s = insert a A} [PROOF STEP] then [PROOF STATE] proof (chain) picking this: finite (fmupd a True ` {s. fmdom' s = A} \<union> fmupd a False ` {s. fmdom' s = A}) [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: finite (fmupd a True ` {s. fmdom' s = A} \<union> fmupd a False ` {s. fmdom' s = A}) goal (1 subgoal): 1. finite {s. 
fmdom' s = insert a A} [PROOF STEP] using False construction_of_all_possible_states_lemma [PROOF STATE] proof (prove) using this: finite (fmupd a True ` {s. fmdom' s = A} \<union> fmupd a False ` {s. fmdom' s = A}) a \<notin> A ?v \<notin> ?X \<Longrightarrow> {s. fmdom' s = insert ?v ?X} = fmupd ?v True ` {s. fmdom' s = ?X} \<union> fmupd ?v False ` {s. fmdom' s = ?X} goal (1 subgoal): 1. finite {s. fmdom' s = insert a A} [PROOF STEP] by fastforce [PROOF STATE] proof (state) this: finite {s. fmdom' s = insert a A} goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: finite {s. fmdom' s = insert a A} goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: finite {s. fmdom' s = insert a A} goal: No subgoals! [PROOF STEP] qed \<comment> \<open>NOTE added lemma.\<close>
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Factored_Transition_System_Bounding_FactoredSystem", "hexsha": null, "include": null, "lang": null, "length": 34, "llama_tokens": 3052, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
// // Copyright (c) 2017-2019 Native Instruments GmbH, Berlin // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
//

#include <ni/media/pcm/limits.h>
#include <ni/media/pcm/numspace.h>

#include <gtest/gtest.h>

#include <boost/range/algorithm/adjacent_find.hpp>
#include <boost/range/size.hpp>

#include <algorithm>
#include <cmath>
#include <type_traits>

// Typed test fixture checking that pcm::numspace<Value> enumerates exactly
// the sample values described by pcm::numeric_limits<Value>: correct count,
// correct min / max / zero, and (in principle) equidistant spacing.
template <class Value>
class limits : public testing::Test
{
public:
protected:
    // Bit depth to enumerate; capped at 31 so the value space (2^bits
    // entries) stays tractable for wide sample types.
    int bits() const
    {
        static constexpr auto max_bits = 31;
        return std::min( pcm::numeric_limits<Value>::bits(), max_bits );
    }

    // The full enumerated range of sample values at the chosen bit depth.
    auto values() const
    {
        return pcm::numspace<Value>( bits() );
    }

    // Size of the enumerated range as reported by boost::size.
    size_t actual_size() const
    {
        return size_t( boost::size( values() ) );
    }

    // A bit depth of n must yield exactly 2^n values.
    size_t expected_size() const
    {
        return size_t( 1 ) << bits();
    }

    // True when every pair of consecutive values differs by exactly one
    // step, i.e. the enumeration is equidistant.
    // NOTE(review): std::end(values()) and boost::adjacent_find(values(),...)
    // operate on two distinct temporaries returned by values(); this is only
    // valid if numspace iterators compare independently of the owning range
    // object — confirm against the numspace implementation.
    bool equidistant() const
    {
        auto step = pcm::numeric_limits<Value>::step( bits() );
        return std::end( values() ) == boost::adjacent_find( values(), [step]( Value a, Value b ) { return a + step != b; } );
    }

    Value expected_min() const
    {
        return pcm::numeric_limits<Value>::min();
    }

    // First enumerated value — must equal the type's minimum sample.
    Value actual_min() const
    {
        return *std::begin( values() );
    }

    Value expected_max() const
    {
        return pcm::numeric_limits<Value>::max( bits() );
    }

    // Last enumerated value — must equal the type's maximum sample.
    Value actual_max() const
    {
        return *--std::end( values() );
    }

    Value expected_zero() const
    {
        return pcm::numeric_limits<Value>::zero();
    }

    // Middle enumerated value — expected to be the zero sample.
    Value actual_zero() const
    {
        return *( std::begin( values() ) + boost::size( values() ) / 2 );
    }
};

TYPED_TEST_CASE_P( limits );

// Deliberately disabled: enumerating the full space pairwise is too slow,
// so this test currently asserts nothing.
TYPED_TEST_P( limits, expect_equidistant )
{
    // this takes too long
    // EXPECT_TRUE( this->equidistant());
    EXPECT_TRUE( true );
}

TYPED_TEST_P( limits, expect_size )
{
    EXPECT_EQ( this->expected_size(), this->actual_size() );
}

TYPED_TEST_P( limits, expect_min )
{
    EXPECT_EQ( this->expected_min(), this->actual_min() );
}

TYPED_TEST_P( limits, expect_max )
{
    EXPECT_EQ( this->expected_max(), this->actual_max() );
}

TYPED_TEST_P( limits, expect_zero )
{
    EXPECT_EQ( this->expected_zero(), this->actual_zero() );
}

REGISTER_TYPED_TEST_CASE_P( limits, expect_equidistant, expect_size, expect_min, expect_max, expect_zero );

// All sample types exercised by the fixture.
using limits_test_t = testing::Types<uint8_t, uint16_t, uint32_t, uint64_t, int8_t, int16_t, int32_t, int64_t, float, double>;

INSTANTIATE_TYPED_TEST_CASE_P( all, limits, limits_test_t );
{ "alphanum_fraction": 0.6674113376, "author": null, "avg_line_length": 25.7625899281, "converted": null, "ext": "cpp", "file": null, "hexsha": "9cf9331328573a6eef537c627c9e9fdb2a6a7425", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f06fe3384a3176c46c857cd4d42b43cca9d25870", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ni-mheppner/ni-media", "max_forks_repo_path": "pcm/test/ni/media/pcm/limits.test.cpp", "max_issues_count": null, "max_issues_repo_head_hexsha": "f06fe3384a3176c46c857cd4d42b43cca9d25870", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ni-mheppner/ni-media", "max_issues_repo_path": "pcm/test/ni/media/pcm/limits.test.cpp", "max_line_length": 108, "max_stars_count": null, "max_stars_repo_head_hexsha": "f06fe3384a3176c46c857cd4d42b43cca9d25870", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ni-mheppner/ni-media", "max_stars_repo_path": "pcm/test/ni/media/pcm/limits.test.cpp", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 879, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 3581 }
from collections import deque
import os

import cv2

from .wrapper_base import Wrapper, ObservationWrapper
import gym
import gym.spaces as spaces
import numpy as np

os.environ.setdefault("PATH", "")
# Disable OpenCL in OpenCV (avoids GPU/driver variability in cv2 ops).
cv2.ocl.setUseOpenCL(False)


class FrameStack(Wrapper):
    # Stacks the last k observations along the final (channel) axis.
    def __init__(self, env, k):
        """Stack k last frames."""
        Wrapper.__init__(self, env)
        self.k = k
        # Ring buffer of the most recent k observations.
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # The stacked space widens the last dimension by a factor of k.
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=(shp[:-1] + (shp[-1] * k,)),
            dtype=env.observation_space.dtype,
        )

    # pylint: disable=method-hidden
    def reset(self, **kwargs):
        # Fill the whole buffer with the initial observation so the first
        # stacked frame is well-defined.
        ob = self.env.reset(**kwargs)
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()

    # pylint: disable=method-hidden
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        # Concatenate the k buffered frames along the channel axis.
        assert len(self.frames) == self.k
        return np.concatenate(list(self.frames), axis=-1)


class ScaledFloatFrame(ObservationWrapper):
    # Converts uint8 pixel observations to float32 in [0, 1].
    def __init__(self, env):
        ObservationWrapper.__init__(self, env)
        self.observation_space = gym.spaces.Box(
            low=0, high=1, shape=env.observation_space.shape, dtype=np.float32
        )

    def observation(self, observation):
        # careful! This undoes the memory optimization, use
        # with smaller replay buffers only.
        img = np.array(observation, dtype=np.float32)
        img /= 255.0
        return img


class NormalizeActions(Wrapper):
    """Makes env expect actions that are zero-mean and unit variance """

    def __init__(self, env):
        Wrapper.__init__(self, env)
        # Only rescale dimensions whose bounds are finite; unbounded
        # dimensions are passed through unchanged.
        self._mask = np.logical_and(
            np.isfinite(env.action_space.low),
            np.isfinite(env.action_space.high))
        self._low = np.where(self._mask, env.action_space.low, -1)
        self._high = np.where(self._mask, env.action_space.high, 1)

        # Advertised action space: [-1, 1] on the finite dimensions.
        low = np.where(self._mask, -np.ones_like(self._low), self._low)
        high = np.where(self._mask, np.ones_like(self._low), self._high)
        self.action_space = gym.spaces.Box(low, high, dtype=np.float32)

    def step(self, action):
        # Map [-1, 1] back to the env's native [low, high] range.
        original = (action + 1) / 2 * (self._high - self._low) + self._low
        original = np.where(self._mask, original, action)
        return self.env.step(original)


class FrameSkip(Wrapper):
    # Repeats each action `skip` times and sums the rewards.
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame, repeating the action and
        summing the rewards in between (no max-pooling over frames)."""
        Wrapper.__init__(self, env)
        # Clamp to at least 1 so step() always executes the action once.
        self._skip = max(1, skip)

    # pylint: disable=method-hidden
    def step(self, action):
        """Repeat action and sum reward; returns the last observation."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            total_reward += reward
            if done:
                break
        return obs, total_reward, done, info

    # pylint: disable=method-hidden
    def reset(self, *args, **kwargs):
        return self.env.reset(*args, **kwargs)


class ObsDict(ObservationWrapper):
    """Transform the normal observation into an obs dict with a key given by arg"""

    def __init__(self, env, default_key="obs_1d"):
        super(ObsDict, self).__init__(env)
        self.observation_space = gym.spaces.Dict({default_key: self.env.observation_space})
        self._key = default_key

    def observation(self, observation):
        return {self._key: observation}


class ObsDictRenameKey(ObservationWrapper):
    """Renames a key for an obs dict"""

    def __init__(self, env, old_name="observation",new_name="obs_1d"):
        super(ObsDictRenameKey, self).__init__(env)
        old_obs_space = env.observation_space
        assert isinstance(old_obs_space,gym.spaces.Dict)
        import copy
        # Deep-copy so the wrapped env's own space object is not mutated.
        new_obs_space = copy.deepcopy(old_obs_space)
        new_obs_space.spaces[new_name] = new_obs_space.spaces.pop(old_name)
        self.observation_space = new_obs_space
        self.old_name = old_name
        self.new_name = new_name

    def observation(self, observation:dict):
        # Mutates the incoming dict in place and returns it.
        observation[self.new_name] = observation.pop(self.old_name)
        return observation


class RewardObs(Wrapper):
    """Append the last reward as an extra observation entry.

    NOTE(review): __init__ widens the 'obs_1d' entry of the observation
    space, but step()/reset() write the reward into obs['vector'] instead —
    the declared space and the emitted observations disagree; confirm which
    key is intended.
    """

    def __init__(self, env):
        Wrapper.__init__(self, env)
        spaces = self.env.observation_space.spaces
        if "obs_1d" in spaces:
            assert isinstance(spaces["obs_1d"], gym.spaces.Box)
            assert spaces["obs_1d"].dtype == np.float32
            # Widen the box by one slot to make room for the reward scalar.
            new_space = gym.spaces.Box(-np.inf, np.inf,
                                       shape=tuple(np.array(spaces["obs_1d"].shape) + 1))
        else:
            new_space = gym.spaces.Box(-np.inf, np.inf, shape=(1,))
        spaces["obs_1d"] = new_space
        self.observation_space = gym.spaces.Dict(spaces)

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        # Concatenate the reward onto the (possibly absent) 'vector' entry.
        obs['vector'] = np.concatenate(
            (obs.get('vector', ()), np.array([reward], dtype=np.float32)), axis=-1
        )
        return obs, reward, done, info

    def reset(self):
        obs = self.env.reset()
        # On reset no reward has been received yet, so append 0.
        obs['vector'] = np.concatenate(
            (obs.get('vector', ()), np.array([0], dtype=np.float32)), axis=-1
        )
        return obs
{ "alphanum_fraction": 0.6177364563, "author": null, "avg_line_length": 32.9, "converted": null, "ext": "py", "file": null, "hexsha": "fbdd3eaa7901ac9df91adb588889e7d2a7b1284d", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "023d617ad86477763e424fd09fb496571083c50c", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "llucid-97/FastDeepQLearning", "max_forks_repo_path": "franQ/Env/wrappers/common.py", "max_issues_count": 4, "max_issues_repo_head_hexsha": "023d617ad86477763e424fd09fb496571083c50c", "max_issues_repo_issues_event_max_datetime": "2021-08-28T15:45:24.000Z", "max_issues_repo_issues_event_min_datetime": "2021-08-04T11:53:47.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "llucid-97/FastDeepQLearning", "max_issues_repo_path": "franQ/Env/wrappers/common.py", "max_line_length": 91, "max_stars_count": 5, "max_stars_repo_head_hexsha": "023d617ad86477763e424fd09fb496571083c50c", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "llucid-97/FastDeepQLearning", "max_stars_repo_path": "franQ/Env/wrappers/common.py", "max_stars_repo_stars_event_max_datetime": "2021-06-30T14:52:04.000Z", "max_stars_repo_stars_event_min_datetime": "2020-08-19T14:41:37.000Z", "num_tokens": 1352, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 5593 }
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import pandas as pd
from matplotlib import pyplot as plt


def training_plot(scores, folder_name, file_name):
    """Plot train/val curves from ``scores`` and save as a PNG.

    ``scores`` is a dict with "train" and "val" lists of per-epoch values.
    NOTE(review): both curves are plotted against len(scores["train"]);
    assumes the two lists have equal length — confirm at the call site.
    """
    plt.figure(figsize=(15,5))
    plt.plot(range(len(scores["train"])), scores["train"], label=f'train {file_name}')
    plt.plot(range(len(scores["train"])), scores["val"], label=f'val {file_name}')
    plt.title(f'{file_name} plot');
    plt.xlabel('Epoch');
    plt.ylabel(f'{file_name}');
    plt.legend();
    #plt.show()
    plt.savefig("{}/{}.png".format(folder_name, file_name), dpi=300)


def dict_to_csv(d, folder_name, file_name):
    """Write dict ``d`` (keys -> value lists) as columns of a CSV file."""
    df = pd.DataFrame.from_dict(d, orient='index')
    df = df.transpose()
    df.to_csv("{}/{}.csv".format(folder_name, file_name), index=False)


# https://www.kaggle.com/rishabhiitbhu/unet-with-resnet34-encoder-pytorch
def dice_loss(input, target):
    """Smoothed dice *coefficient* (higher is better, despite the name).

    ``input`` holds raw logits (sigmoid is applied here); MixedLoss below
    relies on this returning the coefficient, taking -log of it.
    """
    input = torch.sigmoid(input)
    smooth = 1.0
    iflat = input.view(-1)
    tflat = target.view(-1)
    intersection = (iflat * tflat).sum()
    return ((2.0 * intersection + smooth) / (iflat.sum() + tflat.sum() + smooth))


class FocalLoss(nn.Module):
    # Numerically-stable binary focal loss operating on raw logits.
    def __init__(self, gamma):
        super().__init__()
        self.gamma = gamma  # focusing parameter: down-weights easy examples

    def forward(self, input, target):
        if not (target.size() == input.size()):
            raise ValueError("Target size ({}) must be the same as input size ({})"
                             .format(target.size(), input.size()))
        # Stable BCE-with-logits: max_val trick avoids exp() overflow.
        max_val = (-input).clamp(min=0)
        loss = input - input * target + max_val + \
            ((-max_val).exp() + (-input - max_val).exp()).log()
        # log p_t of the correct class; (invprobs * gamma).exp() = (1-p_t)^gamma.
        invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))
        loss = (invprobs * self.gamma).exp() * loss
        return loss.mean()


class MixedLoss(nn.Module):
    # alpha * focal loss - log(dice coefficient).
    def __init__(self, alpha, gamma):
        super().__init__()
        self.alpha = alpha  # weight of the focal term
        self.focal = FocalLoss(gamma)

    def forward(self, input, target):
        loss = self.alpha*self.focal(input, target) - torch.log(dice_loss(input, target))
        return loss.mean()


def predict(X, threshold):
    """Binarize probability array ``X`` at ``threshold`` -> uint8 mask."""
    X_p = np.copy(X)
    preds = (X_p > threshold).astype('uint8')
    return preds


def metric(probability, truth, threshold=0.5, reduction='none'):
    """Calculates dice of positive and negative images separately.

    ``probability`` and ``truth`` must be torch tensors of the same shape
    (first dimension = batch).  Returns (dice, dice_neg, dice_pos,
    num_neg, num_pos) as tensors/ints, without reducing to scalars.
    NOTE(review): the ``reduction`` parameter is accepted but never used.
    """
    batch_size = len(truth)
    with torch.no_grad():
        probability = probability.view(batch_size, -1)
        truth = truth.view(batch_size, -1)
        assert(probability.shape == truth.shape)

        p = (probability > threshold).float()
        t = (truth > 0.5).float()

        t_sum = t.sum(-1)
        p_sum = p.sum(-1)
        # Negative samples: ground truth mask is completely empty.
        neg_index = torch.nonzero(t_sum == 0)
        pos_index = torch.nonzero(t_sum >= 1)

        # For empty-truth samples, dice is 1 iff the prediction is also empty.
        dice_neg = (p_sum == 0).float()
        dice_pos = 2 * (p*t).sum(-1)/((p+t).sum(-1))

        dice_neg = dice_neg[neg_index]
        dice_pos = dice_pos[pos_index]
        dice = torch.cat([dice_pos, dice_neg])

        # dice_neg = np.nan_to_num(dice_neg.mean().item(), 0)
        # dice_pos = np.nan_to_num(dice_pos.mean().item(), 0)
        # dice = dice.mean().item()

        num_neg = len(neg_index)
        num_pos = len(pos_index)

    return dice, dice_neg, dice_pos, num_neg, num_pos


class Meter:
    '''A meter to keep track of iou and dice scores throughout an epoch'''

    # NOTE(review): the phase and epoch arguments are accepted but unused.
    def __init__(self, phase, epoch):
        self.base_threshold = 0.5  # <<<<<<<<<<< here's the threshold
        self.base_dice_scores = []
        self.dice_neg_scores = []
        self.dice_pos_scores = []
        self.iou_scores = []

    def update(self, targets, outputs):
        # ``outputs`` are raw logits; convert to probabilities first.
        probs = torch.sigmoid(outputs)
        dice, dice_neg, dice_pos, _, _ = metric(probs, targets, self.base_threshold)
        self.base_dice_scores.extend(dice)
        self.dice_pos_scores.extend(dice_pos)
        self.dice_neg_scores.extend(dice_neg)
        preds = predict(probs, self.base_threshold)
        iou = compute_iou_batch(preds, targets, classes=[1])
        self.iou_scores.append(iou)

    def get_metrics(self):
        # nanmean skips samples whose dice/iou was NaN (e.g. empty unions).
        dice = np.nanmean(self.base_dice_scores)
        dice_neg = np.nanmean(self.dice_neg_scores)
        dice_pos = np.nanmean(self.dice_pos_scores)
        dices = [dice, dice_neg, dice_pos]
        iou = np.nanmean(self.iou_scores)
        return dices, iou


# NOTE(review): phase, epoch, epoch_loss-independent args `start` are unused
# except epoch_loss; kept for call-site compatibility.
def epoch_log(phase, epoch, epoch_loss, meter, start):
    '''logging the metrics at the end of an epoch'''
    dices, iou = meter.get_metrics()
    dice, dice_neg, dice_pos = dices
    print("Loss: %0.4f | dice: %0.4f | dice_neg: %0.4f | dice_pos: %0.4f | IoU: %0.4f" % (epoch_loss, dice, dice_neg, dice_pos, iou))
    return dice, iou


def compute_ious(pred, label, classes, ignore_index=255, only_present=True):
    '''computes iou for one ground truth mask and predicted mask'''
    # Zero out predictions at ignored pixels.  NOTE(review): this mutates
    # the caller's ``pred`` array in place (compute_iou_batch passes a copy).
    pred[label == ignore_index] = 0
    ious = []
    for c in classes:
        label_c = label == c
        # With only_present, classes absent from the label contribute NaN
        # (excluded later by nanmean) rather than 0.
        if only_present and np.sum(label_c) == 0:
            ious.append(np.nan)
            continue
        pred_c = pred == c
        intersection = np.logical_and(pred_c, label_c).sum()
        union = np.logical_or(pred_c, label_c).sum()
        if union != 0:
            ious.append(intersection / union)
    return ious if ious else [1]


def compute_iou_batch(outputs, labels, classes=None):
    '''computes mean iou for a batch of ground truth masks and predicted masks'''
    ious = []
    preds = np.copy(outputs)  # copy is imp
    labels = np.array(labels)  # tensor to np
    for pred, label in zip(preds, labels):
        ious.append(np.nanmean(compute_ious(pred, label, classes)))
    iou = np.nanmean(ious)
    return iou
{ "alphanum_fraction": 0.6214560728, "author": null, "avg_line_length": 35.9371069182, "converted": null, "ext": "py", "file": null, "hexsha": "2a175a546d410749dfe74479a572ab2e85e71e87", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c5a5d558a05286dcbae662f982f34f07bb7774fb", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ifding/CT-Segmentation", "max_forks_repo_path": "utils.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "c5a5d558a05286dcbae662f982f34f07bb7774fb", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ifding/CT-Segmentation", "max_issues_repo_path": "utils.py", "max_line_length": 133, "max_stars_count": null, "max_stars_repo_head_hexsha": "c5a5d558a05286dcbae662f982f34f07bb7774fb", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ifding/CT-Segmentation", "max_stars_repo_path": "utils.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1505, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 5714 }
#%%
# Exploratory, notebook-style script ("#%%" cell markers): compares sparse
# component analysis (SCA) against PCA on scRNAseq gene-expression data and
# visualizes the learned representations with UMAP.
import os
import pickle
import time
from pathlib import Path

import colorcet as cc
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from umap import UMAP

from graspologic.plot import pairplot
from sparse_decomposition import SparseComponentAnalysis
from sparse_decomposition.utils import calculate_explained_variance_ratio
from sparse_new_basis.data import load_scRNAseq
from sparse_new_basis.plot import savefig, set_theme

set_theme()

fig_dir = Path("sparse_new_basis/results/gene_sca_umap_1.0")


def stashfig(name, *args, **kwargs):
    # Save the current figure under this experiment's results directory.
    savefig(fig_dir, name, *args, **kwargs)


#%% experiment hyperparameters
output_dir = Path("sparse_new_basis/experiments/genes/outputs")

var_thresh = 0.005
train_size = 2 ** 14
n_components = 125
max_iter = 20
with_mean = True
with_std = True
seed = 8888

# Encode the hyperparameters into the output directory name so runs with
# different settings do not clobber each other.
global_params = (
    f"var_thresh={var_thresh}-train_size={train_size}-n_components={n_components}"
    f"-max_iter={max_iter}-with_std={with_std}-seed={seed}"
)
output_dir = output_dir / global_params

#%% load data
sequencing_df, annotation_df = load_scRNAseq(fillna=True)

#%% throw out some genes with low variance
X = sequencing_df.values.copy()
# NOTE: `var_thresh` (the float threshold above) is rebound here to the fitted
# sklearn transformer object.
var_thresh = VarianceThreshold(threshold=var_thresh)
X = var_thresh.fit_transform(X)
gene_index = sequencing_df.columns
original_n_genes = len(gene_index)
gene_index = gene_index[var_thresh.get_support()]
sequencing_df = sequencing_df[gene_index]
new_n_genes = len(gene_index)
print(
    f"Number of genes removed: {original_n_genes - new_n_genes} "
    f"out of {original_n_genes}"
)

#%% train/test split, stratified by neuron type
np.random.seed(seed)
neuron_index = sequencing_df.index
y = sequencing_df.index.get_level_values(level="Neuron_type").values

# stratify=y will try to set the distribution of class labels the same for train/test
X_train, X_test, index_train, index_test = train_test_split(
    X, neuron_index, stratify=y, train_size=train_size
)

#%% center and scale training data
currtime = time.time()
scaler = StandardScaler(with_mean=with_mean, with_std=with_std, copy=False)
X_train = scaler.fit_transform(X_train)
print(f"{time.time() - currtime:.3f} elapsed to scale and center data.")

#%% fit plain PCA as a dense baseline
np.random.seed(seed)
currtime = time.time()
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X_train)
print(f"{time.time() - currtime:.3f} elapsed to fit PCA model.")

#%% fit SCA at a few sparsity levels (gamma = np.inf recovers ordinary PCA)
np.random.seed(seed)
gammas = [
    4 * n_components,
    # 100,
    # 250,
    # 500,
    # int(np.sqrt(X_train.shape[1]) * n_components),
    np.inf,
]
gammas = [float(g) for g in gammas]
models_by_gamma = {}
Xs_by_gamma = {}
for i, gamma in enumerate(gammas):
    print(f"Gamma = {gamma}...")
    # With gamma = inf there is no sparsity penalty, so no iterative
    # refinement is needed.
    if gamma == np.inf:
        _max_iter = 0
    else:
        _max_iter = max_iter
    currtime = time.time()
    sca = SparseComponentAnalysis(
        n_components=n_components, max_iter=_max_iter, gamma=gamma, verbose=10
    )
    X_sca = sca.fit_transform(X_train)
    print(f"{time.time() - currtime:.3f} elapsed.")
    models_by_gamma[gamma] = sca
    Xs_by_gamma[gamma] = X_sca
    print()

#%% collect scree data: explained variance and loading sparsity per rank k
rows = []
for gamma, model in models_by_gamma.items():
    explained_variance_ratio = model.explained_variance_ratio_
    for k, ev in enumerate(explained_variance_ratio):
        # Number of nonzero loadings among the first k+1 components.
        n_nonzero = np.count_nonzero(model.components_[: k + 1])
        rows.append(
            {
                "gamma": gamma,
                "explained_variance": ev,
                "n_components": k + 1,
                "n_nonzero": n_nonzero,
            }
        )
scree_df = pd.DataFrame(rows)

#%% palette
# Blue shades for the finite gammas, red for gamma = inf (i.e. PCA).
palette = dict(zip(gammas, sns.color_palette("deep", 10)))
blue_shades = sns.color_palette("Blues", n_colors=len(gammas))[1:]
palette = dict(zip(gammas[:-1], blue_shades))
red_shades = sns.color_palette("Reds", n_colors=len(gammas))[1:]
palette[np.inf] = red_shades[-1]

#%% scree plot: explained variance vs number of components
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
sns.lineplot(
    data=scree_df,
    x="n_components",
    y="explained_variance",
    hue="gamma",
    ax=ax,
    marker="o",
    palette=palette,
)
ax.get_legend().remove()
ax.legend(bbox_to_anchor=(1, 1), loc="upper left", title="Gamma")
# ax.legend().set_title("Gamma")
ax.set(ylabel="Cumulative explained variance", xlabel="# of PCs")
ax.yaxis.set_major_locator(plt.MaxNLocator(3))
ax.xaxis.set_major_locator(plt.IndexLocator(base=5, offset=-1))
stashfig("screeplot")

#%% scree plot: explained variance vs number of nonzero loadings
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
sns.lineplot(
    data=scree_df,
    x="n_nonzero",
    y="explained_variance",
    hue="gamma",
    ax=ax,
    marker="o",
    palette=palette,
)
ax.get_legend().remove()
ax.legend(bbox_to_anchor=(1, 1), loc="upper left", title="Gamma")
# ax.legend().set_title("Gamma")
ax.set(ylabel="Cumulative explained variance", xlabel="# nonzero elements")
plt.xscale("log")
ax.yaxis.set_major_locator(plt.MaxNLocator(3))
# ax.xaxis.set_major_locator(plt.IndexLocator(base=5, offset=-1))
stashfig("screeplot-by-params")

#%% UMAP embeddings of the PCA (gamma=inf) and SCA (first gamma) scores
currtime = time.time()
umap_pca = UMAP(min_dist=0.3, n_neighbors=75, metric="cosine")
X_umap_pca = umap_pca.fit_transform(Xs_by_gamma[np.inf])
print(f"{time.time() - currtime:.3f} elapsed for UMAP.")

currtime = time.time()
umap_sca = UMAP(min_dist=0.3, n_neighbors=75, metric="cosine")
X_umap_sca = umap_sca.fit_transform(Xs_by_gamma[gammas[0]])
print(f"{time.time() - currtime:.3f} elapsed for UMAP.")

#%% plot both UMAP embeddings side by side, colored by neuron type
neuron_types = index_train.get_level_values("Neuron_type").values
neuron_type_palette = dict(zip(np.unique(neuron_types), cc.glasbey_light))

# Fraction of genes with at least one nonzero loading for the PCA model.
sca = models_by_gamma[gammas[-1]]
components = sca.components_
prop_genes_used = np.count_nonzero(components.max(axis=0)) / components.shape[1]

fig, axs = plt.subplots(1, 2, figsize=(16, 8))
ax = axs[0]
ax.axis("off")
sns.scatterplot(
    x=X_umap_pca[:, 0],
    y=X_umap_pca[:, 1],
    hue=neuron_types,
    palette=neuron_type_palette,
    alpha=0.2,
    s=10,
    linewidth=0,
    ax=ax,
)
ax.get_legend().remove()
ax.set(
    title=r"UMAP $\circ$ PCA - " + f"Proportion of genes used: {prop_genes_used:.2f}"
)

# fig, ax = plt.subplots(1, 1, figsize=(10, 10))
# Same statistic for the sparse model (first gamma); plotting continues below.
sca = models_by_gamma[gammas[0]]
components = sca.components_
prop_genes_used = np.count_nonzero(components.max(axis=0)) / components.shape[1]
ax = axs[1]
# (Second half of the script: finishes the SCA UMAP panel, then measures
# discriminability of each representation.)
ax.axis("off")
sns.scatterplot(
    x=X_umap_sca[:, 0],
    y=X_umap_sca[:, 1],
    hue=neuron_types,
    palette=neuron_type_palette,
    alpha=0.2,
    s=10,
    ax=ax,
    linewidth=0,
)
ax.get_legend().remove()
ax.set(
    title=r"UMAP $\circ$ SCA - " + f"Proportion of genes used: {prop_genes_used:.2f}"
)
stashfig(f"umap-n_components={n_components}")

#%% discriminability of the PCA representation
from hyppo.discrim import DiscrimTwoSample, DiscrimOneSample

discrim = DiscrimOneSample(is_dist=False)

# Re-encode neuron-type labels as integer codes for the discriminability test.
y = index_train.get_level_values("Neuron_type").values.copy()
uni_y = np.unique(y)
name_map = dict(zip(uni_y, range(len(uni_y))))
y = np.vectorize(name_map.get)(y)

currtime = time.time()
output = discrim.test(X_pca, y, reps=0)
print(f"{time.time() - currtime:.3f} elapsed.")
output  # bare expression: displayed as the cell's output in notebook mode

#%% discriminability from a precomputed cosine-distance matrix (PCA, subsample)
from sklearn.metrics import pairwise_distances

currtime = time.time()
dist_X_pca = pairwise_distances(X_pca[:2000], metric="cosine")
print(f"{time.time() - currtime:.3f} elapsed.")

currtime = time.time()
discrim = DiscrimOneSample(is_dist=True)
tstat, _ = discrim.test(dist_X_pca, y[:2000], reps=0)
print(f"{time.time() - currtime:.3f} elapsed.")
tstat

#%% same measurement for the SCA representation (larger subsample)
X_sca = Xs_by_gamma[gammas[0]]
currtime = time.time()
dist_X_sca = pairwise_distances(X_sca[:3000], metric="cosine")
print(f"{time.time() - currtime:.3f} elapsed.")

currtime = time.time()
discrim = DiscrimOneSample(is_dist=True)
tstat, _ = discrim.test(dist_X_sca, y[:3000], reps=0)
print(f"{time.time() - currtime:.3f} elapsed.")
tstat

#%%
def compute_metrics(model):
    """Summarize a fitted SCA/PCA model.

    Returns a dict with the final cumulative explained variance ratio, the
    number/proportion of nonzero entries in the loadings, and the
    number/proportion of columns (genes) with any nonzero loading.
    """
    final_pve = model.explained_variance_ratio_[-1]
    n_nonzero = np.count_nonzero(model.components_)
    p_nonzero = n_nonzero / model.components_.size
    n_nonzero_cols = np.count_nonzero(model.components_.max(axis=0))
    p_nonzero_cols = n_nonzero_cols / model.components_.shape[1]
    output = {
        "explained_variance": final_pve,
        "n_nonzero": n_nonzero,
        "p_nonzero": p_nonzero,
        "n_nonzero_cols": n_nonzero_cols,
        "p_nonzero_cols": p_nonzero_cols,
    }
    return output


from sparse_decomposition import SparseComponentAnalysis

# Sweep over (n_components, gamma) combinations.
max_iter = 15
tol = 1e-4
n_components_range = [30]  # 60, 120]
params = []
S_train_by_params = {}
models_by_params = {}
metric_rows = []
for n_components in n_components_range:
    gammas = [
        2 * n_components,
        0.25 * np.sqrt(n_components * X_train.shape[1]),
        0.5 * np.sqrt(n_components * X_train.shape[1]),
        np.sqrt(n_components * X_train.shape[1]),
        0.5 * n_components * np.sqrt(X_train.shape[1]),
    ]
    gammas = [float(int(g)) for g in gammas]
    gammas.append(np.inf)
    for gamma in gammas:
        print(f"n_components = {n_components}, gamma = {gamma}")
        print()
        curr_params = (n_components, gamma)
        params.append(curr_params)

        # fit model
        currtime = time.time()
        sca = SparseComponentAnalysis(
            n_components=n_components,
            max_iter=max_iter,
            gamma=gamma,
            verbose=10,
            tol=tol,
        )
        S_train = sca.fit_transform(X_train)
        print(f"{time.time() - currtime:.3f} elapsed to train SCA model.")
        S_test = sca.transform(X_test)

        # save model fit
        models_by_params[curr_params] = sca
        S_train_by_params[curr_params] = S_train

        # save metrics
        metrics = compute_metrics(sca)
        metrics["gamma"] = gamma
        metrics["n_components"] = n_components
        metric_rows.append(metrics)
        print("\n\n\n")

#%% discriminability over random subsamples for each parameter combination
n_subsamples = 5
n_per_subsample = 4096
metric = "cosine"
discrim_result_rows = []
for curr_params in params:
    X_transformed = S_train_by_params[curr_params]
    for i in range(n_subsamples):
        subsample_inds = np.random.choice(
            len(X_transformed), size=n_per_subsample, replace=False
        )
        dist_X_transformed = pairwise_distances(
            X_transformed[subsample_inds], metric=metric
        )
        currtime = time.time()
        discrim = DiscrimOneSample(is_dist=True)
        tstat, _ = discrim.test(dist_X_transformed, y[subsample_inds], reps=0)
        print(f"{time.time() - currtime:.3f} elapsed for discriminability.")
        result = {
            "tstat": tstat,
            "n_components": curr_params[0],
            "gamma": curr_params[1],
            "discrim_resample": i,
        }
        discrim_result_rows.append(result)
discrim_results = pd.DataFrame(discrim_result_rows)
discrim_results

#%%
# palette = dict(zip(gammas, sns.color_palette("deep", 10)))
# blue_shades = sns.color_palette("Blues", n_colors=len(gammas))[1:]
# palette = dict(zip(gammas[:-1], blue_shades))
# red_shades = sns.color_palette("Reds", n_colors=len(gammas))[1:]
# palette[np.inf] = red_shades[-1]

#%% strip plot of discriminability t-stats by parameter combination
discrim_results["params"] = list(
    zip(discrim_results["n_components"], discrim_results["gamma"])
)
discrim_results

fig, ax = plt.subplots(1, 1, figsize=(8, 6))
sns.stripplot(data=discrim_results, x="params", y="tstat", hue="n_components", ax=ax)
plt.setp(ax.get_xticklabels(), rotation=45, rotation_mode="anchor", ha="right")
stashfig("discrim-by-params")

#%% join the sparsity metrics onto the discriminability results
metrics = pd.DataFrame(metric_rows)
metrics["params"] = list(zip(metrics["n_components"], metrics["gamma"]))
discrim_results["p_nonzero_cols"] = discrim_results["params"].map(
    metrics.set_index("params")["p_nonzero_cols"]
)
discrim_results

#%% t-stat vs proportion of genes used, for n_components = 30
plot_results = discrim_results[discrim_results["n_components"] == 30]
gammas = np.unique(plot_results["gamma"])
palette = dict(zip(gammas, sns.color_palette("deep", 10)))
blue_shades = sns.color_palette("Blues", n_colors=len(gammas))[1:]
palette = dict(zip(gammas[:-1], blue_shades))
red_shades = sns.color_palette("Reds", n_colors=len(gammas))[1:]
palette[np.inf] = red_shades[-1]

fig, ax = plt.subplots(1, 1, figsize=(8, 6))
sns.scatterplot(
    data=plot_results,
    x="p_nonzero_cols",
    y="tstat",
    hue="gamma",
    palette=palette,
    ax=ax,
)

#%%
# Discriminability on the full training set for the last-fit representation
# (X_transformed still holds the final loop iterate from above).
dist_X_transformed = pairwise_distances(X_transformed, metric=metric)
currtime = time.time()
discrim = DiscrimOneSample(is_dist=True)
tstat, _ = discrim.test(dist_X_transformed, y, reps=0)
print(f"{time.time() - currtime:.3f} elapsed for discriminability.")
{ "alphanum_fraction": 0.6919393456, "author": null, "avg_line_length": 27.96875, "converted": null, "ext": "py", "file": null, "hexsha": "a173a18d4e44e17e1d75c777275678dddcd839eb", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "4bb9c766154fe713c544a80178c067ae8c867026", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "bdpedigo/sparse_new_basis", "max_forks_repo_path": "experiments/genes/gene_umap_1.0.py", "max_issues_count": 4, "max_issues_repo_head_hexsha": "4bb9c766154fe713c544a80178c067ae8c867026", "max_issues_repo_issues_event_max_datetime": "2020-11-16T02:23:42.000Z", "max_issues_repo_issues_event_min_datetime": "2020-11-14T18:27:11.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "bdpedigo/sparse_new_basis", "max_issues_repo_path": "experiments/genes/gene_umap_1.0.py", "max_line_length": 85, "max_stars_count": null, "max_stars_repo_head_hexsha": "4bb9c766154fe713c544a80178c067ae8c867026", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "bdpedigo/sparse_new_basis", "max_stars_repo_path": "experiments/genes/gene_umap_1.0.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3433, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 12530 }
# -*- coding: utf-8 -*- # # Author: Taylor Smith <taylor.smith@alkaline-ml.com> # # Provide numpy compatibility and common variables. Since this # is a relatively sparse script, I feel I must defend this design # choice. See the docstring in the __init__: "Each sub-module is specifically # designed not to make calls out to other portions of pmdarima and to # remove circular dependencies." # # Since DTYPE is used commonly, this removes circular dependencies or # hard-coding. from __future__ import absolute_import import numpy as np # this is going to be the data-type used across pmdarima DTYPE = np.float64
{ "alphanum_fraction": 0.7658536585, "author": null, "avg_line_length": 32.3684210526, "converted": null, "ext": "py", "file": null, "hexsha": "0b9fc611b50235c3a83c72b379df135f9c5893f8", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Saravji/pmdarima", "max_forks_repo_path": "pmdarima/compat/numpy.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Saravji/pmdarima", "max_issues_repo_path": "pmdarima/compat/numpy.py", "max_line_length": 77, "max_stars_count": 1, "max_stars_repo_head_hexsha": "7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Saravji/pmdarima", "max_stars_repo_path": "pmdarima/compat/numpy.py", "max_stars_repo_stars_event_max_datetime": "2019-01-17T19:00:42.000Z", "max_stars_repo_stars_event_min_datetime": "2019-01-17T19:00:42.000Z", "num_tokens": 152, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 615 }
import os
import time

import numpy as np
import matplotlib.pyplot as plt
from xdesign_ming.material import XraylibMaterial, CustomMaterial
from xdesign_ming.geometry import *
from xdesign_ming.phantom import Phantom
from xdesign_ming.propagation import *
from xdesign_ming.plot import *
from xdesign_ming.acquisition import Simulator

# Directory used to cache the discretized phantom grids between runs.
GRID_CACHE_DIR = 'data/sav/grid'


def test_model_prop_pipeline():
    """Build a tube-with-particles phantom, discretize it, and run a
    multislice propagation through it.

    The discretized delta/beta grids are cached under ``data/sav/grid`` so
    the (slow) phantom rasterization only happens on the first run.  The
    magnitude of the propagated wavefront is saved to ``ming_output.npy``.
    """
    # Geometry parameters (meters): a hollow truncated cone ("tube") with
    # small spherical particles placed on its outer surface.
    n_particles = 5
    top_y = 25.e-7
    top_radius = 10.e-7
    bottom_radius = 100.e-7
    top_thickness = 5.e-7
    bottom_thickness = 15.e-7
    length = 200.e-7

    silicon = XraylibMaterial('Si', 2.33)
    titania = XraylibMaterial('TiO2', 4.23)
    air = CustomMaterial(delta=0, beta=0)

    try:
        # Reuse cached grids when available; rasterizing the phantom is slow.
        grid_delta = np.load(os.path.join(GRID_CACHE_DIR, 'grid_delta.npy'))
        grid_beta = np.load(os.path.join(GRID_CACHE_DIR, 'grid_beta.npy'))
    except IOError:
        # Outer (solid) truncated cone in silicon.
        tube0 = TruncatedCone_3d(top_center=Point([128.e-7, top_y, 128.e-7]),
                                 length=length,
                                 top_radius=top_radius,
                                 bottom_radius=bottom_radius)
        phantom = Phantom(geometry=tube0, material=silicon)
        # Inner air cone hollows out the tube, leaving the wall thickness.
        tube1 = TruncatedCone_3d(top_center=Point([128.e-7, top_y, 128.e-7]),
                                 length=length,
                                 top_radius=top_radius - top_thickness,
                                 bottom_radius=bottom_radius - bottom_thickness)
        tube1 = Phantom(geometry=tube1, material=air)
        phantom.children.append(tube1)

        # Sample particle heights via inverse-transform sampling so that the
        # particles are uniformly distributed over the slanted cone surface.
        rand_y = []
        for _ in range(n_particles):
            xi = np.random.rand()
            rand_y.append(
                (top_radius
                 - np.sqrt(top_radius ** 2 - top_radius ** 2 * xi
                           + bottom_radius ** 2 * xi))
                / (top_radius - bottom_radius) * length + top_y)
        for part_y in rand_y:
            # Tube radius at this height; place each particle on the surface
            # at a random azimuthal angle.
            r = top_radius + (bottom_radius - top_radius) / length * (part_y - top_y)
            theta = np.random.rand() * np.pi * 2
            part_x = np.cos(theta) * r + 128.e-7
            part_z = np.sin(theta) * r + 128.e-7
            # BUG FIX: the original `int(np.random.rand() * 6.e-7) + 4.e-7`
            # truncated the 1e-7-scale random term to 0, so every particle
            # had radius exactly 4.e-7.  Draw uniformly from [4e-7, 10e-7).
            rad = np.random.rand() * 6.e-7 + 4.e-7
            sphere = Sphere_3d(center=Point([part_x, part_y, part_z]), radius=rad)
            sphere = Phantom(geometry=sphere, material=titania)
            phantom.children.append(sphere)

        # Rasterize the phantom to delta/beta grids at 1 nm voxel pitch.
        grid = discrete_phantom(phantom, 1.e-7,
                                bounding_box=[[0, 255.e-7],
                                              [0, 255.e-7],
                                              [0, 255.e-7]],
                                prop=['delta', 'beta'],
                                ratio=1,
                                mkwargs={'energy': 25},
                                overlay_mode='replace')
        grid_delta = grid[..., 0]
        grid_beta = grid[..., 1]
        # BUG FIX: the grids were previously saved to the working directory,
        # so the cache paths loaded above were never populated.  Save them
        # where the next run will look for them.
        if not os.path.isdir(GRID_CACHE_DIR):
            os.makedirs(GRID_CACHE_DIR)
        np.save(os.path.join(GRID_CACHE_DIR, 'grid_beta.npy'), grid_beta)
        np.save(os.path.join(GRID_CACHE_DIR, 'grid_delta.npy'), grid_delta)

    # Propagate a plane wave through the grids and time the propagation.
    sim = Simulator(energy=25000, grid=(grid_delta, grid_beta),
                    psize=[1.e-7, 1.e-7, 1.e-7])
    sim.initialize_wavefront('plane')
    t0 = time.time()
    wavefront = sim.multislice_propagate()
    print('Propagation time: {} ms'.format((time.time() - t0) * 1000))
    np.save('ming_output.npy', abs(wavefront))


if __name__ == '__main__':
    # run tests which create figures
    test_model_prop_pipeline()
    plt.show(block=True)
{ "alphanum_fraction": 0.5312232678, "author": null, "avg_line_length": 36.53125, "converted": null, "ext": "py", "file": null, "hexsha": "246269dca9a1d80749c299842f8a30c4f09f93c6", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1e36e067ff53809f4cc6286562b221c4bddbcb60", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "sajid-ali-nu/multislice", "max_forks_repo_path": "tests/test_tube_particles_modified.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "1e36e067ff53809f4cc6286562b221c4bddbcb60", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "sajid-ali-nu/multislice", "max_issues_repo_path": "tests/test_tube_particles_modified.py", "max_line_length": 78, "max_stars_count": 1, "max_stars_repo_head_hexsha": "1e36e067ff53809f4cc6286562b221c4bddbcb60", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "sajid-ali-nu/multislice", "max_stars_repo_path": "tests/test_tube_particles_modified.py", "max_stars_repo_stars_event_max_datetime": "2019-01-12T22:41:21.000Z", "max_stars_repo_stars_event_min_datetime": "2019-01-12T22:41:21.000Z", "num_tokens": 838, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 3507 }
""" This module contains tools to help in extracting training crops from images, where a "crop" is a relatively small rectangular ROI from within the bounds of an image. author: Stephen O'Hara created: April 14, 2016 """ import pyvision3 as pv3 import shapely.geometry as sg import shapely.ops as so import numpy as np def crop_regions(image, shapes, crop_size=None): """ Crops are generated from within the provided image by using the bounding box around a set of provided shapes, or at a fixed size, centered on the centroids of the provided shapes. Parameters ---------- image: pyvision3 image shapes: shapely polygons list Only the exterior coordinates of the polygons will be used to find either the bounding rectangle or the center point. crop_size: tuple (width, height) or None If None, then the bounding box for each shape will be used, and thus the generated crops may vary in size. Otherwise, if a single common size is desired, then a centered rectangle of this size will be extracted from the centroids of the shapes. It's possible with this latter strategy that you might have a shape too big for the crop_size. Returns ------- A list of crops, where each crop is a pyvision3 image """ if crop_size is not None: cw, ch = crop_size centers = [(shp.centroid.x, shp.centroid.y) for shp in shapes] rects = [pv3.CenteredRect(cx, cy, cw, ch) for (cx, cy) in centers] else: rects = [sg.box(*shp.bounds) for shp in shapes] # crops = [image.crop(r) for r in rects] crops = [] for r in rects: try: crop = image.crop(r) except pv3.OutOfBoundsError: print("{} is out of bounds in {}".format(str(r.bounds), image.desc)) crop = None crops.append(crop) return crops def crop_negative_regions(image, shapes, crop_size, N=10): """ This function is useful for creating negative or 'background' samples from an image where you already have known foreground regions. 
Basically, it generates randomly located rectangles of the specified crop size in the image, then removes any that intersect with any of the foreground shapes. The user specifies how many negative samples to crop from the image. Parameters ---------- image: pyvision3 image shapes: shapely polygons list The shapes are the places NOT to crop negative samples from. crop_size: (w,h) The fixed size rectangles to be used for background crops N: integer The number of crops to generate from this image Returns ------- A list of crops, where each is a pyvision3 image """ positive_area = so.cascaded_union(shapes) validated_crops = [] while len(validated_crops) < N: rect_gen = random_rect_gen(image.size, crop_size, N=N * 2) for rect in rect_gen: if not rect.intersects(positive_area): validated_crops.append(image.crop(rect)) if len(validated_crops) >= N: break return validated_crops def random_rect_gen(image_size, crop_size, N=1): """ Generates random rectangles (crop boundaries) of the specified size from within the image_size bounds. Parameters ---------- image_size: tuple The (w, h) of the image, serves as the container size from which the smaller crop rectangles will be generated crop_size: tuple The (w, h) of the crop rectangles, must be smaller than the image_size N: integer The number of random crop rects to create, default is 1 Returns ------- The rectangles as shapely polygons """ img_w, img_h = image_size c_w, c_h = crop_size offset_x = c_w // 2 offset_y = c_h // 2 rand_xs = np.random.randint(offset_x, high=img_w - offset_x, size=N) rand_ys = np.random.randint(offset_y, high=img_h - offset_y, size=N) for (cx, cy) in zip(rand_xs, rand_ys): rect = pv3.CenteredRect(cx, cy, crop_size[0], crop_size[1]) yield rect
{ "alphanum_fraction": 0.6576230492, "author": null, "avg_line_length": 32.5390625, "converted": null, "ext": "py", "file": null, "hexsha": "913a7169f47ec472f043e9557c2894130f6c369e", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2017-01-14T16:04:36.000Z", "max_forks_repo_forks_event_min_datetime": "2017-01-14T16:04:36.000Z", "max_forks_repo_head_hexsha": "cc0082897358bd2a8b29306c9d50ff6d2c118ead", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "svohara/pyvision3", "max_forks_repo_path": "pyvision3/dataset_tools/crops.py", "max_issues_count": 4, "max_issues_repo_head_hexsha": "cc0082897358bd2a8b29306c9d50ff6d2c118ead", "max_issues_repo_issues_event_max_datetime": "2020-04-28T03:25:39.000Z", "max_issues_repo_issues_event_min_datetime": "2020-04-16T02:38:16.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "svohara/pyvision3", "max_issues_repo_path": "pyvision3/dataset_tools/crops.py", "max_line_length": 84, "max_stars_count": 2, "max_stars_repo_head_hexsha": "cc0082897358bd2a8b29306c9d50ff6d2c118ead", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "svohara/pyvision3", "max_stars_repo_path": "pyvision3/dataset_tools/crops.py", "max_stars_repo_stars_event_max_datetime": "2018-02-09T04:11:11.000Z", "max_stars_repo_stars_event_min_datetime": "2016-12-14T20:44:17.000Z", "num_tokens": 1001, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 4165 }
%---------------------------------------------------------------------------- % Magic tutorial number 7 %---------------------------------------------------------------------------- \NeedsTeXFormat{LaTeX2e}[1994/12/01] \documentclass[letterpaper,twoside,12pt]{article} \usepackage{epsfig,times} \setlength{\textwidth}{8.5in} \addtolength{\textwidth}{-2.0in} \setlength{\textheight}{11.0in} \addtolength{\textheight}{-2.0in} \setlength{\oddsidemargin}{0in} \setlength{\evensidemargin}{0pt} \setlength{\topmargin}{-0.5in} \setlength{\headheight}{0.2in} \setlength{\headsep}{0.3in} \setlength{\topskip}{0pt} \def\hinch{\hspace*{0.5in}} \def\starti{\begin{center}\begin{tabbing}\hinch\=\hinch\=\hinch\=hinch\hinch\=\kill} \def\endi{\end{tabbing}\end{center}} \def\ii{\>\>\>} \def\mytitle{Magic Tutorial \#7: Netlists and Routing} %---------------------------------------------------------------------------- \begin{document} \makeatletter \newcommand{\ps@magic}{% \renewcommand{\@oddhead}{\mytitle\hfil\today}% \renewcommand{\@evenhead}{\today\hfil\mytitle}% \renewcommand{\@evenfoot}{\hfil\textrm{--{\thepage}--}\hfil}% \renewcommand{\@oddfoot}{\@evenfoot}} \newcommand{\ps@mplain}{% \renewcommand{\@oddhead}{}% \renewcommand{\@evenhead}{}% \renewcommand{\@evenfoot}{\hfil\textrm{--{\thepage}--}\hfil}% \renewcommand{\@oddfoot}{\@evenfoot}} \makeatother \pagestyle{magic} \thispagestyle{mplain} \begin{center} {\bfseries \Large \mytitle} \\ \vspace*{0.5in} {\itshape John Ousterhout} \\ \vspace*{0.5in} Computer Science Division \\ Electrical Engineering and Computer Sciences \\ University of California \\ Berkeley, CA 94720 \\ \vspace*{0.25in} {\itshape (Updated by others, too.)} \\ \vspace*{0.25in} This tutorial corresponds to Magic version 7. 
\\ \end{center} \vspace*{0.5in} {\noindent\bfseries\large Tutorials to read first:} \starti \> Magic Tutorial \#1: Getting Started \\ \> Magic Tutorial \#2: Basic Painting and Selection \\ \> Magic Tutorial \#3: Advanced Painting (Wiring and Plowing) \\ \> Magic Tutorial \#4: Cell Hierarchies \\ \> Magic Tutorial \#5: Multiple Windows \\ \endi {\noindent\bfseries\large Netlist Commands introduced in this tutorial:} \starti \> :extract, :flush, :ripup, :savenetlist, :trace, :writeall \endi {\noindent\bfseries\large Layout Commands introduced in this tutorial:} \starti \> :channel, :route \endi {\noindent\bfseries\large Macros introduced in this tutorial:} \starti \> {\itshape (none)} \endi \vspace*{0.75in} \section{Introduction} This tutorial describes how to use Magic's automatic routing tools to make interconnections between subcells in a design. In addition to the standard Magic router, which is invoked by the {\bfseries route} command and covered in this tutorial, two other routing tools are available. A gate-array router {\itshape Garouter} permits user specified channel definitions, terminals in the interior of cells, and route-throughs across cells. To learn about the gate-array router read this first then ``Magic Tutorial \#12: Routing Gate Arrays''. Finally Magic provides an interactive maze-router that takes graphic hints, the {\itshape Irouter}, that permits the user to control the overall path of routes while leaving the tedious details to Magic. The {\itshape Irouter} is documented in ``Magic Tutorial \#10: The Interactive Router''. The standard Magic router provides an {\itshape obstacle-avoidance} capability: if there is mask material in the routing areas, the router can work under, over, or around that material to complete the connections. This means that you can pre-route key signals by hand and have Magic route the less important signals automatically. 
In addition, you can route power and ground by hand (right now we don't have any power-ground routing tools, so you {\itshape have} to route them by hand). The router {\itshape only} makes connections between subcells; to make point-to-point connections between pieces of layout within a single cell you should use the wiring command described in ``Magic Tutorial \#3: Advanced Painting (Wiring and Plowing) '' or the maze router described in ``Magic Tutorial \#10: The Interactive Router''. If you only need to make a few connections you are probably better off doing them manually. The first step in routing is to tell Magic what should be connected to what. This information is contained in a file called a {\itshape netlist}. Sections 2, 3, 4, and 5 describe how to create and modify netlists using Magic's interactive netlist editing tools. Once you've created a netlist, the next step is to invoke the router. Section 6 shows how to do this, and gives a brief summary of what goes on inside the routing tools. Unless your design is very simple and has lots of free space, the routing probably won't succeed the first time. Section 7 describes the feedback provided by the routing tools. Sections 8 and 9 discuss how you can modify your design in light of this feedback to improve its routability. You'll probably need to iterate a few times until the routing is successful. \section{Terminals and Netlists} A netlist is a file that describes a set of desired connections. It contains one or more {\itshape nets}. Each net names a set of {\itshape terminals} that should all be wired together. A terminal is simply a label attached to a piece of mask material within a subcell; it is distinguishable from ordinary labels within a subcell by its presence within a netlist file and by certain characteristics common to terminals, as described below. The first step in building a netlist is to label the terminals in your design. Figure 1 shows an example. 
Each label should be a line or rectangle running along the edge of the cell (point terminals are not allowed). The router will make a connection to the cell somewhere along a terminal's length. If the label isn't at the edge of the cell, Magic will route recklessly across the cell to reach the terminal, taking the shortest path between the terminal and a routing channel. It's almost always a good idea to arrange for terminal labels to be at cell edges. The label must be at least as wide as the minimum width of the routing material; the wider you make the label, the more flexibility you give the router to choose a good point to connect to the terminal. \begin{figure}[ht] \begin{center} \epsfig{file=../psfigures/tut7.1.ps, width=0.5\columnwidth} \caption{An example of terminal labels. Each terminal should be labeled with a line or rectangle along the edge of the cell.} \end{center} \end{figure} Terminal labels must be attached to mask material that connects directly to one of Magic's two routing layers (Routing layers are defined in Magic's technology file). For example, in the SCMOS process where the routing layers are metal1 and metal2, diffusion may not be used as a terminal since neither of the routing layers will connect directly to it. On the other hand, a terminal may be attached to diffusion-metal1 contact, since the metal1 routing layer will connect properly to it. Terminals can have arbitrary names, except that they should not contain slashes (``/'') or the substring ``feedthrough'', and should not end in ``@'', ``\$'', or ``\^{}''. See Tutorial \#2 for a complete description of labeling conventions. For an example of good and bad terminals, edit the cell {\bfseries tut7a}. The cell doesn't make any electrical sense, but contains several good and bad terminals. All the terminals with names like {\bfseries bad1} are incorrect or undesirable for one of the reasons given above, and those with names like {\bfseries good4} are acceptable. 
\begin{figure}[ht]
\begin{center}
\epsfig{file=../psfigures/tut7.2.ps, width=0.7\columnwidth}
\caption{The netlist menu.}
\end{center}
\end{figure}

\begin{table}[ht]
\begin{center}
\begin{tabular}{|l|l|}
\hline
Button & Action \\
\hline
Current Text & Left-click: prompt for more labels \\
 & Right-click: advance to next label \\
\hline
Placer & Left-click: place label \\
 & Right-click: change label text position \\
\hline
Pumps & Left-click: decrement number \\
 & Right-click: increment number \\
\hline
Find & Search under box, highlight labels \\
 & matching current text \\
\hline
Current Netlist & Left-click: prompt for new netlist name \\
 & Right-click: use edit cell name as netlist name \\
\hline
Verify & Check that wiring matches netlist (same as \\
 & typing {\bfseries :verify} command) \\
\hline
Print & Print names of all terminals in selected net \\
 & (same as typing {\bfseries :print} command) \\
\hline
Terms & Place feedback areas on screen to identify all terminals \\
 & in current netlist (same as {\bfseries :showterms} command) \\
\hline
Cleanup & Check current netlist for missing labels and nets \\
 & with fewer than two terminals (same as typing \\
 & {\bfseries :cleanup} command) \\
\hline
No Net & Delete selected net (same as {\bfseries :dnet} command) \\
\hline
Show & Highlight paint connected to material under box \\
 & (same as typing {\bfseries :shownet} command) \\
\hline
\end{tabular}
\caption{A summary of all the netlist menu button actions.}
\end{center}
\end{table}

If you create two or more terminal labels with
the same name in the same cell the router will assume that
they are electrically equivalent (connected together within the
cell). Because of this, when routing the net it will feel free to
connect to whichever one of the terminals is most convenient,
and ignore the others.
In some cases the router may take advantage of electrically equivalent terminals by using {\itshape feed throughs}: entering a cell at one terminal to make one connection, and exiting through an equivalent terminal on the way to make another connection for the same net. \section{Menu for Label Editing} Magic provides a special menu facility to assist you in placing terminal labels and editing netlists. To make the menu appear, invoke the Magic command \starti \ii {\bfseries :specialopen netlist} \endi A new window will appear in the lower-left corner of the screen, containing several rectangular areas on a purple background. Each of the rectangular areas is called a {\itshape button}. Clicking mouse buttons inside the menu buttons will invoke various commands to edit labels and netlists. Figure 2 shows a diagram of the netlist menu and Table I summarizes the meaning of button clicks in various menu items. The netlist menu can be grown, shrunk, and moved just like any other window; see ``Magic Tutorial \#5: Multiple Windows'' for details. It also has its own private set of commands. To see what commands you can type in the netlist menu, move the cursor over the menu and type \starti \ii {\bfseries :help} \endi You shouldn't need to type commands in the netlist menu very often, since almost everything you'll need to do can be done using the menu. See Section 9 for a description of a few of the commands you can type; the complete set is described in the manual page {\itshape magic(1)}. One of the best uses for the commands is so that you can define macros for them and avoid having to go back and forth to the menu; look up the {\bfseries :send} command in the man page to see how to do this. The top half of the menu is for placing labels and the bottom half is for editing netlists. This section describes the label facilities, and Section 4 describes the netlist facilities. 
The label menu makes it easy for you to enter lots of labels, particularly when there are many labels that are the same except for a number, e.g. {\bfseries bus1}, {\bfseries bus2}, {\bfseries bus3}, etc. There are four sections to the label menu: the current text, the placer, two pumps, and the {\bfseries Find} button. To place labels, first click the left mouse button over the current text rectangle. Then type one or more labels on the keyboard, one per line. You can use this mechanism to enter several labels at once. Type return twice to signal the end of the list. At this point, the first of the labels you typed will appear in the current text rectangle. To place a label, position the box over the area you want to label, then click the left mouse button inside one of the squares of the placer area. A label will be created with the current text. Where you click in the placer determines where the label text will appear relative to the label box: for example, clicking the left-center square causes the text to be centered just to the left of the box. You can place many copies of the same label by moving the box and clicking the placer area again. You can re-orient the text of a label by clicking the right mouse button inside the placer area. For example, if you would like to move a label's text so that it appears centered above the label, place the box over the label and right-click the top-center placer square. If you entered several labels at once, only the first appears in the current text area. However, you can advance to the next label by right-clicking inside the current text area. In this way you can place a long series of labels entirely with the mouse. Try using this mechanism to add labels to {\bfseries tut7a}. The two small buttons underneath the right side of the current text area are called pumps. To see how these work, enter a label name containing a number into the current text area, for example, {\bfseries bus1}. 
When you do this, the ``1'' appears in the left pump. Right-clicking the pump causes the number to increment, and left-clicking the pump causes the number to decrement. This makes it easy for you to enter a series of numbered signal names. If a name has two numbers in it, the second number will appear in the second pump, and it can be incremented or decremented too. Try using the pumps to place a series of numbered names. The last entry in the label portion of the menu is the {\bfseries Find} button. This can be used to locate a label by searching for a given pattern. If you click the {\bfseries Find} button, Magic will use the current text as a pattern and search the area underneath the box for a label whose name contains the pattern. Pattern-matching is done in the same way as in {\itshape csh}, using the special characters ``*'', ``?'', ``\\'', ``['', and ``]''. Try this on {\bfseries tut7a}: enter ``good*'' into the current text area, place the box around the whole cell, then click on the ``Find'' button. For each of the good labels, a feedback area will be created with white stripes to highlight the area. The {\bfseries :feedback find} command can be used to step through the areas, and {\bfseries :feedback clear} will erase the feedback information from the screen. The {\bfseries :feedback} command has many of the same options as {\bfseries :drc} for getting information about feedback areas; see the Magic manual page for details, or type {\bfseries :feedback help} for a synopsis of the options. \section{Netlist Editing} After placing terminal labels, the next step is to specify the connections between them; this is called netlist editing. The bottom half of the netlist menu is used for editing netlists. The first thing you must do is to specify the netlist you want to edit. Do this by clicking in the current netlist box. If you left-click, Magic will prompt you for the netlist name and you can type it at the keyboard. 
If you right-click, Magic will use the name of the edit cell as the current netlist name. In either case, Magic will read the netlist from disk if it exists and will create a new netlist if there isn't currently a netlist file with the given name. Netlist files are stored on disk with a ``.net'' extension, which is added by Magic when it reads and writes files. You can change the current netlist by clicking the current netlist button again. Startup Magic on the cell {\bfseries tut7b}, open the netlist menu, and set the current netlist to {\bfseries tut7b}. Then expand the subcells in {\bfseries tut7b} so that you can see their terminals. \begin{table}[ht] \begin{center} \begin{tabular}{|l|l|} \hline Button & Action \\ \hline Left & Select net, using nearest terminal to cursor. \\ \hline Right & Toggle nearest terminal into or out of \\ & current net. \\ \hline Middle & Find nearest terminal, join its net with the \\ & current net. \\ \hline \end{tabular} \caption{The actions of the mouse buttons when the terminal tool is in use.} \end{center} \end{table} Netlist editing is done with the netlist tool. If you haven't already read ``Tutorial \#3: Advanced Painting (Wiring and Plowing)'', you should read it now, up through Section 2.1. Tutorial \#3 explained how to change the current tool by using the space macro or by typing {\bfseries :tool}. Switch tools to the netlist tool (the cursor will appear as a thick square). When the netlist tool is in use the left, right, and middle buttons invoke select, toggle, and join operations respectively (see Table II). To see how they work, move the cursor over the terminal {\bfseries right4} in the top subcell of {\bfseries tut7b} and click the left mouse button (you may have to zoom in a bit to see the labels; terminals are numbered in clockwise order: {\bfseries right4} is the fourth terminal from the top on the right side). This causes the net containing that terminal to be selected. 
Three hollow white squares will appear over the layout, marking the terminals that are supposed to be wired together into {\bfseries right4}'s net. Left-click over the {\bfseries left3} terminal in the same subcell to select its net, then select the {\bfseries right4} net again. The right button is used to toggle terminals into or out of the current net. If you right-click over a terminal that is in the current net, then it is removed from the current net. If you right-click over a terminal that isn't in the current net, it is added to the current net. A single terminal can only be in one net at a time, so if a terminal is already in a net when you toggle it into another net then Magic will remove it from the old net. Toggle the terminal {\bfseries top4} in the bottom cell out of, then back into, the net containing {\bfseries right4}. Now toggle {\bfseries left3} in the bottom cell into this net. Magic warns you because it had to remove {\bfseries left3} from another net in order to add it to {\bfseries right4}'s net. Type {\bfseries u} to undo this change, then left-click on {\bfseries left3} to make sure it got restored to its old net by the undo. All of the netlist-editing operations are undo-able. The middle button is used to merge two nets together. If you middle-click over a terminal, all the terminals in its net are added to the current net. Play around with the three buttons to edit the netlist {\bfseries tut7b}. Note: the router does not make connections to terminals in the top level cell. It only works with terminals in subcells, or sub-subcells, etc. Because of this, the netlist editor does not permit you to select terminals in the top level cell. If you click over such a terminal Magic prints an error message and refuses to make the selection. If you left-click over a terminal that is not currently in a net, Magic creates a new net automatically. If you didn't really want to make a new net, you have several choices. 
Either you can toggle the terminal out of its own net, you can undo the select operation, or you can click the {\bfseries No Net} button in the netlist menu (you can do this even while the cursor is in the square shape). The {\bfseries No Net} button removes all terminals from the current net and destroys the net. It's a bad idea to leave single-net terminals in the netlist: the router will treat them as errors. There are two ways to save netlists on disk; these are similar to the ways you can save layout cells. If you type \starti \ii {\bfseries :savenetlist }[{\itshape name}] \endi with the cursor over the netlist menu, the current netlist will be saved on disk in the file {\itshape name}.{\bfseries net}. If no {\itshape name} is typed, the name of the current netlist is used. If you type the command \starti \ii {\bfseries :writeall} \endi then Magic will step through all the netlists that have been modified since they were last written, asking you if you'd like them to be written out. If you try to leave Magic without saving all the modified netlists, Magic will warn you and give you a chance to write them out. If you make changes to a netlist and then decide you don't want them, you can use the {\bfseries :flush} netlist command to throw away all of the changes and re-read the netlist from its disk file. If you create netlists using a text editor or some other program, you can use {\bfseries :flush} after you've modified the netlist file in order to make sure that Magic is using the most up-to-date version. The {\bfseries Print} button in the netlist menu will print out on the text screen the names of all the terminals in the current net. Try this for some of the nets in {\bfseries tut7b}. The official name of a terminal looks a lot like a Unix file name, consisting of a bunch of fields separated by slashes. Each field except the last is the id of a subcell, and the last field is the name of the terminal. 
These hierarchical names provide unique names for each terminal, even if the same terminal name is re-used in different cells or if there are multiple copies of the same cell. The {\bfseries Verify} button will check the paint of the edit cell to be sure it implements the connections specified in the current netlist. Feedback areas are created to show nets that are incomplete or nets that are shorted together. The {\bfseries Terms} button will cause Magic to generate a feedback area over each of the terminals in the current netlist, so that you can see which terminals are included in the netlist. If you type the command {\bfseries :feedback clear} in a layout window then the feedback will be erased. The {\bfseries Cleanup} button is there as a convenience to help you cleanup your netlists. If you click on it, Magic will scan through the current netlist to make sure it is reasonable. {\bfseries Cleanup} looks for two error conditions: terminal names that don't correspond to any labels in the design, and nets that don't have at least two terminals. When it finds either of these conditions it prints a message and gives you the chance to either delete the offending terminal (if you type {\bfseries dterm}), delete the offending net ({\bfseries dnet}), skip the current problem without modifying the netlist and continue looking for other problems ({\bfseries skip}), or abort the {\bfseries Cleanup} command without making any more changes ({\bfseries abort}). The {\bfseries Show} button provides an additional mechanism for displaying the paint in the net. If you place the box over a piece of paint and click on {\bfseries Show}, Magic will highlight all of the paint in the net under the box. 
This is similar to pointing at the net and typing {\bfseries s} three times to select the net, except that {\bfseries Show} doesn't select the net (it uses a different mechanism to highlight it), and {\bfseries Show} will trace through all cells, expanded or not (the selection mechanism only considers paint in expanded cells). Once you've used {\bfseries Show} to highlight a net, the only way to make the highlighting go away is to place the box over empty space and invoke {\bfseries Show} again. {\bfseries Show} is an old command that pre-dates the selection interface, but we've left it in Magic because some people find it useful. \section{Netlist Files} Netlists are stored on disk in ordinary text files. You are welcome to edit those files by hand or to write programs that generate the netlists automatically. For example, a netlist might be generated by a schematic editor or by a high-level simulator. See the manual page {\itshape net(5)} for a description of netlist file format. \section{Running the Router} Once you've created a netlist, it is relatively easy to invoke the router. First, place the box around the area you'd like Magic to consider for routing. No terminals outside this area will be considered, and Magic will not generate any paint more than a few units outside this area (Magic may use the next routing grid line outside the area). Load {\bfseries tut7d}, {\bfseries :flush} the netlist if you made any changes to it, set the box to the bounding box of the cell, and then invoke the router using the command: \starti \ii {\bfseries :route} \endi When the command completes, the netlist should be routed. Click the {\bfseries Verify} netlist button to make sure the connections were made correctly. Try deleting a piece from one of the wires and verify again. Feedback areas should appear to indicate where the routing was incorrect. 
Use the {\bfseries :feedback} command to step through the areas and, eventually, to delete the feedback ({\bfseries :feedback help} gives a synopsis of the command options). If the router is unable to complete the connections, it will report errors to you. Errors may be reported in several ways. For some errors, such as non-existent terminal names, messages will be printed. For other errors, cross-hatched feedback areas will be created. Most of the feedback areas have messages similar to ``Net shifter/bit[0]/phi1: Can't make bottom connection.'' To see the message associated with a feedback area, place the box over the feedback area and type {\bfseries :feedback why}. In this case the message means that for some reason the router was unable to connect the specified net (named by one of its terminals) within one of the routing channels. The terms ``bottom'', ``top'', etc. may be misnomers because Magic sometimes rotates channels before routing: the names refer to the direction at the time the channel was routed, not the direction in the circuit. However, the location of the feedback area indicates where the connection was supposed to have been made. You've probably noticed by now that the router sometimes generates unnecessary wiring, such as inserting extra jogs and U-shapes in wires (look next to {\bfseries right3} in the top cell). These jogs are particularly noticeable in small examples. However, the router actually does {\itshape better} on larger examples: there will still be a bit of extra wire, but it's negligible in comparison to the total wire length on a large chip. Some of this wire is necessary and important: it helps the router to avoid several problem situations that would cause it to fail on more difficult examples. However, you can use the {\bfseries straighten} command described in ``Magic Tutorial \#3: Advanced Painting (Wiring and Plowing)'' to remove unnecessary jogs. Please don't judge the router by its behavior on small examples. 
On the other hand, if it does awful things on big examples, we'd like to know about it. All of the wires placed by the router are of the same width, so the router won't be very useful for power and ground wiring. When using the Magic router, you can wire power and ground by hand before running the router. The router will be able to work around your hand-placed connections to make the connections in the netlist. If there are certain key signals that you want to wire carefully by hand, you can do this too; the router will work around them. Signals that you route by hand should not be in the netlist. {\bfseries Tutorial7b} has an example of ``hand routing'' in the form of a piece of metal in the middle of the circuit. Undo the routing, and try modifying the metal and/or adding more hand routing of your own to see how it affects the routing. The Magic router has a number of options useful for getting information about the routing and setting routing parameters. You need to invoke the {\bfseries route} command once for each option you want to specify; then type {\bfseries :route} with no options to start up the router with whatever parameters you've set. The {\bfseries viamin} option, which invokes a routing post-pass, is, of course, invoked AFTER routing. Type {\bfseries :route netlist} {\itshape file} to specify a netlist for the routing without having to open up the netlist menu. The {\bfseries metal} option lets you toggle metal maximization on and off; if metal maximization is turned on, the router converts routing from the alternate routing layer (``poly'') to the preferred routing layer (``metal'') wherever possible. The {\bfseries vias} option controls metal maximization by specifying how many grid units of ``metal'' conversion make it worthwhile to place vias; setting this to 5 means that metal maximization will add extra vias only if 5 or more grid units of ``poly'' can be converted to ``metal''. 
View the current technology's router parameters with the {\bfseries tech} option. The {\bfseries jog}, {\bfseries obstacle}, and {\bfseries steady} options let you view and change parameters to control the channel router (this feature is for advanced users). The {\bfseries viamin} option invokes a via minimization algorithm which reduces the number of vias in a routed layout. This can be used as a post-processing step to improve the quality of the routing. This may be useful even when using another router to do the actual routing. Finally, show all parameter values with the {\bfseries settings} option. The options and their actions are summarized in Table III. \begin{table}[ht] \begin{center} \begin{tabular}{|l|l|} \hline Option & Action \\ \hline {\bfseries end} & Print the channel router end constant \\ {\bfseries end}{\itshape real} & Set the channel router end constant \\ \hline {\bfseries help} & Print a summary of the router options \\ \hline {\bfseries jog} & Print the channel router minimum jog length \\ {\bfseries jog} {\itshape int} & Set the minimum jog length, measured in grid units \\ \hline {\bfseries metal} & Toggle metal maximization on or off \\ \hline {\bfseries netlist} & Print the name of the current net list \\ {\bfseries netlist} {\itshape file} & Set the current net list \\ \hline {\bfseries obstacle} & Print the channel router obstacle constant \\ {\bfseries obstacle} {\itshape real} & Set the obstacle constant \\ \hline {\bfseries settings} & Print a list of all router parameters \\ \hline {\bfseries steady} & Print the channel router steady net constant \\ {\bfseries steady} {\itshape int} & Set the steady net constant, measured in grid units \\ \hline {\bfseries tech} & Print router technology information \\ \hline {\bfseries vias} & Print the metal maximization via limit \\ {\bfseries vias} {\itshape int} & Set the via limit \\ \hline {\bfseries viamin} & Minimize vias in a routed layout. 
\\ \hline \end{tabular} \end{center} \caption{A summary of all of Magic router options.} \end{table} \section{How the Router Works} In order to make the router produce the best possible results, it helps to know a little bit about how it works. The router runs in three stages, called {\itshape channel definition}, {\itshape global routing}, and {\itshape channel routing}. In the channel definition phase, Magic divides the area of the edit cell into rectangular routing areas called channels. The channels cover all the space under the box except the areas occupied by subcells. All of Magic's routing goes in the channel areas, except that stems (Section 8.2) may extend over subcells. To see the channel structure that Magic chose, place the box in {\bfseries tut7d} as if you were going to route, then type the command \starti \ii {\bfseries :channel} \endi in the layout window. Magic will compute the channel structure and display it on the screen as a collection of feedback areas. The channel structure is displayed as white rectangles. Type {\bfseries :feedback clear} when you're through looking at them. The second phase of routing is global routing. In the global routing phase, Magic considers each net in turn and chooses the sequence of channels the net must pass through in order to connect its terminals. The {\itshape crossing points} (places where the net crosses from one channel to another) are chosen at this point, but not the exact path through each channel. In the third phase, each channel is considered separately. All the nets passing through that channel are examined at once, and the exact path of each net is decided. Once the routing paths have been determined, paint is added to the edit cell to implement the routing. The router is grid-based: all wires are placed on a uniform grid. For the standard nMOS process the grid spacing is 7 units, and for the standard SCMOS process it is 8 units. 
If you type {\bfseries :grid 8} after routing {\bfseries tut7b}, you'll see that all of the routing lines up with its lower and left sides on grid lines. Fortunately, you don't have to make your cell terminals line up on even grid boundaries. During the routing Magic generates {\itshape stems} that connect your terminals up to grid lines at the edges of channels. Notice that there's space left by Magic between the subcells and the channels; this space is used by the stem generator. \section{What to do When the Router Fails} Don't be surprised if the router is unable to make all the connections the first time you try it on a large circuit. Unless you have extra routing space in your chip, you may have to make slight re-arrangements to help the router out. The paragraphs below describe things you can do to make life easier for the router. This section is not very well developed, so we'd like to hear about techniques you use to improve routability. If you discover new techniques, send us mail and we'll add them to this section. \subsection{Channel Structure} One of the first things to check when the router fails is the channel structure. If using the Magic router, type {\bfseries :channel} to look at the channels. One common mistake is to have some of the desired routing area covered by subcells; Magic only runs wires where there are no subcells. Check to be sure that there are channels everywhere that you're expecting wires to run. If you place cells too close together, there may not be enough room to have a channel between the cells; when this happens Magic will route willy-nilly across the tops of cells to bring terminals out to channels, and will probably generate shorts or design-rule violations. To solve the problem, move the cells farther apart. If there are many skinny channels, it will be difficult for the router to produce good routing. 
Try to re-arrange the cell structure to line up edges of nearby cells so that there are as few channels as possible and they are as large as possible (before doing this you'll probably want to get rid of the existing routing by undo-ing or by flushing the edit cell). \subsection{Stems} Another problem has to do with the stem generator. Stems are the pieces of wiring that connect terminals up to grid points on the edges of channels. The current stem generation code doesn't know about connectivity or design rules. It simply finds the nearest routing grid point and wires out to that point, without considering any other terminals. If a terminal is not on the edge of the cell, the stem runs straight across the cell to the nearest channel, without any consideration for other material in the cell. If two terminals are too close together, Magic may decide to route them both to the same grid point. When this happens, you have two choices. Either you can move the cell so that the terminals have different nearest grid points (for example, you can line its terminals up with the grid lines), or if this doesn't work you'll have to modify the cell to make the terminals farther apart. The place where stems cause the most trouble is in PLAs, many of which have been optimized to space the outputs as closely together as possible. In some cases the outputs are closer together than the routing grid, which is an impossible situation for the stem generator. In this case, we think the best approach is to change the PLA templates to space the outputs farther apart. Either space them exactly the same as the router grid (in which case you can line the PLAs up before routing so the terminals are already on the grid), or space the outputs at least 1.5 grid units apart so the stem generator won't have troubles. Having tightly-spaced PLA outputs is false economy: it makes it more difficult to design the PLAs and results in awful routing problems. 
Even if Magic could river-route out from tightly-spaced terminals to grid lines (which it can't), it would require $N^2$ space to route out $N$ lines; it takes less area to stretch the PLA. \subsection{Obstacles} The router tends to have special difficulties with obstacles running along the edges of channels. When you've placed a power wire or other hand-routing along the edge of a channel, the channel router will often run material under your wiring in the other routing layer, thereby blocking both routing layers and making it impossible to complete the routing. Where this occurs, you can increase the chances of successful routing by moving the hand-routing away from the channel edges. It's especially important to keep hand-routing away from terminals. The stem generator will not pay any attention to hand-routing when it generates stems (it just makes a bee-line for the nearest grid point), so it may accidentally short a terminal to nearby hand-routing. \begin{figure}[ht] \begin{center} \epsfig{file=../psfigures/tut7.3.ps, width=0.4\columnwidth} \caption{When placing hand routing, it is best to place wires with their left and bottom edges along grid lines, and contacts centered on the wires. In this fashion, the hand routing will block as few routing grid lines as possible.} \end{center} \end{figure} When placing hand-routing, you can get better routing results by following the advice illustrated in Figure 3. First, display the routing grid. For example, if the router is using a 8-unit grid (which is true for the standard SCMOS technology), type {\bfseries :grid 8}. Then place all your hand routing with its left and bottom edges along the grid lines. Because of the way the routing tools work, this approach results in the least possible amount of lost routing space. \section{More Netlist Commands} In addition to the netlist menu buttons and commands described in Section 4, there are a number of other netlist commands you can invoke by typing in the netlist window. 
Many of these commands are textual equivalents of the menu buttons. However, they allow you to deal with terminals by typing the hierarchical name of the terminal rather than by pointing to it. If you don't know where a terminal is, or if you have deleted a label from your design so that there's nothing to point to, you'll have to use the textual commands. Commands that don't just duplicate menu buttons are described below; see the {\itshape magic(1)} manual page for details on the others. The netlist command \starti \ii {\bfseries :extract} \endi will generate a net from existing wiring. It looks under the box for paint, then traces out all the material in the edit cell that is connected electrically to that paint. Wherever the material touches subcells it looks for terminals in the subcells, and all the terminals it finds are placed into a new net. Warning: there is also an {\bfseries extract} command for layout windows, and it is totally different from the {\bfseries extract} command in netlist windows. Make sure you've got the cursor over the netlist window when you invoke this command! The netlist editor provides two commands for ripping up existing routing (or other material). They are \starti \ii {\bfseries :ripup} \\ \ii {\bfseries :ripup netlist} \endi The first command starts by finding any paint in the edit cell that lies underneath the box. It then works outward from that paint to find all paint in the edit cell that is electrically connected to the starting paint. All of this paint is erased. ({\bfseries :ripup} isn't really necessary, since the same effect can be achieved by selecting all the paint in the net and deleting the selection; it's a hangover from olden days when there was no selection). The second form of the command, {\bfseries :ripup netlist}, is similar to the first except that it starts from each of the terminals in the current netlist instead of the box. Any paint in the edit cell that is electrically connected to a terminal is erased. 
The {\bfseries :ripup netlist} command may be useful to ripup existing routing before rerouting. The command \starti \ii {\bfseries :trace} [{\itshape name}] \endi provides an additional facility for examining router feedback. It highlights all paint connected to each terminal in the net containing {\itshape name}, much as the {\bfseries Show} menu button does for paint connected to anything under the box. The net to be highlighted may be specified by naming one of its terminals, for example, {\bfseries :trace shifter/bit[0]/phi1}. Use the trace command in conjunction with the nets specified in router feedback to see the partially completed wiring for a net. Where no net is specified, the {\bfseries :trace} command highlights the currently selected net. \end{document}
{ "alphanum_fraction": 0.7622744591, "author": null, "avg_line_length": 46.1138121547, "converted": null, "ext": "tex", "file": null, "hexsha": "108f4a3726371322061ff6be46c836abf9fbe503", "include": null, "lang": "TeX", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "fb85e97b9233cff352d964823173c18527c714aa", "max_forks_repo_licenses": [ "TCL", "X11", "MIT" ], "max_forks_repo_name": "wisehackermonkey/magic", "max_forks_repo_path": "doc/latexfiles/tut7.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "fb85e97b9233cff352d964823173c18527c714aa", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "TCL", "X11", "MIT" ], "max_issues_repo_name": "wisehackermonkey/magic", "max_issues_repo_path": "doc/latexfiles/tut7.tex", "max_line_length": 90, "max_stars_count": null, "max_stars_repo_head_hexsha": "fb85e97b9233cff352d964823173c18527c714aa", "max_stars_repo_licenses": [ "TCL", "X11", "MIT" ], "max_stars_repo_name": "wisehackermonkey/magic", "max_stars_repo_path": "doc/latexfiles/tut7.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 10222, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 41733 }
\chapter{Soccer Simulation 2D}\label{chapter:ss2d} In this chapter we explain the architecture of the Simulator itself. It is divided into 5 sections: about the RoboCup Soccer Simulation Server (rcssserver), \cite{rcssserver}; an explanation of the formation tactics and the formation editor; a section about the two major agents: the coach and the players; and a section to show the environment used to train our agents. \section{Server}\label{section:rcssserver} The RoboCup Soccer Simulation Server (RCSSS) is the core of SS2D. It processes the whole environment, receiving the messages from every agent and returning the actual state of the game in discretized time. An old challenge to teams in the league is that the returned state is noisy and, therefore, not fully trustworthy. Also, it establishes the communication with monitors, from which one can watch the game. RCSSS follows a UDP/IP client-server style, which allows teams to develop in any programming language, as long as they implement that communication. There's a specific port for the 11 infield players and another for the coach. Only messages following the allowed protocols are processed by the server. For example, only the goalie can perform a catch; if any other player does a catch, the action is invalidated and the agent loses a cycle. The two agents with special commands are the goalie and the coach, which will be explained in Section \ref{section:coach}. After the initial connection with the clients, the server commits its parameters. Those parameters are divided into server-params, the environment itself (position of goal, size of ball, how many cycles it will wait for an action, etc), and the 18 player types. Each player type has random params for speed, acceleration, size (which influences the tackle action), maximum stamina and kickable area. In game, clients can exchange N messages with other clients, N being given by the server-params. 
\begin{figure}[H] \centering \includegraphics[scale=0.5]{images/field_params.png} \caption{Default field parameters given by the server. Image from \cite{ss2dmanual}.} \label{fig:fieldparams} \end{figure} \section{Formation}\label{section:formation} The formation file is one of the most important pieces of tactical information that the players have. It describes positions depending on the ball's position and, for an agent based on Helios Base (\cite{heliosbase}), also called Agent2D, like a great part of the teams in the league, this information is indispensable. Together with Agent2D, Fedit2 was also released. It is a user interface (UI) with which you can create formation files with up to 128 static situations. See Figure \ref{fig:fedit2}. Teams usually have more than one formation file to be used in game. The formations can be changed given a certain situation. For example, if our team is winning and has a great chance of winning, we can choose an aggressive formation instead of a defensive one. \begin{figure}[H] \centering \includegraphics[scale=0.4]{images/fedit2.png} \caption{Actual Footage of Fedit2. Image from \cite{heliosbase}.} \label{fig:fedit2} \end{figure} \section{Agents}\label{section:agents} In this section we shall talk about the main high-level agents: Coach and Player. The idea of each agent is based on a real soccer game, where there is a coach and 11 players: the coach can notice things that the players cannot and can send a message to a specific player, but only the players can execute the actions that the coach instructed. \subsection{Coach}\label{section:coach} The coach is a special agent which can see the whole field without noise and has the greatest message area of any agent. As it has a global vision of the game, it can analyze the game globally (e.g.: where our defense line is breaking, how we score goals more often, how many passes we got right). Cyrus2014, \cite{cyrus2014}, released their code, which shows some statistical information of the game. 
See Figure \ref{fig:cyrus_coach}. For the next RoboCup, the teams will not be able to see each other's names, so strategies developed for a given team will be more difficult to apply. For that reason, the coach is the best-suited agent to analyse the game in real time, tell which team the opponent probably is, and then apply the specific strategy. \begin{figure}[H] \centering \includegraphics[scale=0.5]{images/cyrus_coach.png} \caption{Example of game analysis by Cyrus2014's coach.} \label{fig:cyrus_coach} \end{figure} \subsection{Players}\label{section:players} The players are the usual agents on the field. The players can take 8 actions: \begin{itemize} \item Dash: given a power of dash $\alpha \in [-100, 100]$, the agent "runs" with $\alpha$ in its body's direction. \item Turn: given an angle $\gamma \in [-180, 180]$, the agent turns its body by $\gamma$ degrees. \item Kick: given a power $\alpha \in [-100, 100]$ and an angle $\gamma \in [-180, 180]$, the agent kicks in the $\gamma$ direction with power $\alpha$. \item Tackle: given a power $\alpha \in [-100, 100]$, the agent tackles the ball. \item Say: given a message M and a target N, the agent sends M to the server to be delivered to N. \item Turn\_neck: given an angle $\gamma \in [-180, 180]$, the agent turns its neck by $\gamma$ degrees. \item Move: given a point (x,y), the agent is teleported to (x,y). It is a special action allowed only while the game is paused. \item Catch: only the goalie can perform that action. \end{itemize} The agent has some view restrictions as well. The farther the agent is from a target P, the more difficult it becomes to see P. Figure \ref{fig:view_ranges} describes the view model: at points a, c and d the agent can see all parameters of P; at point e it cannot see the uniform number of P; at point f it cannot see which team P belongs to; at points b and g the agent cannot see P at all.
To support tactics that depend on all agents on the field, \cite{heliosbase} created a memory for the agent that allows checking a previous position of the target. The catch and kick actions also have some restrictions: they can only be performed if the ball is in the catchable or kickable area, respectively. Figure \ref{fig:catchable_area} shows an example of the catchable area. \begin{figure}[H] \centering \includegraphics[scale=0.5]{images/view_ranges.png} \caption{Ranges of view of the players. Image from \cite{ss2dmanual}.} \label{fig:view_ranges} \end{figure} \begin{figure}[H] \centering \includegraphics[scale=0.5]{images/catchable_area.png} \caption{Description of Catchable Area. Image from \cite{ss2dmanual}.} \label{fig:catchable_area} \end{figure} \section{Half Field Offensive}\label{section:HFO} The Half Field Offensive (HFO), \cite{hfo}, is an interface environment designed specifically to train SARSA (state, action, reward, next state, next action) agents, based on the usual OpenAI environments. HFO supports: a delayed message from the agent due to algorithm training or some heavy process; writing the agent's code in the Python Programming Language (\cite{python}) while using the original actions of the C++ Programming Language (\cite{cpp}) agent via an interface, see Figure \ref{fig:HFO_diagram}. It provides 2 spaces of states and actions: \begin{itemize} \item Low-Level Features and Actions - Uses raw features from the SS2D server (angle, positioning) and provides raw actions (kick, dash, turn). \item High-Level Features and Actions - Uses processed features (distance to opponent, interceptable ball) and only complex or chained actions (dribble, pass to teammate). \end{itemize} \begin{figure}[H] \centering \includegraphics[scale=0.5]{images/HFO_diagram.png} \caption{Half Field Offensive diagram. Image from \cite{hfo}.} \label{fig:HFO_diagram} \end{figure}
{ "alphanum_fraction": 0.7660202902, "author": null, "avg_line_length": 91.6117647059, "converted": null, "ext": "tex", "file": null, "hexsha": "ca770db76b49ecb738b71abbdf7e7c251ed619a4", "include": null, "lang": "TeX", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2020-08-08T02:30:21.000Z", "max_forks_repo_forks_event_min_datetime": "2020-06-18T01:33:35.000Z", "max_forks_repo_head_hexsha": "d0237207dbb485611c685251f97649679a7bbc0a", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "bcahlit/graduationMgm", "max_forks_repo_path": "Evaluating Reinforcement Learning on Robocup Soccer Simulation 2D/chapters/ss2d.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "d0237207dbb485611c685251f97649679a7bbc0a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "bcahlit/graduationMgm", "max_issues_repo_path": "Evaluating Reinforcement Learning on Robocup Soccer Simulation 2D/chapters/ss2d.tex", "max_line_length": 728, "max_stars_count": 5, "max_stars_repo_head_hexsha": "ee264d6f1bffdbbbb04c55c8f43146604d54cc88", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "cstiano/graduationMgm", "max_stars_repo_path": "Evaluating Reinforcement Learning on Robocup Soccer Simulation 2D/chapters/ss2d.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-16T14:50:16.000Z", "max_stars_repo_stars_event_min_datetime": "2020-02-19T19:10:07.000Z", "num_tokens": 1900, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 7787 }
""" Classes for generating lists of detected events """ import numpy as np from yt.funcs import issue_deprecation_warning from pyxsim.utils import mylog from yt.units.yt_array import YTQuantity, YTArray, uconcatenate import astropy.io.fits as pyfits import astropy.wcs as pywcs import h5py from pyxsim.utils import force_unicode, validate_parameters, parse_value, \ ParameterDict from soxs.simput import write_photon_list from soxs.instrument import RedistributionMatrixFile import os from yt.utilities.parallel_tools.parallel_analysis_interface import \ communication_system, parallel_capable, get_mpi_type old_parameter_keys = {"ExposureTime": "exp_time", "Area": "area", "Redshift": "redshift", "AngularDiameterDistance": "d_a", "RMF": "rmf", "ARF": "arf", "Telescope": "telescope", "Instrument": "instrument", "Mission": "mission", "ChannelType": "channel_type"} comm = communication_system.communicators[-1] def communicate_events(my_events, root=0): if parallel_capable: new_events = {} mpi_int = get_mpi_type("int32") mpi_double = get_mpi_type("float64") local_num_events = my_events["xsky"].size sizes = comm.comm.gather(local_num_events, root=root) if comm.rank == 0: num_events = sum(sizes) disps = [sum(sizes[:i]) for i in range(len(sizes))] for key in my_events: if key in ["pi", "pha"]: dtype = "int32" else: dtype = "float64" new_events[key] = np.zeros(num_events, dtype=dtype) else: sizes = [] disps = [] for key in my_events: new_events[key] = np.empty([]) for key in my_events: if key in ["pi", "pha"]: mpi_type = mpi_int else: mpi_type = mpi_double comm.comm.Gatherv([my_events[key], local_num_events, mpi_type], [new_events[key], (sizes, disps), mpi_type], root=root) if key == "eobs": new_events[key] = YTArray(new_events[key], "keV") if key.endswith("sky"): new_events[key] = YTArray(new_events[key], "deg") return new_events else: return my_events class EventList(object): def __init__(self, events, parameters): self.events = events self.parameters = ParameterDict(parameters, 
"EventList", old_parameter_keys) self.num_events = comm.mpi_allreduce(events["xsky"].shape[0]) def keys(self): return self.events.keys() def has_key(self, key): return key in self.keys() def items(self): return self.events.items() def values(self): return self.events.values() def __getitem__(self,key): return self.events[key] def __repr__(self): return self.events.__repr__() def __contains__(self, key): return key in self.events def __add__(self, other): validate_parameters(self.parameters, other.parameters, skip=["sky_center"]) events = {} for item1, item2 in zip(self.items(), other.items()): k1, v1 = item1 k2, v2 = item2 events[k1] = uconcatenate([v1,v2]) return type(self)(events, dict(self.parameters)) def __iter__(self): return iter(self.events) @classmethod def from_h5_file(cls, h5file): """ Initialize an :class:`~pyxsim.event_list.EventList` from a HDF5 file with filename *h5file*. """ events = {} parameters = {} f = h5py.File(h5file, "r") p = f["/parameters"] parameters["exp_time"] = YTQuantity(p["exp_time"].value, "s") parameters["area"] = YTQuantity(p["area"].value, "cm**2") parameters["sky_center"] = YTArray(p["sky_center"][:], "deg") d = f["/data"] num_events = d["xsky"].size start_e = comm.rank*num_events//comm.size end_e = (comm.rank+1)*num_events//comm.size events["xsky"] = YTArray(d["xsky"][start_e:end_e], "deg") events["ysky"] = YTArray(d["ysky"][start_e:end_e], "deg") events["eobs"] = YTArray(d["eobs"][start_e:end_e], "keV") if "rmf" in p: parameters["rmf"] = force_unicode(p["rmf"].value) parameters["arf"] = force_unicode(p["arf"].value) parameters["channel_type"] = force_unicode(p["channel_type"].value) parameters["mission"] = force_unicode(p["mission"].value) parameters["telescope"] = force_unicode(p["telescope"].value) parameters["instrument"] = force_unicode(p["instrument"].value) chantype = parameters["channel_type"] events[chantype] = d[chantype][start_e:end_e] f.close() if "rmf" in p: return ConvolvedEventList(events, parameters) else: 
return EventList(events, parameters) @classmethod def from_fits_file(cls, fitsfile): """ Initialize an :class:`~pyxsim.event_list.EventList` from a FITS file with filename *fitsfile*. """ hdulist = pyfits.open(fitsfile, memmap=True) tblhdu = hdulist["EVENTS"] events = {} parameters = {} parameters["exp_time"] = YTQuantity(tblhdu.header["EXPOSURE"], "s") parameters["area"] = YTQuantity(tblhdu.header["AREA"], "cm**2") parameters["sky_center"] = YTArray([tblhdu.header["TCRVL2"], tblhdu.header["TCRVL3"]], "deg") num_events = tblhdu.header["NAXIS2"] start_e = comm.rank*num_events//comm.size end_e = (comm.rank+1)*num_events//comm.size wcs = pywcs.WCS(naxis=2) wcs.wcs.crpix = [tblhdu.header["TCRPX2"], tblhdu.header["TCRPX3"]] wcs.wcs.crval = parameters["sky_center"].d wcs.wcs.cdelt = [tblhdu.header["TCDLT2"], tblhdu.header["TCDLT3"]] wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"] wcs.wcs.cunit = ["deg"]*2 xx = tblhdu.data["X"][start_e:end_e] yy = tblhdu.data["Y"][start_e:end_e] xx, yy = wcs.wcs_pix2world(xx, yy, 1) events["xsky"] = YTArray(xx, "degree") events["ysky"] = YTArray(yy, "degree") events["eobs"] = YTArray(tblhdu.data["ENERGY"][start_e:end_e]/1000., "keV") if "RESPFILE" in tblhdu.header: parameters["rmf"] = tblhdu.header["RESPFILE"] parameters["arf"] = tblhdu.header["ANCRFILE"] parameters["channel_type"] = tblhdu.header["CHANTYPE"].lower() parameters["mission"] = tblhdu.header["MISSION"] parameters["telescope"] = tblhdu.header["TELESCOP"] parameters["instrument"] = tblhdu.header["INSTRUME"] events[parameters["channel_type"]] = tblhdu.data[parameters["channel_type"]][start_e:end_e] hdulist.close() if "rmf" in tblhdu.header: return ConvolvedEventList(events, parameters) else: return EventList(events, parameters) def write_fits_file(self, fitsfile, fov, nx, overwrite=False): """ Write events to a FITS binary table file. The result is a standard "event file" which can be processed by standard X-ray analysis tools. 
Parameters ---------- fitsfile : string The name of the event file to write. fov : float, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity` The field of view of the event file. If units are not provided, they are assumed to be in arcminutes. nx : integer The resolution of the image (number of pixels on a side). overwrite : boolean, optional Set to True to overwrite a previous file. """ from astropy.time import Time, TimeDelta events = communicate_events(self.events) fov = parse_value(fov, "arcmin") if comm.rank == 0: exp_time = float(self.parameters["exp_time"]) t_begin = Time.now() dt = TimeDelta(exp_time, format='sec') t_end = t_begin + dt dtheta = fov.to("deg").v / nx wcs = pywcs.WCS(naxis=2) wcs.wcs.crpix = [0.5*(nx+1)]*2 wcs.wcs.crval = self.parameters["sky_center"].d wcs.wcs.cdelt = [-dtheta, dtheta] wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"] wcs.wcs.cunit = ["deg"] * 2 xx, yy = wcs.wcs_world2pix(self["xsky"].d, self["ysky"].d, 1) keepx = np.logical_and(xx >= 0.5, xx <= float(nx)+0.5) keepy = np.logical_and(yy >= 0.5, yy <= float(nx)+0.5) keep = np.logical_and(keepx, keepy) n_events = keep.sum() mylog.info("Threw out %d events because " % (xx.size-n_events) + "they fell outside the field of view.") col_e = pyfits.Column(name='ENERGY', format='E', unit='eV', array=events["eobs"].in_units("eV").d[keep]) col_x = pyfits.Column(name='X', format='D', unit='pixel', array=xx[keep]) col_y = pyfits.Column(name='Y', format='D', unit='pixel', array=yy[keep]) cols = [col_e, col_x, col_y] if "channel_type" in self.parameters: chantype = self.parameters["channel_type"] if chantype == "pha": cunit = "adu" elif chantype == "pi": cunit = "Chan" col_ch = pyfits.Column(name=chantype.upper(), format='1J', unit=cunit, array=events[chantype][keep]) cols.append(col_ch) time = np.random.uniform(size=n_events, low=0.0, high=float(self.parameters["exp_time"])) col_t = pyfits.Column(name="TIME", format='1D', unit='s', array=time) 
cols.append(col_t) coldefs = pyfits.ColDefs(cols) tbhdu = pyfits.BinTableHDU.from_columns(coldefs) tbhdu.name = "EVENTS" tbhdu.header["MTYPE1"] = "sky" tbhdu.header["MFORM1"] = "x,y" tbhdu.header["MTYPE2"] = "EQPOS" tbhdu.header["MFORM2"] = "RA,DEC" tbhdu.header["TCTYP2"] = "RA---TAN" tbhdu.header["TCTYP3"] = "DEC--TAN" tbhdu.header["TCRVL2"] = float(self.parameters["sky_center"][0]) tbhdu.header["TCRVL3"] = float(self.parameters["sky_center"][1]) tbhdu.header["TCDLT2"] = -dtheta tbhdu.header["TCDLT3"] = dtheta tbhdu.header["TCRPX2"] = 0.5*(nx+1) tbhdu.header["TCRPX3"] = 0.5*(nx+1) tbhdu.header["TLMIN2"] = 0.5 tbhdu.header["TLMIN3"] = 0.5 tbhdu.header["TLMAX2"] = float(nx)+0.5 tbhdu.header["TLMAX3"] = float(nx)+0.5 if "channel_type" in self.parameters: rmf = RedistributionMatrixFile(self.parameters["rmf"]) tbhdu.header["TLMIN4"] = rmf.cmin tbhdu.header["TLMAX4"] = rmf.cmax tbhdu.header["RESPFILE"] = os.path.split(self.parameters["rmf"])[-1] tbhdu.header["PHA_BINS"] = rmf.n_ch tbhdu.header["ANCRFILE"] = os.path.split(self.parameters["arf"])[-1] tbhdu.header["CHANTYPE"] = self.parameters["channel_type"] tbhdu.header["MISSION"] = self.parameters["mission"] tbhdu.header["TELESCOP"] = self.parameters["telescope"] tbhdu.header["INSTRUME"] = self.parameters["instrument"] tbhdu.header["EXPOSURE"] = exp_time tbhdu.header["TSTART"] = 0.0 tbhdu.header["TSTOP"] = exp_time tbhdu.header["AREA"] = float(self.parameters["area"]) tbhdu.header["HDUVERS"] = "1.1.0" tbhdu.header["RADECSYS"] = "FK5" tbhdu.header["EQUINOX"] = 2000.0 tbhdu.header["HDUCLASS"] = "OGIP" tbhdu.header["HDUCLAS1"] = "EVENTS" tbhdu.header["HDUCLAS2"] = "ACCEPTED" tbhdu.header["DATE"] = t_begin.tt.isot tbhdu.header["DATE-OBS"] = t_begin.tt.isot tbhdu.header["DATE-END"] = t_end.tt.isot hdulist = [pyfits.PrimaryHDU(), tbhdu] if "channel_type" in self.parameters: start = pyfits.Column(name='START', format='1D', unit='s', array=np.array([0.0])) stop = pyfits.Column(name='STOP', format='1D', unit='s', 
array=np.array([exp_time])) tbhdu_gti = pyfits.BinTableHDU.from_columns([start,stop]) tbhdu_gti.name = "STDGTI" tbhdu_gti.header["TSTART"] = 0.0 tbhdu_gti.header["TSTOP"] = exp_time tbhdu_gti.header["HDUCLASS"] = "OGIP" tbhdu_gti.header["HDUCLAS1"] = "GTI" tbhdu_gti.header["HDUCLAS2"] = "STANDARD" tbhdu_gti.header["RADECSYS"] = "FK5" tbhdu_gti.header["EQUINOX"] = 2000.0 tbhdu_gti.header["DATE"] = t_begin.tt.isot tbhdu_gti.header["DATE-OBS"] = t_begin.tt.isot tbhdu_gti.header["DATE-END"] = t_end.tt.isot hdulist.append(tbhdu_gti) pyfits.HDUList(hdulist).writeto(fitsfile, overwrite=overwrite) comm.barrier() def write_simput_file(self, prefix, overwrite=False, emin=None, emax=None): r""" Write events to a SIMPUT file that may be read by the SIMX instrument simulator. Parameters ---------- prefix : string The filename prefix. overwrite : boolean, optional Set to True to overwrite previous files. e_min : float, optional The minimum energy of the photons to save in keV. e_max : float, optional The maximum energy of the photons to save in keV. """ if isinstance(self, ConvolvedEventList): raise NotImplementedError("Writing SIMPUT files is only supported if " "you didn't convolve with responses!") events = communicate_events(self.events) if comm.rank == 0: mylog.info("Writing SIMPUT catalog file %s_simput.fits " % prefix + "and SIMPUT photon list file %s_phlist.fits." 
% prefix) if emin is None and emax is None: idxs = slice(None, None, None) else: if emin is None: emin = events["eobs"].min().value if emax is None: emax = events["eobs"].max().value idxs = np.logical_and(events["eobs"].d >= emin, events["eobs"].d <= emax) flux = np.sum(events["eobs"][idxs]).to("erg") / \ self.parameters["exp_time"]/self.parameters["area"] write_photon_list(prefix, prefix, flux.v, events["xsky"][idxs].d, events["ysky"][idxs].d, events["eobs"][idxs].d, overwrite=overwrite) comm.barrier() def write_h5_file(self, h5file): """ Write an :class:`~pyxsim.event_list.EventList` to the HDF5 file given by *h5file*. """ events = communicate_events(self.events) if comm.rank == 0: f = h5py.File(h5file, "w") p = f.create_group("parameters") p.create_dataset("exp_time", data=float(self.parameters["exp_time"])) p.create_dataset("area", data=float(self.parameters["area"])) p.create_dataset("sky_center", data=self.parameters["sky_center"].d) d = f.create_group("data") d.create_dataset("xsky", data=events["xsky"].d) d.create_dataset("ysky", data=events["ysky"].d) d.create_dataset("eobs", data=events["eobs"].d) if "rmf" in self.parameters: p.create_dataset("arf", data=self.parameters["arf"]) p.create_dataset("rmf", data=self.parameters["rmf"]) p.create_dataset("channel_type", data=self.parameters["channel_type"]) p.create_dataset("mission", data=self.parameters["mission"]) p.create_dataset("telescope", data=self.parameters["telescope"]) p.create_dataset("instrument", data=self.parameters["instrument"]) chantype = self.parameters["channel_type"] d.create_dataset(chantype, data=events[chantype]) f.close() comm.barrier() def write_fits_image(self, imagefile, fov, nx, emin=None, emax=None, overwrite=False): r""" Generate a image by binning X-ray counts and write it to a FITS file. Parameters ---------- imagefile : string The name of the image file to write. 
fov : float, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity` The field of view of the image. If units are not provided, they are assumed to be in arcminutes. nx : integer The resolution of the image (number of pixels on a side). emin : float, optional The minimum energy of the photons to put in the image, in keV. emax : float, optional The maximum energy of the photons to put in the image, in keV. overwrite : boolean, optional Set to True to overwrite a previous file. """ fov = parse_value(fov, "arcmin") if emin is None: mask_emin = np.ones(self.num_events, dtype='bool') else: mask_emin = self["eobs"].d > emin if emax is None: mask_emax = np.ones(self.num_events, dtype='bool') else: mask_emax = self["eobs"].d < emax mask = np.logical_and(mask_emin, mask_emax) dtheta = fov.to("deg").v/nx xbins = np.linspace(0.5, float(nx)+0.5, nx+1, endpoint=True) ybins = np.linspace(0.5, float(nx)+0.5, nx+1, endpoint=True) wcs = pywcs.WCS(naxis=2) wcs.wcs.crpix = [0.5*(nx+1)]*2 wcs.wcs.crval = self.parameters["sky_center"].d wcs.wcs.cdelt = [-dtheta, dtheta] wcs.wcs.ctype = ["RA---TAN","DEC--TAN"] wcs.wcs.cunit = ["deg"]*2 xx, yy = wcs.wcs_world2pix(self["xsky"].d, self["ysky"].d, 1) H, xedges, yedges = np.histogram2d(xx[mask], yy[mask], bins=[xbins, ybins]) if parallel_capable: H = comm.comm.reduce(H, root=0) if comm.rank == 0: hdu = pyfits.PrimaryHDU(H.T) hdu.header["MTYPE1"] = "EQPOS" hdu.header["MFORM1"] = "RA,DEC" hdu.header["CTYPE1"] = "RA---TAN" hdu.header["CTYPE2"] = "DEC--TAN" hdu.header["CRPIX1"] = 0.5*(nx+1) hdu.header["CRPIX2"] = 0.5*(nx+1) hdu.header["CRVAL1"] = float(self.parameters["sky_center"][0]) hdu.header["CRVAL2"] = float(self.parameters["sky_center"][1]) hdu.header["CUNIT1"] = "deg" hdu.header["CUNIT2"] = "deg" hdu.header["CDELT1"] = -dtheta hdu.header["CDELT2"] = dtheta hdu.header["EXPOSURE"] = float(self.parameters["exp_time"]) hdu.writeto(imagefile, overwrite=overwrite) comm.barrier() def write_spectrum(self, 
specfile, emin, emax, nchan, overwrite=False): r""" Bin event energies into a spectrum and write it to a FITS binary table. This is for an *unconvolved* spectrum. Parameters ---------- specfile : string The name of the FITS file to be written. emin : float The minimum energy of the spectral bins in keV. emax : float The maximum energy of the spectral bins in keV. nchan : integer The number of channels. overwrite : boolean, optional Set to True to overwrite a previous file. """ espec = self["eobs"].d spec, ee = np.histogram(espec, bins=nchan, range=(emin, emax)) bins = 0.5*(ee[1:]+ee[:-1]) if parallel_capable: spec = comm.comm.reduce(spec, root=0) if comm.rank == 0: col1 = pyfits.Column(name='CHANNEL', format='1J', array=np.arange(nchan).astype('int32')+1) col2 = pyfits.Column(name='ENERGY', format='1D', array=bins.astype("float64")) col3 = pyfits.Column(name='COUNTS', format='1J', array=spec.astype("int32")) col4 = pyfits.Column(name='COUNT_RATE', format='1D', array=spec/float(self.parameters["exp_time"])) coldefs = pyfits.ColDefs([col1, col2, col3, col4]) tbhdu = pyfits.BinTableHDU.from_columns(coldefs) tbhdu.name = "SPECTRUM" tbhdu.header["DETCHANS"] = spec.shape[0] tbhdu.header["TOTCTS"] = spec.sum() tbhdu.header["EXPOSURE"] = float(self.parameters["exp_time"]) tbhdu.header["LIVETIME"] = float(self.parameters["exp_time"]) tbhdu.header["CONTENT"] = "pi" tbhdu.header["HDUCLASS"] = "OGIP" tbhdu.header["HDUCLAS1"] = "SPECTRUM" tbhdu.header["HDUCLAS2"] = "TOTAL" tbhdu.header["HDUCLAS3"] = "TYPE:I" tbhdu.header["HDUCLAS4"] = "COUNT" tbhdu.header["HDUVERS"] = "1.1.0" tbhdu.header["HDUVERS1"] = "1.1.0" tbhdu.header["CHANTYPE"] = "pi" tbhdu.header["BACKFILE"] = "none" tbhdu.header["CORRFILE"] = "none" tbhdu.header["POISSERR"] = True tbhdu.header["RESPFILE"] = "none" tbhdu.header["ANCRFILE"] = "none" tbhdu.header["MISSION"] = "none" tbhdu.header["TELESCOP"] = "none" tbhdu.header["INSTRUME"] = "none" tbhdu.header["AREASCAL"] = 1.0 tbhdu.header["CORRSCAL"] = 0.0 
tbhdu.header["BACKSCAL"] = 1.0 hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu]) hdulist.writeto(specfile, overwrite=overwrite) comm.barrier() class ConvolvedEventList(EventList): def __init__(self, events, parameters): issue_deprecation_warning("ConvolvedEventList has been " "deprecated and will be removed " "in a future release!") super(ConvolvedEventList, self).__init__(events, parameters) def write_channel_spectrum(self, specfile, overwrite=False): r""" Bin event channels into a spectrum and write it to a FITS binary table. Parameters ---------- specfile : string The name of the FITS file to be written. overwrite : boolean, optional Set to True to overwrite a previous file. """ spectype = self.parameters["channel_type"] rmf = RedistributionMatrixFile(self.parameters["rmf"]) minlength = rmf.n_ch if rmf.cmin == 1: minlength += 1 spec = np.bincount(self[spectype], minlength=minlength) if rmf.cmin == 1: spec = spec[1:] bins = (np.arange(rmf.n_ch)+rmf.cmin).astype("int32") if parallel_capable: spec = comm.comm.reduce(spec, root=0) if comm.rank == 0: col1 = pyfits.Column(name='CHANNEL', format='1J', array=bins) col2 = pyfits.Column(name=spectype.upper(), format='1D', array=bins.astype("float64")) col3 = pyfits.Column(name='COUNTS', format='1J', array=spec.astype("int32")) col4 = pyfits.Column(name='COUNT_RATE', format='1D', array=spec/float(self.parameters["exp_time"])) coldefs = pyfits.ColDefs([col1, col2, col3, col4]) tbhdu = pyfits.BinTableHDU.from_columns(coldefs) tbhdu.name = "SPECTRUM" tbhdu.header["DETCHANS"] = spec.shape[0] tbhdu.header["TOTCTS"] = spec.sum() tbhdu.header["EXPOSURE"] = float(self.parameters["exp_time"]) tbhdu.header["LIVETIME"] = float(self.parameters["exp_time"]) tbhdu.header["CONTENT"] = spectype tbhdu.header["HDUCLASS"] = "OGIP" tbhdu.header["HDUCLAS1"] = "SPECTRUM" tbhdu.header["HDUCLAS2"] = "TOTAL" tbhdu.header["HDUCLAS3"] = "TYPE:I" tbhdu.header["HDUCLAS4"] = "COUNT" tbhdu.header["HDUVERS"] = "1.1.0" tbhdu.header["HDUVERS1"] = 
"1.1.0" tbhdu.header["CHANTYPE"] = spectype tbhdu.header["BACKFILE"] = "none" tbhdu.header["CORRFILE"] = "none" tbhdu.header["POISSERR"] = True tbhdu.header["RESPFILE"] = os.path.split(self.parameters["rmf"])[-1] tbhdu.header["ANCRFILE"] = os.path.split(self.parameters["arf"])[-1] tbhdu.header["MISSION"] = self.parameters["mission"] tbhdu.header["TELESCOP"] = self.parameters["telescope"] tbhdu.header["INSTRUME"] = self.parameters["instrument"] tbhdu.header["AREASCAL"] = 1.0 tbhdu.header["CORRSCAL"] = 0.0 tbhdu.header["BACKSCAL"] = 1.0 hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu]) hdulist.writeto(specfile, overwrite=overwrite) comm.barrier()
{ "alphanum_fraction": 0.5515009056, "author": null, "avg_line_length": 40.0478395062, "converted": null, "ext": "py", "file": null, "hexsha": "583fff1258a6bb039e1762a6e8ce9dac1b4ab0ef", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "7d3e2924a3d629ea7e91d526475c26572a3a704a", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "NegriAndrea/pyxsim", "max_forks_repo_path": "pyxsim/event_list.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "7d3e2924a3d629ea7e91d526475c26572a3a704a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "NegriAndrea/pyxsim", "max_issues_repo_path": "pyxsim/event_list.py", "max_line_length": 117, "max_stars_count": null, "max_stars_repo_head_hexsha": "7d3e2924a3d629ea7e91d526475c26572a3a704a", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "NegriAndrea/pyxsim", "max_stars_repo_path": "pyxsim/event_list.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 6675, "path": null, "reason": "import numpy,import astropy,from astropy", "repo": null, "save_path": null, "sha": null, "size": 25951 }
"""Convert WFLW facial-landmark annotations into resized training/test
images plus a `list.txt` label file per split."""
import os
import numpy as np
import cv2
import shutil

# When True, only the first `numberToHandle` annotation lines are processed
# (quick smoke-test mode).
debug = False
numberToHandle = 500


def rotate(angle, center, landmark):
    """Rotate 2-D landmark points by *angle* degrees around *center*.

    Parameters
    ----------
    angle : float
        Rotation angle in degrees.
    center : (float, float)
        The (x, y) pivot of the rotation.
    landmark : iterable of (x, y)
        The points to rotate.

    Returns
    -------
    M : np.ndarray, shape (2, 3)
        The affine rotation matrix (same layout as cv2.getRotationMatrix2D).
    landmark_ : np.ndarray, shape (N, 2)
        The rotated points.
    """
    rad = angle * np.pi / 180.0
    alpha = np.cos(rad)
    beta = np.sin(rad)
    M = np.zeros((2, 3), dtype=np.float32)
    M[0, 0] = alpha
    M[0, 1] = beta
    M[0, 2] = (1 - alpha) * center[0] - beta * center[1]
    M[1, 0] = -beta
    M[1, 1] = alpha
    M[1, 2] = beta * center[0] + (1 - alpha) * center[1]
    landmark_ = np.asarray([(M[0, 0] * x + M[0, 1] * y + M[0, 2],
                             M[1, 0] * x + M[1, 1] * y + M[1, 2])
                            for (x, y) in landmark])
    return M, landmark_


class ImageData():
    """One WFLW annotation line: 98 landmarks, a face box, attribute flags
    and an image path, plus the resized images/landmarks filled in by
    `load_data`."""

    def __init__(self, line, imgDir, image_size=224):
        self.image_size = image_size
        line = line.strip().split()
        # WFLW line format: 196 landmark floats + 4 box ints + 6 attribute
        # flags + image path = 207 tokens.
        assert (len(line) == 207)
        self.list = line
        self.landmark = np.asarray(list(map(float, line[:196])),
                                   dtype=np.float32).reshape(-1, 2)
        self.box = np.asarray(list(map(int, line[196:200])),
                              dtype=np.int32).reshape(-1, 2)
        # Attribute flags (pose/expression/illumination/make-up/occlusion/
        # blur); parsed for completeness, not used downstream.
        self.flags = list(map(bool, map(int, line[200:206])))
        self.path = os.path.join(imgDir, line[206])
        self.img = None
        self.imgs = []
        self.landmarks = []
        self.boxes = []
        self.ratio = 0.0

    def load_data(self, is_train, repeat):
        """Read the image; for training data, resize it to
        (image_size, image_size) and rescale the box and landmarks to match.

        Also sets `self.ratio`, the face-box area relative to the resized
        image, which `save_data` uses to drop tiny faces. `repeat` is kept
        for interface compatibility with the (disabled) augmentation below.
        """
        img = cv2.imread(self.path)
        if img is None:
            # cv2.imread returns None for missing/unreadable files; fail
            # loudly here instead of crashing later with an opaque
            # AttributeError on img.shape.
            raise FileNotFoundError("could not read image: %s" % self.path)
        height = img.shape[0]  # img is h*w*c
        width = img.shape[1]
        fy = float(height) / self.image_size
        fx = float(width) / self.image_size
        box = np.array(self.box)
        lmark = np.array(self.landmark)
        if is_train:
            img = cv2.resize(img, (self.image_size, self.image_size))
            # box is int32, so these divisions truncate toward zero.
            box[:, 1] = self.box[:, 1] / fy
            box[:, 0] = self.box[:, 0] / fx
            lmark[:, 0] = self.landmark[:, 0] / fx
            lmark[:, 1] = self.landmark[:, 1] / fy
            xx = box[1][0] - box[0][0]
            yy = box[1][1] - box[0][1]
            # Face-box area as a fraction of the resized image. Previously
            # hard-coded to 224*224; use image_size so non-default sizes
            # behave consistently (identical result for the default 224).
            self.ratio = xx * yy / float(self.image_size * self.image_size)
        else:
            self.ratio = 1.0
        self.imgs.append(img)
        self.landmarks.append(lmark)
        self.boxes = box
        # Data augmentation (random rotation via `rotate`) -- disabled:
        # if is_train:
        #     while len(self.imgs) < repeat:
        #         angle = np.random.randint(-20, 20)
        #         cx, cy = img.shape[0]/2, img.shape[1]/2
        #         M, landmark = rotate(angle, (cx, cy), self.landmark)
        #         img = cv2.resize(img, (self.image_size, self.image_size))
        #         self.imgs.append(img)
        #         self.landmarks.append(landmark)

    def save_data(self, path, prefix):
        """Write each loaded image to `path` as '<prefix>_<i>.png' and
        return label lines of the form '<save_path> <box...> <landmarks...>'.

        Faces whose box covers less than 5% of the resized image are skipped.
        """
        labels = []
        for i, (img, landmark) in enumerate(zip(self.imgs, self.landmarks)):
            assert landmark.shape == (98, 2)
            if self.ratio < 0.05:
                continue
            save_path = os.path.join(path, prefix + '_' + str(i) + '.png')
            # Refuse to silently overwrite an existing sample.
            assert not os.path.exists(save_path), save_path
            cv2.imwrite(save_path, img)
            box_str = ' '.join(list(map(str, self.boxes.reshape(-1).tolist())))
            landmark_str = ' '.join(list(map(str, landmark.reshape(-1).tolist())))
            label = '{} {} {}\n'.format(save_path, box_str, landmark_str)
            labels.append(label)
        return labels


def get_dataset_list(imgDir, outDir, landmarkDir, is_train):
    """Convert every annotation line in `landmarkDir` into resized images
    under `outDir`/imgs and write the resulting labels to `outDir`/list.txt.
    """
    with open(landmarkDir, 'r') as f:
        lines = f.readlines()
    labels = []
    save_img = os.path.join(outDir, 'imgs')
    if not os.path.exists(save_img):
        os.mkdir(save_img)
    if debug:
        lines = lines[:numberToHandle]
    for i, line in enumerate(lines):
        Img = ImageData(line, imgDir)
        img_name = Img.path
        Img.load_data(is_train, 1)
        _, filename = os.path.split(img_name)
        filename, _ = os.path.splitext(filename)
        # Prefix with the line index so repeated source images stay unique.
        label_txt = Img.save_data(save_img, str(i) + '_' + filename)
        labels.append(label_txt)
        if ((i + 1) % 100) == 0:
            print('file: {}/{}'.format(i + 1, len(lines)))
    with open(os.path.join(outDir, 'list.txt'), 'w') as f:
        for label in labels:
            f.writelines(label)


if __name__ == '__main__':
    root_dir = os.path.dirname(os.path.realpath(__file__))
    imageDirs = '/home/frank/Desktop/projects/DL/DATA/face/WFLW/WFLW_images'
    Mirror_file = '/home/frank/Desktop/projects/DL/DATA/face/WFLW/Mirror98.txt'
    landmarkDirs = ['/home/frank/Desktop/projects/DL/DATA/face/WFLW/WFLW_annotations/list_98pt_rect_attr_train_test/list_98pt_rect_attr_test.txt',
                    '/home/frank/Desktop/projects/DL/DATA/face/WFLW/WFLW_annotations/list_98pt_rect_attr_train_test/list_98pt_rect_attr_train.txt']
    outDirs = ['test_data', 'train_data']
    for landmarkDir, outDir in zip(landmarkDirs, outDirs):
        outDir = os.path.join(root_dir, outDir)
        print(outDir)
        # Rebuild each output directory from scratch.
        if os.path.exists(outDir):
            shutil.rmtree(outDir)
        os.mkdir(outDir)
        # The test list keeps original resolution; everything else is
        # resized training data.
        is_train = 'list_98pt_rect_attr_test.txt' not in landmarkDir
        imgs = get_dataset_list(imageDirs, outDir, landmarkDir, is_train)
    print('end')
{ "alphanum_fraction": 0.5580324245, "author": null, "avg_line_length": 36.6756756757, "converted": null, "ext": "py", "file": null, "hexsha": "268a318e0dec7dec2bccdab774109cebf71db0b2", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "d59a0c5868eacfbfd7b966975866bdf8824740de", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "GaussRieman/face_mxnet", "max_forks_repo_path": "face_align/prepare_box_lks.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "d59a0c5868eacfbfd7b966975866bdf8824740de", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "GaussRieman/face_mxnet", "max_issues_repo_path": "face_align/prepare_box_lks.py", "max_line_length": 147, "max_stars_count": null, "max_stars_repo_head_hexsha": "d59a0c5868eacfbfd7b966975866bdf8824740de", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "GaussRieman/face_mxnet", "max_stars_repo_path": "face_align/prepare_box_lks.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1521, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 5428 }
# Top-level module for primitive-recursive function combinators.
module PrimaryRecursive

# Core combinator definitions, then arithmetic built on top of them.
include("PRBase.jl")
include("arithmetic.jl")

# Public API re-exported from the included files.
# NOTE(review): `traingle` looks like a typo for `triangle`, but the export
# must match the name actually defined in arithmetic.jl -- confirm there
# before renaming.
export p1, p2, p3, PrimRec, succ, zro, Comb, Proj, add, mult, pred, cosub,
    dff, sgn, nsgn, const1, remainder, ge, ifel, square, floor_sqrt,
    traingle, pair, fib, fst, scd

end # module
{ "alphanum_fraction": 0.7027972028, "author": null, "avg_line_length": 26, "converted": null, "ext": "jl", "file": null, "hexsha": "32625b235b9338ec1f0a1da81b93f58131d61d54", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5c13a35b0fba1f6fc5d50f7aa1f888e6a4183078", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "AquaIndigo/PrimaryRecursive.jl", "max_forks_repo_path": "src/PrimaryRecursive.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "5c13a35b0fba1f6fc5d50f7aa1f888e6a4183078", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "AquaIndigo/PrimaryRecursive.jl", "max_issues_repo_path": "src/PrimaryRecursive.jl", "max_line_length": 66, "max_stars_count": 2, "max_stars_repo_head_hexsha": "5c13a35b0fba1f6fc5d50f7aa1f888e6a4183078", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "AquaIndigo/PrimaryRecursive.jl", "max_stars_repo_path": "src/PrimaryRecursive.jl", "max_stars_repo_stars_event_max_datetime": "2021-11-05T07:27:33.000Z", "max_stars_repo_stars_event_min_datetime": "2021-11-05T07:02:22.000Z", "num_tokens": 97, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 286 }
/- Copyright (c) 2018 Kenny Lau. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Kenny Lau, Yury Kudryashov -/ import algebra.algebra.tower /-! # The `restrict_scalars` type alias > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. See the documentation attached to the `restrict_scalars` definition for advice on how and when to use this type alias. As described there, it is often a better choice to use the `is_scalar_tower` typeclass instead. ## Main definitions * `restrict_scalars R S M`: the `S`-module `M` viewed as an `R` module when `S` is an `R`-algebra. Note that by default we do *not* have a `module S (restrict_scalars R S M)` instance for the original action. This is available as a def `restrict_scalars.module_orig` if really needed. * `restrict_scalars.add_equiv : restrict_scalars R S M ≃+ M`: the additive equivalence between the restricted and original space (in fact, they are definitionally equal, but sometimes it is helpful to avoid using this fact, to keep instances from leaking). * `restrict_scalars.ring_equiv : restrict_scalars R S A ≃+* A`: the ring equivalence between the restricted and original space when the module is an algebra. ## See also There are many similarly-named definitions elsewhere which do not refer to this type alias. These refer to restricting the scalar type in a bundled type, such as from `A →ₗ[R] B` to `A →ₗ[S] B`: * `linear_map.restrict_scalars` * `linear_equiv.restrict_scalars` * `alg_hom.restrict_scalars` * `alg_equiv.restrict_scalars` * `submodule.restrict_scalars` * `subalgebra.restrict_scalars` -/ variables (R S M A : Type*) /-- If we put an `R`-algebra structure on a semiring `S`, we get a natural equivalence from the category of `S`-modules to the category of representations of the algebra `S` (over `R`). The type synonym `restrict_scalars` is essentially this equivalence. 
Warning: use this type synonym judiciously! Consider an example where we want to construct an `R`-linear map from `M` to `S`, given: ```lean variables (R S M : Type*) variables [comm_semiring R] [semiring S] [algebra R S] [add_comm_monoid M] [module S M] ``` With the assumptions above we can't directly state our map as we have no `module R M` structure, but `restrict_scalars` permits it to be written as: ```lean -- an `R`-module structure on `M` is provided by `restrict_scalars` which is compatible example : restrict_scalars R S M →ₗ[R] S := sorry ``` However, it is usually better just to add this extra structure as an argument: ```lean -- an `R`-module structure on `M` and proof of its compatibility is provided by the user example [module R M] [is_scalar_tower R S M] : M →ₗ[R] S := sorry ``` The advantage of the second approach is that it defers the duty of providing the missing typeclasses `[module R M] [is_scalar_tower R S M]`. If some concrete `M` naturally carries these (as is often the case) then we have avoided `restrict_scalars` entirely. If not, we can pass `restrict_scalars R S M` later on instead of `M`. Note that this means we almost always want to state definitions and lemmas in the language of `is_scalar_tower` rather than `restrict_scalars`. An example of when one might want to use `restrict_scalars` would be if one has a vector space over a field of characteristic zero and wishes to make use of the `ℚ`-algebra structure. -/ @[nolint unused_arguments] def restrict_scalars (R S M : Type*) : Type* := M instance [I : inhabited M] : inhabited (restrict_scalars R S M) := I instance [I : add_comm_monoid M] : add_comm_monoid (restrict_scalars R S M) := I instance [I : add_comm_group M] : add_comm_group (restrict_scalars R S M) := I section module section variables [semiring S] [add_comm_monoid M] /-- We temporarily install an action of the original ring on `restrict_sclars R S M`. 
-/ def restrict_scalars.module_orig [I : module S M] : module S (restrict_scalars R S M) := I variables [comm_semiring R] [algebra R S] section local attribute [instance] restrict_scalars.module_orig /-- When `M` is a module over a ring `S`, and `S` is an algebra over `R`, then `M` inherits a module structure over `R`. The preferred way of setting this up is `[module R M] [module S M] [is_scalar_tower R S M]`. -/ instance [module S M] : module R (restrict_scalars R S M) := module.comp_hom M (algebra_map R S) /-- This instance is only relevant when `restrict_scalars.module_orig` is available as an instance. -/ instance [module S M] : is_scalar_tower R S (restrict_scalars R S M) := ⟨λ r S M, by { rw [algebra.smul_def, mul_smul], refl }⟩ end /-- When `M` is a right-module over a ring `S`, and `S` is an algebra over `R`, then `M` inherits a right-module structure over `R`. The preferred way of setting this up is `[module Rᵐᵒᵖ M] [module Sᵐᵒᵖ M] [is_scalar_tower Rᵐᵒᵖ Sᵐᵒᵖ M]`. -/ instance restrict_scalars.op_module [module Sᵐᵒᵖ M] : module Rᵐᵒᵖ (restrict_scalars R S M) := begin letI : module Sᵐᵒᵖ (restrict_scalars R S M) := ‹module Sᵐᵒᵖ M›, exact module.comp_hom M (algebra_map R S).op end instance restrict_scalars.is_central_scalar [module S M] [module Sᵐᵒᵖ M] [is_central_scalar S M] : is_central_scalar R (restrict_scalars R S M) := { op_smul_eq_smul := λ r x, (op_smul_eq_smul (algebra_map R S r) (_ : M) : _)} /-- The `R`-algebra homomorphism from the original coefficient algebra `S` to endomorphisms of `restrict_scalars R S M`. -/ def restrict_scalars.lsmul [module S M] : S →ₐ[R] module.End R (restrict_scalars R S M) := begin -- We use `restrict_scalars.module_orig` in the implementation, -- but not in the type. letI : module S (restrict_scalars R S M) := restrict_scalars.module_orig R S M, exact algebra.lsmul R (restrict_scalars R S M), end end variables [add_comm_monoid M] /-- `restrict_scalars.add_equiv` is the additive equivalence with the original module. 
-/ def restrict_scalars.add_equiv : restrict_scalars R S M ≃+ M := add_equiv.refl M variables [comm_semiring R] [semiring S] [algebra R S] [module S M] @[simp] lemma restrict_scalars.add_equiv_map_smul (c : R) (x : restrict_scalars R S M) : restrict_scalars.add_equiv R S M (c • x) = (algebra_map R S c) • restrict_scalars.add_equiv R S M x := rfl lemma restrict_scalars.smul_def (c : R) (x : restrict_scalars R S M) : c • x = (restrict_scalars.add_equiv R S M).symm (algebra_map R S c • restrict_scalars.add_equiv R S M x) := rfl lemma restrict_scalars.add_equiv_symm_map_algebra_map_smul (r : R) (x : M) : (restrict_scalars.add_equiv R S M).symm (algebra_map R S r • x) = r • (restrict_scalars.add_equiv R S M).symm x := rfl lemma restrict_scalars.add_equiv_symm_map_smul_smul (r : R) (s : S) (x : M) : (restrict_scalars.add_equiv R S M).symm ((r • s) • x) = r • (restrict_scalars.add_equiv R S M ).symm (s • x) := by { rw [algebra.smul_def, mul_smul], refl, } lemma restrict_scalars.lsmul_apply_apply (s : S) (x : restrict_scalars R S M) : restrict_scalars.lsmul R S M s x = (restrict_scalars.add_equiv R S M).symm (s • (restrict_scalars.add_equiv R S M x)) := rfl end module section algebra instance [I : semiring A] : semiring (restrict_scalars R S A) := I instance [I : ring A] : ring (restrict_scalars R S A) := I instance [I : comm_semiring A] : comm_semiring (restrict_scalars R S A) := I instance [I : comm_ring A] : comm_ring (restrict_scalars R S A) := I variables [semiring A] /-- Tautological ring isomorphism `restrict_scalars R S A ≃+* A`. 
-/ def restrict_scalars.ring_equiv : restrict_scalars R S A ≃+* A := ring_equiv.refl _ variables [comm_semiring S] [algebra S A] [comm_semiring R] [algebra R S] @[simp] lemma restrict_scalars.ring_equiv_map_smul (r : R) (x : restrict_scalars R S A) : restrict_scalars.ring_equiv R S A (r • x) = (algebra_map R S r) • restrict_scalars.ring_equiv R S A x := rfl /-- `R ⟶ S` induces `S-Alg ⥤ R-Alg` -/ instance : algebra R (restrict_scalars R S A) := { smul := (•), commutes' := λ r x, algebra.commutes _ _, smul_def' := λ _ _, algebra.smul_def _ _, .. (algebra_map S A).comp (algebra_map R S) } @[simp] lemma restrict_scalars.ring_equiv_algebra_map (r : R) : restrict_scalars.ring_equiv R S A (algebra_map R (restrict_scalars R S A) r) = algebra_map S A (algebra_map R S r) := rfl end algebra
{ "alphanum_fraction": null, "author": "leanprover-community", "avg_line_length": null, "converted": null, "ext": null, "file": null, "hexsha": null, "include": null, "lang": null, "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/algebra/algebra/restrict_scalars.lean", "reason": null, "repo": "mathlib", "save_path": "github-repos/lean/leanprover-community-mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "size": null }
import numpy as np

from future.cmorph import _dilate

# Benchmark fixture: a random 1024x1024 uint8 image dilated with a 64x64
# structuring element, shifted by (2, 2).
rows = 1024
cols = 1024
srows = 64
scols = 64

# Random grey-level image.  Note that np.random.randint's `high` bound is
# exclusive, so this draws values 0..254.
image = np.random.randint(0, 255, rows * cols, dtype=np.uint8).reshape(
    (rows, cols)
)
# BUG FIX: the original used randint(0, 1, ...), which -- because `high`
# is exclusive -- produced an all-zero structuring element, so the dilation
# being benchmarked degenerated to a no-op.  randint(0, 2, ...) yields a
# random 0/1 structuring element as intended.
selem = np.random.randint(0, 2, srows * scols, dtype=np.uint8).reshape(
    (srows, scols)
)
out = np.zeros((rows, cols), dtype=np.uint8)
shift_x = np.int8(2)
shift_y = np.int8(2)
{ "alphanum_fraction": 0.679245283, "author": null, "avg_line_length": 21.8235294118, "converted": null, "ext": "py", "file": null, "hexsha": "790d8aa3dd20e63992a5022c4c718a8f25bcdb4a", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-11-05T03:03:14.000Z", "max_forks_repo_forks_event_min_datetime": "2019-11-05T03:03:14.000Z", "max_forks_repo_head_hexsha": "a460e9f6d1139f79b668cb3306d1e8a7e190b72d", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "fluiddyn/transonic", "max_forks_repo_path": "doc/for_dev/scikit-image/setup_codes/cmorph__dilate.py", "max_issues_count": 13, "max_issues_repo_head_hexsha": "a460e9f6d1139f79b668cb3306d1e8a7e190b72d", "max_issues_repo_issues_event_max_datetime": "2021-02-09T11:03:29.000Z", "max_issues_repo_issues_event_min_datetime": "2019-06-20T15:53:10.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "fluiddyn/transonic", "max_issues_repo_path": "doc/for_dev/scikit-image/setup_codes/cmorph__dilate.py", "max_line_length": 71, "max_stars_count": 88, "max_stars_repo_head_hexsha": "a460e9f6d1139f79b668cb3306d1e8a7e190b72d", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "fluiddyn/transonic", "max_stars_repo_path": "doc/for_dev/scikit-image/setup_codes/cmorph__dilate.py", "max_stars_repo_stars_event_max_datetime": "2022-02-06T14:19:23.000Z", "max_stars_repo_stars_event_min_datetime": "2019-01-08T16:39:08.000Z", "num_tokens": 133, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 371 }
import functools
import math
import random
import time

import numpy as np

# NOTE(review): the original also imported tqdm/trange ("progress bars") but
# never used them, and the hard import made the whole module fail to import
# wherever tqdm is not installed; the unused import has been dropped.


def timmer(func):
    """Decorator that prints the wall-clock running time of ``func``."""
    @functools.wraps(func)  # preserve name/docstring of the wrapped function
    def wrapper(*args, **kwargs):
        start_time = time.time()
        res = func(*args, **kwargs)
        stop_time = time.time()
        print('func %s, run time: %s' % (func.__name__, stop_time - start_time))
        return res
    return wrapper


class Dataset():
    """Loads and splits the delicious (user, item, tags) bookmark data."""

    def __init__(self, fp):
        # fp: path to the tab-separated data file
        self.data = self.loadData(fp)

    @timmer
    def loadData(self, fp):
        """Read the file into a list of (user, item, [tags]) triples.

        The first line is treated as a header and skipped; multiple
        (user, item, tag) rows are merged into one tag list per (user, item).
        """
        data = []
        # Context manager closes the file deterministically (the original
        # left the handle open).
        with open(fp) as f:
            for line in f.readlines()[1:]:
                data.append(line.strip().split('\t')[:3])
        new_data = {}
        for user, item, tag in data:
            new_data.setdefault(user, {}).setdefault(item, set()).add(tag)
        ret = []
        for user in new_data:
            for item in new_data[user]:
                ret.append((user, item, list(new_data[user][item])))
        return ret

    @timmer
    def splitData(self, M, k, seed=1):
        """Split ``self.data`` into train/test dictionaries.

        :param M: number of folds; results are averaged over M splits
        :param k: index of the current fold (0 <= k < M)
        :param seed: random seed -- keep it identical across k so the
            M folds form a partition of the data
        :return: (train, test), each as {user: {item: [tags]}}
        """
        train, test = [], []
        random.seed(seed)
        for user, item, tags in self.data:
            # randint is inclusive on both ends
            if random.randint(0, M - 1) == k:
                test.append((user, item, tags))
            else:
                train.append((user, item, tags))

        def convert_dict(data):
            # Reshape the triple list into {user: {item: [tags]}}.
            data_dict = {}
            for user, item, tags in data:
                data_dict.setdefault(user, {})[item] = tags
            return data_dict

        return convert_dict(train), convert_dict(test)


class Metric():
    """Precision/Recall evaluation of a tag recommender."""

    def __init__(self, train, test, GetRecommendation):
        """
        :param train: training data {user: {item: [tags]}}
        :param test: test data {user: {item: [tags]}}
        :param GetRecommendation: function (user, item) -> [(tag, score)]
        """
        self.train = train
        self.test = test
        self.GetRecommendation = GetRecommendation
        self.recs = self.getRec()

    def getRec(self):
        """Precompute recommendations for every (user, item) in the test set."""
        recs = {}
        for user in self.test:
            recs[user] = {}
            for item in self.test[user]:
                recs[user][item] = self.GetRecommendation(user, item)
        return recs

    def recall(self):
        """Percentage of true test tags that appear among the recommendations."""
        hit = 0
        total = 0  # renamed from `all`, which shadowed the builtin
        for user in self.test:
            for item in self.test[user]:
                test_tags = set(self.test[user][item])
                rank = self.recs[user][item]
                for tag, score in rank:
                    if tag in test_tags:
                        hit += 1
                total += len(test_tags)
        # Guard an empty test set instead of raising ZeroDivisionError.
        return round(hit / total * 100, 2) if total else 0.0

    def precision(self):
        """Percentage of recommended tags that are true test tags."""
        hit = 0
        total = 0
        for user in self.test:
            for item in self.test[user]:
                test_tags = set(self.test[user][item])
                rank = self.recs[user][item]
                for tag, score in rank:
                    if tag in test_tags:
                        hit += 1
                total += len(rank)
        return round(hit / total * 100, 2) if total else 0.0

    def eval(self):
        """Return and print both metrics for this split."""
        metric = {
            'Precision': self.precision(),
            'Recall': self.recall()
        }
        print('Metric: ', metric)
        return metric


# Algorithm implementations.  Each factory returns a
# GetRecommendation(user, item) -> [(tag, score)] closure.

# 1. Recommend the globally most popular tags
def PopularTags(train, N):
    """Return a recommender that always suggests the N most used tags."""
    tags = {}
    for user in train:
        for item in train[user]:
            for tag in train[user][item]:
                tags[tag] = tags.get(tag, 0) + 1
    tags = list(sorted(tags.items(), key=lambda x: x[1], reverse=True))[:N]

    def GetRecommendation(user, item):
        # Same global top-N list regardless of user/item.
        return tags

    return GetRecommendation


# 2. Recommend each user's own most used tags
def UserPopularTags(train, N):
    """Return a recommender suggesting the N tags the user applies most."""
    user_tags = {}
    for user in train:
        counts = user_tags.setdefault(user, {})
        for item in train[user]:
            for tag in train[user][item]:
                counts[tag] = counts.get(tag, 0) + 1
    user_tags = {k: list(sorted(v.items(), key=lambda x: x[1], reverse=True))
                 for k, v in user_tags.items()}

    def GetRecommendation(user, item):
        if user in user_tags:
            return user_tags[user][:N]
        else:
            return []  # cold-start user: nothing to recommend

    return GetRecommendation


# 3. Recommend each item's most used tags
def ItemPopularTags(train, N):
    """Return a recommender suggesting the N tags most applied to the item."""
    item_tags = {}
    for user in train:
        for item in train[user]:
            counts = item_tags.setdefault(item, {})
            for tag in train[user][item]:
                counts[tag] = counts.get(tag, 0) + 1
    item_tags = {k: list(sorted(v.items(), key=lambda x: x[1], reverse=True))
                 for k, v in item_tags.items()}

    def GetRecommendation(user, item):
        if item in item_tags:
            return item_tags[item][:N]
        else:
            return []  # cold-start item: nothing to recommend

    return GetRecommendation


# 4. Blend user and item tag popularity
def HybridPopularTags(train, N, alpha):
    """Return a recommender blending user- and item-side tag popularity.

    Each side is normalised by its own maximum count; the item side is
    weighted by ``alpha`` and the user side by ``1 - alpha``.
    """
    user_tags = {}
    for user in train:
        counts = user_tags.setdefault(user, {})
        for item in train[user]:
            for tag in train[user][item]:
                counts[tag] = counts.get(tag, 0) + 1

    item_tags = {}
    for user in train:
        for item in train[user]:
            counts = item_tags.setdefault(item, {})
            for tag in train[user][item]:
                counts[tag] = counts.get(tag, 0) + 1

    def GetRecommendation(user, item):
        tag_score = {}
        if user in user_tags:
            max_user_tag = max(user_tags[user].values())
            for tag, cnt in user_tags[user].items():
                tag_score[tag] = (tag_score.get(tag, 0)
                                  + (1 - alpha) * cnt / max_user_tag)
        if item in item_tags:
            max_item_tag = max(item_tags[item].values())
            for tag, cnt in item_tags[item].items():
                tag_score[tag] = (tag_score.get(tag, 0)
                                  + alpha * cnt / max_item_tag)
        return list(sorted(tag_score.items(),
                           key=lambda x: x[1], reverse=True))[:N]

    return GetRecommendation


class Experiment():
    """Runs an M-fold evaluation of one of the tag recommenders."""

    def __init__(self, M, N,
                 fp='./dataset/delicious-2k/user_taggedbookmarks.dat',
                 rt='PopularTags'):
        """
        :param M: number of cross-validation folds
        :param N: number of tags to recommend
        :param fp: path to the delicious data file
        :param rt: recommender name; one of the keys of ``self.alg``
        """
        self.M = M
        self.N = N
        self.fp = fp
        self.rt = rt
        self.alg = {'PopularTags': PopularTags,
                    'UserPopularTags': UserPopularTags,
                    'ItemPopularTags': ItemPopularTags,
                    'HybridPopularTags': HybridPopularTags}

    @timmer
    def worker(self, train, test, **kwargs):
        """Evaluate one fold; extra kwargs (e.g. alpha) go to the algorithm."""
        getRecommendation = self.alg[self.rt](train, self.N, **kwargs)
        metric = Metric(train, test, getRecommendation)
        return metric.eval()

    @timmer
    def run(self, **kwargs):
        """Average the metrics over the M folds and print the result."""
        metrics = {'Precision': 0, 'Recall': 0}
        dataset = Dataset(self.fp)
        for i in range(self.M):
            train, test = dataset.splitData(self.M, i)
            print('Experiment {}'.format(i))
            metric = self.worker(train, test, **kwargs)
            metrics = {k: metrics[k] + metric[k] for k in metrics}
        metrics = {k: metrics[k] / self.M for k in metrics}
        print('Average Result (M={}, N={}): {}'.format(self.M, self.N, metrics))


if __name__ == '__main__':
    # The __main__ guard lets this module be imported (e.g. for testing)
    # without running the experiments or touching the dataset on disk.
    #
    # Recorded results (M=10, N=10):
    # 1. PopularTags:       Precision ~2.76,  Recall ~6.61
    # 2. UserPopularTags:   Precision ~10.52, Recall ~24.96
    # 3. ItemPopularTags:   Precision ~12.05, Recall ~8.58
    # 4. HybridPopularTags: best near alpha=0.5
    #                       (Precision ~11.59, Recall ~27.62)
    M, N = 10, 10
    exp = Experiment(M, N, rt='ItemPopularTags')
    exp.run()
{ "alphanum_fraction": 0.5439689511, "author": null, "avg_line_length": 30.7893081761, "converted": null, "ext": "py", "file": null, "hexsha": "f96d4e497d92ed21d32779d0342524157c513408", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "df33b90ffb40bf794d873cabf8d01708795baffd", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "zwd1217944524/test", "max_forks_repo_path": "3.2.tag-recommendation.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "df33b90ffb40bf794d873cabf8d01708795baffd", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "zwd1217944524/test", "max_issues_repo_path": "3.2.tag-recommendation.py", "max_line_length": 108, "max_stars_count": null, "max_stars_repo_head_hexsha": "df33b90ffb40bf794d873cabf8d01708795baffd", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "zwd1217944524/test", "max_stars_repo_path": "3.2.tag-recommendation.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2771, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 9791 }
# -*- coding: utf-8 -*-
"""Test for updater.ensemble module"""
import numpy as np
import datetime

from stonesoup.models.measurement.linear import LinearGaussian
from stonesoup.types.detection import Detection
from stonesoup.types.hypothesis import SingleHypothesis
from stonesoup.types.prediction import EnsembleStatePrediction
from stonesoup.types.state import EnsembleState
from stonesoup.updater.ensemble import EnsembleUpdater


def test_ensemble():
    """End-to-end check of EnsembleUpdater.update().

    Runs the updater twice -- once letting it compute the measurement
    prediction itself, and once with the measurement prediction supplied
    in the hypothesis -- and verifies the updated state's bookkeeping
    (timestamp, hypothesis contents, state dimension, ensemble size) plus
    the internal consistency of the square-root covariance factor.
    """
    # Initialize variables
    # 1-D linear measurement of state component 0, noise variance 0.04.
    measurement_model = LinearGaussian(ndim_state=2, mapping=[0],
                                       noise_covar=np.array([[0.04]]))
    timestamp = datetime.datetime(2021, 3, 5, 22, 3, 17)
    num_vectors = 100
    # 100-member ensemble drawn around a fixed mean and covariance.
    test_ensemble = EnsembleState.generate_ensemble(
        np.array([[-6.45], [0.7]]),
        np.array([[4.1123, 0.0013],
                  [0.0013, 0.0365]]), num_vectors)

    # Create Prediction, Measurement, and Updater
    prediction = EnsembleStatePrediction(test_ensemble,
                                         timestamp=timestamp)
    measurement = Detection(np.array([[-6.23]]), timestamp)
    updater = EnsembleUpdater(measurement_model)

    # Construct hypothesis
    hypothesis = SingleHypothesis(prediction=prediction,
                                  measurement=measurement)

    # Run updater
    updated_state = updater.update(hypothesis)

    # The update must carry through timestamp, hypothesis and sizes.
    assert updated_state.timestamp == timestamp
    assert updated_state.hypothesis.prediction == prediction
    assert updated_state.hypothesis.measurement == measurement
    assert updated_state.ndim == updated_state.hypothesis.prediction.ndim
    assert updated_state.num_vectors == \
        updated_state.hypothesis.prediction.num_vectors
    # sqrt_covar must actually be a square-root factor of covar.
    assert np.allclose(updated_state.sqrt_covar @ updated_state.sqrt_covar.T,
                       updated_state.covar)

    # Test updater runs with measurement prediction already in hypothesis.
    test_measurement_prediction = updater.predict_measurement(prediction)
    hypothesis = SingleHypothesis(prediction=prediction,
                                  measurement=measurement,
                                  measurement_prediction=test_measurement_prediction)
    updated_state = updater.update(hypothesis)
    # Same bookkeeping checks as above for the pre-supplied prediction path.
    assert updated_state.timestamp == timestamp
    assert updated_state.hypothesis.prediction == prediction
    assert updated_state.hypothesis.measurement == measurement
    assert updated_state.ndim == updated_state.hypothesis.prediction.ndim
    assert updated_state.num_vectors == \
        updated_state.hypothesis.prediction.num_vectors
    assert np.allclose(updated_state.sqrt_covar @ updated_state.sqrt_covar.T,
                       updated_state.covar)
{ "alphanum_fraction": 0.6912459135, "author": null, "avg_line_length": 41.7121212121, "converted": null, "ext": "py", "file": null, "hexsha": "dc84e5389fa187834bbfde5cf782068acaa06d45", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "aaa895b54383e9a9b9c9f9ff746291bf60242aab", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "0sm1um/Stone-Soup", "max_forks_repo_path": "stonesoup/updater/tests/test_ensemble.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "aaa895b54383e9a9b9c9f9ff746291bf60242aab", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "0sm1um/Stone-Soup", "max_issues_repo_path": "stonesoup/updater/tests/test_ensemble.py", "max_line_length": 85, "max_stars_count": 1, "max_stars_repo_head_hexsha": "aaa895b54383e9a9b9c9f9ff746291bf60242aab", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "0sm1um/Stone-Soup", "max_stars_repo_path": "stonesoup/updater/tests/test_ensemble.py", "max_stars_repo_stars_event_max_datetime": "2021-12-02T00:17:21.000Z", "max_stars_repo_stars_event_min_datetime": "2021-12-02T00:17:21.000Z", "num_tokens": 542, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 2753 }
[STATEMENT] lemma swing_pos_inv:"swing_pos w1 w2 \<Longrightarrow> (kauff_mat w1) = (kauff_mat w2)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. swing_pos w1 w2 \<Longrightarrow> kauff_mat w1 = kauff_mat w2 [PROOF STEP] unfolding swing_pos_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. w1 = r_over_braid \<and> w2 = l_over_braid \<Longrightarrow> kauff_mat w1 = kauff_mat w2 [PROOF STEP] using kauff_mat_swingpos [PROOF STATE] proof (prove) using this: kauff_mat r_over_braid = kauff_mat l_over_braid goal (1 subgoal): 1. w1 = r_over_braid \<and> w2 = l_over_braid \<Longrightarrow> kauff_mat w1 = kauff_mat w2 [PROOF STEP] by auto
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Knot_Theory_Linkrel_Kauffman", "hexsha": null, "include": null, "lang": null, "length": 3, "llama_tokens": 280, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
# rng -- pseudo-random number generators.
# Computers cannot generate truly random numbers, but they can produce
# sequences with good statistical randomness.

# 1. Julia's built-in generator.
rand(100)

# --- Linear congruential pseudo-random generators ---------------------------

"""
    rng_Park_Miller1998(n; x1=3)

Return `n` uniform(0,1) variates from the Park-Miller "minimal standard"
LCG: x_{i+1} = 16807 * x_i mod (2^31 - 1), seeded with integer `x1`.
This was the generator implemented in MATLAB 4 (1990).
"""
function rng_Park_Miller1998(n; x1=3)
    x = zeros(n)
    x[1] = x1
    for i in 2:n
        # 16807 = 7^5; 2147483647 = 2^31 - 1 (the 8th Mersenne prime)
        x[i] = mod(16807 * x[i-1], 2147483647)
    end
    return x ./ 2147483647
end

rng_Park_Miller1998(100; x1=3)

"""
    rng_randu(n; x1=3)

The classic RANDU generator: x_{i+1} = 65539 * x_i mod 2^31.
Known for poor quality; kept here for demonstration.
"""
function rng_randu(n; x1=3)
    x = zeros(n)
    x[1] = x1
    for i in 2:n
        x[i] = mod(65539 * x[i-1], 2147483648)
    end
    return x ./ 2147483648
end

rng_randu(100)

# --- Iterative (chaotic-map) pseudo-random generator ------------------------

"""
    rng_LMap(n; x1=.4)

Iterates the chaotic map x -> 1 - 2x^2 on [-1, 1] starting from `x1`,
then rescales the orbit to [0, 1].
"""
function rng_LMap(n; x1=.4)
    x = zeros(n)
    x[1] = x1
    for i in 2:n
        x[i] = 1 - 2 * x[i-1]^2
    end
    return (x ./ 2) .+ 0.5
end

rng_LMap(100)

# --- Self-avoiding (low-discrepancy) sequence -------------------------------

"""
    rng_Halton(n; x1=3)

First `n` points of the Halton / van der Corput sequence in base `x1`:
an incrementing base-`x1` digit counter `b` is radical-inverted
(digits mirrored about the radix point) into (0, 1).
"""
function rng_Halton(n; x1 = 3)
    # The digit counter must be able to represent n in base x1.
    # BUG FIX: the original allocated ceil(log(n)/log(x1)) digits, one digit
    # short when n is 1 or an exact power of x1, causing a BoundsError during
    # carry propagation.  The extra leading digit stays 0 for all other n,
    # so previously-working outputs are unchanged.
    b = zeros(Int(ceil(log(n + 1) / log(x1))))
    u = zeros(n)
    for j in 1:n
        # Increment the base-x1 counter with carry propagation.
        i = 1
        b[1] += 1
        while b[i] > (x1 - 1 + eps())
            b[i] = 0
            i += 1
            b[i] += 1
        end
        # Radical inverse: digit k contributes b[k] * x1^(-k).
        u[j] = 0
        for k in 1:length(b)
            u[j] += b[k] * Float64(x1)^(-k)
        end
    end
    return u
end

rng_Halton(100)
{ "alphanum_fraction": 0.527982327, "author": null, "avg_line_length": 17.8684210526, "converted": null, "ext": "jl", "file": null, "hexsha": "86ddef1e13ef8ff19bc72bf8ce82991f518d4ac3", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "d887ee783bfc6e4efc1b4f748c19a21baef5d654", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "HaoLi111/Julia_Numerical_Recipe", "max_forks_repo_path": "RNG.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "d887ee783bfc6e4efc1b4f748c19a21baef5d654", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "HaoLi111/Julia_Numerical_Recipe", "max_issues_repo_path": "RNG.jl", "max_line_length": 73, "max_stars_count": 3, "max_stars_repo_head_hexsha": "d887ee783bfc6e4efc1b4f748c19a21baef5d654", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "HaoLi111/Julia_Numerical_Recipe", "max_stars_repo_path": "RNG.jl", "max_stars_repo_stars_event_max_datetime": "2020-05-05T04:55:37.000Z", "max_stars_repo_stars_event_min_datetime": "2019-06-27T05:52:24.000Z", "num_tokens": 477, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 1358 }
# import itertools import numpy as np import matplotlib.pyplot as plt from netCDF4 import Dataset from postladim import ParticleFile # --------------- # User settings # --------------- # Files particle_file = "station.nc" # particle_file = "/home/bjorn/ladim/examples/station/station.nc" grid_file = "../data/ocean_avg_0014.nc" # Subgrid definition i0, i1 = 100, 140 j0, j1 = 84, 133 # time step t = 100 # ---------------- # ROMS grid, plot domain with Dataset(grid_file) as f0: H = f0.variables["h"][j0:j1, i0:i1] M = f0.variables["mask_rho"][j0:j1, i0:i1] lon = f0.variables["lon_rho"][j0:j1, i0:i1] lat = f0.variables["lat_rho"][j0:j1, i0:i1] # Cell centers and boundaries Xcell = np.arange(i0, i1) Ycell = np.arange(j0, j1) Xb = np.arange(i0 - 0.5, i1) Yb = np.arange(j0 - 0.5, j1) # particle_file pf = ParticleFile(particle_file) # num_times = pf.num_times # Set up the plot area fig = plt.figure(figsize=(12, 10)) ax = plt.axes(xlim=(i0 + 1, i1 - 1), ylim=(j0 + 1, j1 - 1), aspect="equal") # Background bathymetry cmap = plt.get_cmap("Blues") ax.contourf(Xcell, Ycell, H, cmap=cmap, alpha=0.3) # Lon/lat lines ax.contour(Xcell, Ycell, lat, levels=range(57, 64), colors="black", linestyles=":") ax.contour(Xcell, Ycell, lon, levels=range(-4, 10, 2), colors="black", linestyles=":") # Landmask constmap = plt.matplotlib.colors.ListedColormap([0.2, 0.6, 0.4]) M = np.ma.masked_where(M > 0, M) plt.pcolormesh(Xb, Yb, M, cmap=constmap) # Plot particle distribution X, Y = pf.position(time=t) Z = pf["Z"][t] particle_dist = ax.scatter(X, Y, c=Z, cmap=plt.get_cmap("plasma_r")) # Colorbar cb = plt.colorbar(particle_dist) cb.ax.invert_yaxis() cb.set_label("Particle depth", fontsize=14) # Time stamp timestamp = ax.text(0.01, 0.96, pf.time(t), fontsize=15, transform=ax.transAxes) plt.show()
{ "alphanum_fraction": 0.6688632619, "author": null, "avg_line_length": 25.6478873239, "converted": null, "ext": "py", "file": null, "hexsha": "dc715bc12c12a83ed399bf8b2f8eb69b98741fe5", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f6c1be9028ca54370ce33dde25b005d5b0bb4677", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "bjornaa/ladim2", "max_forks_repo_path": "examples/station/plot.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "f6c1be9028ca54370ce33dde25b005d5b0bb4677", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "bjornaa/ladim2", "max_issues_repo_path": "examples/station/plot.py", "max_line_length": 86, "max_stars_count": null, "max_stars_repo_head_hexsha": "f6c1be9028ca54370ce33dde25b005d5b0bb4677", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "bjornaa/ladim2", "max_stars_repo_path": "examples/station/plot.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 632, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1821 }
module PyCMA using PyCall const cma = PyNULL() function __init__() copy!(cma, pyimport("cma")) py""" import numpy as np """ end end # module
{ "alphanum_fraction": 0.6111111111, "author": null, "avg_line_length": 10.125, "converted": null, "ext": "jl", "file": null, "hexsha": "43a9b1836a065fd6ce9c13cb43d537378df68812", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b7d43485407ca4a4df140589e4d49dac792795cb", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "jbrea/PyCMA.jl", "max_forks_repo_path": "src/PyCMA.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "b7d43485407ca4a4df140589e4d49dac792795cb", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "jbrea/PyCMA.jl", "max_issues_repo_path": "src/PyCMA.jl", "max_line_length": 31, "max_stars_count": null, "max_stars_repo_head_hexsha": "b7d43485407ca4a4df140589e4d49dac792795cb", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "jbrea/PyCMA.jl", "max_stars_repo_path": "src/PyCMA.jl", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 49, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 162 }
# # Copyright (c) 2014, Texas Water Development Board # All rights reserved. # # This code is open-source. See LICENSE file for details. # """ View definitions for controlling layout of survey line editor task Views are: *BigView: Example of use for overall layout with Controls on left and Plots on right. *PlotView: Sets plot view which may be just the plot component and legend. *ControlView: Set layout of controls and information displays """ # Std lib imports import logging # other imports import numpy as np # ETS imports from enable.api import ComponentEditor from traits.api import (Instance, Str, List, HasTraits, Float, Property, Enum, Bool, Dict, on_trait_change, Trait, Callable, Tuple, CFloat) from traitsui.api import (View, Item, EnumEditor, UItem, InstanceEditor, TextEditor, RangeEditor, Label, HGroup, CheckListEditor, Group) from chaco import default_colormaps from chaco.api import (Plot, ArrayPlotData, VPlotContainer, HPlotContainer, Legend, create_scatter_plot, PlotComponent, create_line_plot, DataRange1D) from chaco.tools.api import (PanTool, ZoomTool, RangeSelection, LineInspector, RangeSelectionOverlay, LegendHighlighter) from chaco.base import n_gon # Local imports from .survey_tools import InspectorFreezeTool from .survey_data_session import SurveyDataSession # global constants # these still need to be tweaked to get the right look logger = logging.getLogger(__name__) COLORMAPS = default_colormaps.color_map_name_dict.keys() DEFAULT_COLORMAP = 'Spectral' TITLE_FONT = 'swiss 10' MINI_HEIGHT = 100 SLICE_PLOT_WIDTH = 75 ZOOMBOX_COLOR = 'lightgreen' ZOOMBOX_ALPHA = 0.3 HPLOT_PADDING = 0 MAIN_PADDING = 10 MAIN_PADDING_LEFT = 20 MAIN_PADDING_BOTTOM = 10 MINI_PADDING = 15 CONTRAST_MAX = float(20) CORE_VISIBILITY_CRITERIA = 200.0 CORE_LINE_WIDTH = 2 class InstanceUItem(UItem): '''Convenience class for inluding instance in view as Item''' style = Str('custom') editor = Instance(InstanceEditor, ()) class ColormapEditView(HasTraits): ''' provides dialog box to 
select colormap''' colormap = Enum(COLORMAPS) traits_view = View( Group(Label('Frequency to Edit'), Item('colormap') ), buttons=["OK", "Cancel"], ) class PlotContainer(HasTraits): ''' miniplot must have at least one plot with an index. therefore there should be a check in the plot dictionary that there is a plot with an index ''' #========================================================================== # Traits Attributes #========================================================================== # data objects recived from model_view model = Instance(SurveyDataSession) data = Instance(ArrayPlotData) # main plot component returned vplot_container = Instance(VPlotContainer) ###### storage structures for listener access to these objects via lookup # dict of hplots (main-edit-plot|slice plot) keyed by freq like intensities hplot_dict = Dict(Str, Instance(HPlotContainer)) # dict of all core plots so we can set visibility core_plots_dict = Dict # hplots visible in main vplotcontainer. Set by checkbox. selected_hplots = List # show intensity slice profiles show_intensity_profiles = Bool(True) # legends for each hplot:main. 
Used to set visibility legend_dict = Dict # shared dict of line_plot objects, mostly for when edit target changes plot_dict = Dict(Str, PlotComponent) # tool used to toggle active state of cursor via keyboard () inspector_freeze_tool = Instance(InspectorFreezeTool) img_colormap = Enum(COLORMAPS) # private traits _cmap = Trait(default_colormaps.Spectral, Callable) # main_value_range = Instance(DataRange1D) # #========================================================================== # Define Views #========================================================================== traits_view = View(UItem('vplot_container', editor=ComponentEditor(), show_label=False ) ) #========================================================================== # Defaults #========================================================================== def _vplot_container_default(self): vpc = self.create_empty_plot() return vpc def _inspector_freeze_tool_default(self): # sets up keybd shortcut and tool for freezing cursor activity tool = InspectorFreezeTool(tool_set=set(), main_key="c", modifier_keys=["alt"], ignore_keys=[] ) return tool def _core_plot_dict_default(self): d = {} for core in self.model.core_samples: d[core.core_id] = [] def __cmap_default(self): cm = default_colormaps.color_map_name_dict[self.img_colormap] return cm def _img_colormap_default(self): return DEFAULT_COLORMAP # def _main_value_range_default(self): # dr = DataRange1D() # dr.set_bounds('auto', 'auto') # return dr #========================================================================== # Helper functions #========================================================================== def create_empty_plot(self): ''' place filler ''' vpc = VPlotContainer(bgcolor='lightgrey', height=1000, width=800) self.vplot_container = vpc return vpc def create_vplot(self): ''' fill vplot container with 1 mini plot for range selection and N main plots ordered from to top to bottom by freq ''' vpc = VPlotContainer(bgcolor='lightgrey') if 
self.model.freq_choices: # create mini plot using the highest freq as background keys = self.model.freq_choices mini = self.create_hplot(key=keys[-1], mini=True) self.mini_hplot = mini vpc.add(mini) # create hplot containers for each freq and add to dict. # dictionary will be used to access these later to individually # address them to turn them on or off etc. # note these are added with lowest freq on bottom for freq in self.model.freq_choices: hpc = self.create_hplot(key=freq) self.hplot_dict[freq] = hpc vpc.add(hpc) # add tool to freeze line inspector cursor when in desired position vpc.tools.append(self.inspector_freeze_tool) self.vplot_container = vpc self.set_hplot_visibility(all=True) self.set_intensity_profile_visibility() def set_hplot_visibility(self, all=False): ''' to be called when selected hplots are changed For selected hplots set the hplot, legend, and axis visibility''' if all: self.selected_hplots = self.model.freq_choices # get sorted list of hplots to add on top of mini sorted_hplots = [f for f in self.model.freq_choices if f in self.selected_hplots] if sorted_hplots: bottom = sorted_hplots[0] top = sorted_hplots[-1] for freq, hpc in self.hplot_dict.items(): hpc.visible = ((freq in sorted_hplots) or (freq == 'mini')) main = hpc.components[0] if freq == bottom or freq == 'mini': main.x_axis.visible = True hpc.padding_bottom = MAIN_PADDING_BOTTOM else: main.x_axis.visible = False hpc.padding_bottom = MAIN_PADDING_BOTTOM legend, highlighter = self.legend_dict.get(freq, [None, None]) if legend: legend.visible = (freq == top) else: logger.info('no hplot containers') self.reset_all() def reset_all(self): for k, hpc in self.hplot_dict.items(): if k is not 'mini': profile = hpc.components[1] main = hpc.components[0] profile.visible = False main.visible = False hpc.visible = False hpc.invalidate_and_redraw() for k, hpc in self.hplot_dict.items(): if k is not 'mini': profile = hpc.components[1] main = hpc.components[0] if k in self.selected_hplots: 
main.visible = True profile.visible = self.show_intensity_profiles hpc.visible = True hpc.invalidate_and_redraw() self.vplot_container.invalidate_and_redraw() def set_intensity_profile_visibility(self, show=True): ''' sets intensity profile visibility for all hplots ''' self.reset_all() def create_hplot(self, key=None, mini=False): if mini: hpc = HPlotContainer(bgcolor='darkgrey', height=MINI_HEIGHT, resizable='h', padding=0 ) else: hpc = HPlotContainer(bgcolor='lightgrey', padding=HPLOT_PADDING, resizable='hv' ) # make slice plot for showing intesity profile of main plot #************************************************************ slice_plot = Plot(self.data, width=SLICE_PLOT_WIDTH, orientation="v", resizable="v", padding=MAIN_PADDING, padding_left=MAIN_PADDING_LEFT, bgcolor='beige', origin='top left' ) slice_plot.x_axis.visible = False slice_key = key + '_slice' ydata_key = key + '_y' slice_plot.plot((ydata_key, slice_key), name=slice_key) # make main plot for editing depth lines #************************************************************ main = Plot(self.data, border_visible=True, bgcolor='beige', origin='top left', padding=MAIN_PADDING, padding_left=MAIN_PADDING_LEFT, ) if mini: main.padding = MINI_PADDING # add intensity img to plot and get reference for line inspector #************************************************************ img_plot = main.img_plot(key, name=key, xbounds=self.model.xbounds[key], ybounds=self.model.ybounds[key], colormap=self._cmap )[0] # add line plots: use method since these may change #************************************************************ self.update_line_plots(key, main, update=True) # set slice plot index range to follow main plot value range #************************************************************ slice_plot.index_range = main.value_range # add vertical core lines to main plots and slices #************************************************************ # save pos and distance in session dict for view info and control for 
core in self.model.core_samples: loc_index, loc, dist = self.model.core_info_dict[core.core_id] # add boundarys to slice plot ref_line = self.model.final_lake_depth self.plot_core_depths(slice_plot, core, ref_line, loc_index) # add positions to main plots self.plot_core(main, core, ref_line, loc_index, loc) # now add tools depending if it is a mini plot or not #************************************************************ if mini: # add range selection tool only # first add a reference line to attach it to reference = self.make_reference_plot() main.add(reference) # attache range selector to this plot range_tool = RangeSelection(reference) reference.tools.append(range_tool) range_overlay = RangeSelectionOverlay(reference, metadata_name="selections") reference.overlays.append(range_overlay) range_tool.on_trait_change(self._range_selection_handler, "selection") # add zoombox to mini plot main.plot(('zoombox_x', 'zoombox_y'), type='polygon', face_color=ZOOMBOX_COLOR, alpha=ZOOMBOX_ALPHA) # add to hplot and dict hpc.add(main) self.hplot_dict['mini'] = hpc else: # add zoom tools main.tools.append(PanTool(main)) zoom = ZoomTool(main, tool_mode='box', axis='both', alpha=0.5) main.tools.append(zoom) main.overlays.append(zoom) main.value_mapper.on_trait_change(self.zoom_all_value, 'updated') main.index_mapper.on_trait_change(self.zoom_all_index, 'updated') # add line inspector and attach to freeze tool #********************************************* line_inspector = LineInspector(component=img_plot, axis='index_x', inspect_mode="indexed", is_interactive=True, write_metadata=True, metadata_name='x_slice', is_listener=True, color="white") img_plot.overlays.append(line_inspector) self.inspector_freeze_tool.tool_set.add(line_inspector) # add listener for changes to metadata made by line inspector #************************************************************ img_plot.on_trait_change(self.metadata_changed, 'index.metadata') # set slice plot index range to follow main plot value 
range #************************************************************ slice_plot.index_range = main.value_range # add clickable legend ; must update legend when depth_dict updated #****************************************************************** legend = Legend(component=main, padding=0, align="ur", font='modern 8') legend_highlighter = LegendHighlighter(legend, drag_button="right") legend.tools.append(legend_highlighter) self.update_legend_plots(legend, main) legend.visible = False self.legend_dict[key] = [legend, legend_highlighter] main.overlays.append(legend) # add main and slice plot to hplot container and dict #**************************************************** main.title = 'frequency = {} kHz'.format(key) main.title_font = TITLE_FONT hpc.add(main, slice_plot) self.hplot_dict[key] = hpc return hpc def update_legend_plots(self, legend, plot): ''' update legend if lines added or changed''' for k, v in self.model.depth_dict.items(): legend.plots[k] = plot.plots[k] def update_all_line_plots(self, update=False): ''' reload all line plots when added or changed''' for key in self.model.freq_choices: hpc = self.hplot_dict[key] plot = hpc.components[0] self.update_line_plots(key, plot, update=update) legend, highlighter = self.legend_dict[key] self.update_legend_plots(legend, plot) legend_highlighter = LegendHighlighter(legend, drag_button="right") if highlighter in legend.tools: legend.tools.remove(highlighter) legend.tools.append(legend_highlighter) plot.invalidate_and_redraw() def update_line_plots(self, key, plot, update=False): ''' takes a Plot object and adds all available line plots to it. Each Plot.plots has one img plot labeled by freq key and the rest are line plots. When depth_dict is updated, check all keys to see all lines are plotted. 
Update=True will replot all lines even if already there (for style changes)''' for line_key, depth_line in self.model.depth_dict.items(): not_plotted = line_key not in plot.plots not_image = line_key not in self.model.freq_choices if (not_plotted or update) and not_image: line_plot = self.plot_depth_line(key, line_key, depth_line, plot) # note: plot dict needs 3 entries for every line since each # freq has a copy using the same plotdata source plot_key = key + '_' + line_key self.plot_dict[plot_key] = line_plot def plot_depth_line(self, key, line_key, depth_line, plot): ''' plot a depth_line using a depth line object''' # add data to ArrayPlotData if not there if line_key not in self.data.arrays.keys(): x = self.model.distance_array[depth_line.index_array] y = depth_line.depth_array key_x, key_y = line_key + '_x', line_key + '_y' self.data.update({key_x: x, key_y: y}) # now plot line_plot = plot.plot((key_x, key_y), color=depth_line.color, name=line_key )[0] # match vertical to ybounds in case there are pathological points line_plot.value_range = plot.plots[key][0].index_range.y_range return line_plot def make_reference_plot(self): ''' make reference plot for mini plot range selector''' x_pts = np.array([self.model.distance_array.min(), self.model.distance_array.max() ] ) y_pts = 0 * x_pts ref_plot = create_scatter_plot((x_pts, y_pts), color='black') return ref_plot #========================================================================== # Notifiers and Handlers #========================================================================== @on_trait_change('model') def update(self): ''' make new vplot when a new survey line is selected''' self.create_vplot() def zoom_all_value(self, obj, name, old, new): low, high = obj.range.low, obj.range.high # change y values of zoombox in mini self.data.update_data(zoombox_y=np.array([low, low, high, high])) for key, hpc in self.hplot_dict.items(): if key != 'mini': vmapper = hpc.components[0].value_mapper if vmapper.range.low 
!= low: vmapper.range.low = low if vmapper.range.high != high: vmapper.range.high = high def zoom_all_index(self, obj, name, old, new): low, high = obj.range.low, obj.range.high # change x values of zoombox self.data.update_data(zoombox_x=np.array([low, high, high, low])) for key, hpc in self.hplot_dict.items(): if key != 'mini': vmapper = hpc.components[0].index_mapper if vmapper.range.low != low: vmapper.range.low = low if vmapper.range.high != high: vmapper.range.high = high def _range_selection_handler(self, event): ''' updates the main plots when the range selector in the mini plot is adjusted. The event obj should be a tuple (low, high) in data space ''' if event is not None: #adjust index range for main plots low, high = event for key, hpc in self.hplot_dict.items(): if key is not 'mini': this_plot = hpc.components[0] this_plot.index_range.low = low this_plot.index_range.high = high else: # reset range back to full/auto for main plots for key, hpc in self.hplot_dict.items(): if key is not 'mini': this_plot = hpc.components[0] this_plot.index_range.set_bounds("auto", "auto") def metadata_changed(self, obj, name, old, new): ''' handler for line inspector tool. provides changed "index" trait of intensity image whose meta data was changed by the line inspector. The line inspector sets the "x_slice" key in the meta data. We then retrieve this and use it to update the metadata for the intensity plots for all freqs''' selected_meta = obj.metadata slice_meta = selected_meta.get("x_slice", None) for key, hplot in self.hplot_dict.items(): if key is not 'mini': self.update_hplot_slice(key, hplot, slice_meta) def update_hplot_slice(self, key, hplot, slice_meta): ''' when meta data changes call this with relevant hplots to update slice from cursor position''' slice_key = key+'_slice' img = hplot.components[0].plots[key][0] if slice_meta: # set metadata and data # check hplot img meta != new meta. if !=, change it. 
# this will update tools for other frequencies this_meta = img.index.metadata if this_meta.get('x_slice', None) is not slice_meta: this_meta.update({"x_slice": slice_meta}) x_index, y_index = slice_meta try: if x_index: # now updata data array which will updata slice plot slice_data = img.value.data[:, x_index] self.data.update_data({slice_key: slice_data}) else: self.data.update_data({slice_key: np.array([])}) except IndexError: self.data.update_data({slice_key: np.array([])}) try: # abs_index is the trace number for the selected image index abs_index = self.model.freq_trace_num[key][x_index] - 1 x_pos = self.model.distance_array[abs_index] except IndexError: # if for some reason the tool returns a crazy index then bound # it to array limits. logger.info('cursor index out of bounds: value set to limit') indices = self.model.freq_trace_num[key]-1 x_ind_max = indices.size - 1 x_ind_clipped = np.clip(x_index, 0, x_ind_max) abs_index = indices[x_ind_clipped] x_pos = self.model.distance_array[abs_index] # check if cursor is 'near' core, and set visibility in sliceplot for core in self.model.core_samples: # show core if cursor within range of core location loc_index, loc, dist = self.model.core_info_dict[core.core_id] core_plot_list = self.core_plots_dict[core.core_id] for core_plot in core_plot_list: try: if np.abs(x_pos - loc) < CORE_VISIBILITY_CRITERIA: core_plot.visible = True else: core_plot.visible = False except ValueError: debug = 'core dist check xpos,loc,abs(x-l)\n={},{},{}' absdiff = np.abs(x_pos-loc) logger.debug(debug.format(x_pos, loc, absdiff)) else: # clear all slice plots self.data.update_data({slice_key: np.array([])}) def plot_core(self, main, core, ref_line, loc_index, loc): ''' plot core info on main plot''' # first plot vertical line y_range = main.value_range ys = np.array([y_range.low, y_range.high]) xs = ys * 0 + loc line = create_line_plot((xs, ys), color='lightgreen', width=CORE_LINE_WIDTH) line.origin = 'top left' line.index_range = 
main.index_range main.add(line) # then plot boundary layers as dots on line layer_depths = core.layer_boundaries ref_depth_line = self.model.get_ref_depth_line() if ref_depth_line: ref_depth = ref_depth_line.depth_array[loc_index] else: ref_depth = 0 ys = ref_depth + layer_depths xs = ys * 0 + loc scatter = create_scatter_plot((xs, ys), color='darkgreen', marker='circle', marker_size=CORE_LINE_WIDTH + 1) scatter.origin = 'top left' scatter.value_range = main.value_range scatter.index_range = main.index_range main.add(scatter) def plot_core_depths(self, slice_plot, core, ref_line, loc_index): ''' plot a set of core depths to the given slice plot set to not visible by default but then show when within show_core_range''' x_range = slice_plot.index_range xs = np.array([x_range.low, x_range.high]) ref_depth_line = self.model.get_ref_depth_line() if ref_depth_line: ref_depth = ref_depth_line.depth_array[loc_index] else: ref_depth = 0 for boundary in core.layer_boundaries: ys = xs * 0 + (ref_depth + boundary) line = create_line_plot((xs, ys), orientation='h', color='lightgreen', width=CORE_LINE_WIDTH) line.origin = 'top left' line.value_range = slice_plot.index_range self.core_plots_dict.setdefault(core.core_id, []).append(line) slice_plot.add(line) def _img_colormap_changed(self): ''' updates colormap in images when img_colormap changes''' self._cmap = default_colormaps.color_map_name_dict[self.img_colormap] for key, hpc in self.hplot_dict.items(): main = hpc.components[0] if key == 'mini': key = self.model.freq_choices[-1] img_plot = main.plots[key][0] if img_plot is not None: value_range = img_plot.color_mapper.range img_plot.color_mapper = self._cmap(value_range) print 'redraw', key main.invalidate_and_redraw() class ControlView(HasTraits): ''' Define controls and info subview with size control''' # list of keys for target depth lines to edit (changes if list does) target_choices = List(Str) # chosen key for depth line to edit line_to_edit = Str # used to explicitly 
get edit mode edit = Enum('Editing', 'Not Editing') # Button('Not Editing') traits_view = View( HGroup( UItem('edit', tooltip='Toggle between "not editing" and \ "editing" selected line' ), Item('line_to_edit', editor=EnumEditor(name='target_choices'), tooltip='Edit red line with right mouse button' ), ), resizable=True ) def _edit_default(self): return 'Not Editing' class ImageAdjustView(HasTraits): # brightness contrast controls freq_choices = List frequency = Str brightness = Float(0.0) contrast = Float(1.0) contrast_brightness = Property(depends_on=['brightness', 'contrast']) invert = Bool traits_view = View( Label('Frequency to Edit'), UItem('frequency', editor=EnumEditor(name='freq_choices')), Label('Brightness and Contrast'), Item('brightness', editor=RangeEditor(low=0.0, high=1.0), label='B'), Item('contrast', editor=RangeEditor(low=1.0, high=CONTRAST_MAX), label='C'), Item('invert'), resizable=True, kind='livemodal' ) def _get_contrast_brightness(self): return (self.contrast, self.brightness) class HPlotSelectionView(HasTraits): ''' provide checkbox pop up to set visibilty of hplots''' # hplots/freqs visible in main vplotcontainer. Set by checkbox. 
hplot_choices = Tuple visible_frequencies = List # show intensity profiles on the side or not intensity_profile = Bool traits_view = View(Label('Select Frequencies to View'), UItem('visible_frequencies', editor=CheckListEditor(name='hplot_choices'), style='custom' ), Label('Show Intensity Profiles'), UItem('intensity_profile'), kind='modal', resizable=True ) class DataView(HasTraits): ''' Show location data as cursor moves about''' # latitude, longitude for current cursor latitude = Float(0) longitude = Float(0) # latitude, longitude for current cursor easting = Float(0) northing = Float(0) # depth of current mouse position depth = Float(0) # power & gain at current cursor power = CFloat(0) gain = CFloat(0) traits_view = View( Item('latitude'), Item('longitude'), Item('_'), Item('easting'), Item('northing'), Item('_'), Item('depth'), Item('_'), Item('power'), Item('gain'), resizable=True ) class MsgView(HasTraits): msg = Str('my msg') traits_view = View(Item('msg', editor=TextEditor(), style='custom'), buttons=['OK', 'Cancel'], kind='modal', resizable=True ) if __name__ == '__main__': pass
{ "alphanum_fraction": 0.5422647011, "author": null, "avg_line_length": 39.0696202532, "converted": null, "ext": "py", "file": null, "hexsha": "88ad22bdd8c2685baddae539e2e384c598954331", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "bb8c8775f15bdc9fa1a0a443ad0cdd9702a7e7af", "max_forks_repo_licenses": [ "CC-BY-3.0" ], "max_forks_repo_name": "dpinte/hydropick", "max_forks_repo_path": "hydropick/ui/survey_views.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "bb8c8775f15bdc9fa1a0a443ad0cdd9702a7e7af", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-3.0" ], "max_issues_repo_name": "dpinte/hydropick", "max_issues_repo_path": "hydropick/ui/survey_views.py", "max_line_length": 79, "max_stars_count": 1, "max_stars_repo_head_hexsha": "bb8c8775f15bdc9fa1a0a443ad0cdd9702a7e7af", "max_stars_repo_licenses": [ "CC-BY-3.0" ], "max_stars_repo_name": "dpinte/hydropick", "max_stars_repo_path": "hydropick/ui/survey_views.py", "max_stars_repo_stars_event_max_datetime": "2019-01-24T16:22:23.000Z", "max_stars_repo_stars_event_min_datetime": "2019-01-24T16:22:23.000Z", "num_tokens": 6160, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 30865 }
// Licensed under the Apache License, Version 2.0. // Author: Jin Qing (http://blog.csdn.net/jq0123) // Worker command shared ptr. #ifndef RPCZ_WORKER_CMD_PTR_HPP #define RPCZ_WORKER_CMD_PTR_HPP #include <boost/shared_ptr.hpp> namespace rpcz { namespace b2w { struct worker_cmd; struct handle_data_cmd; } // namespace b2w typedef boost::shared_ptr<b2w::worker_cmd> worker_cmd_ptr; typedef boost::shared_ptr<b2w::handle_data_cmd> handle_data_cmd_ptr; } // namespace rpcz #endif // RPCZ_WORKER_CMD_PTR_HPP
{ "alphanum_fraction": 0.782778865, "author": null, "avg_line_length": 25.55, "converted": null, "ext": "hpp", "file": null, "hexsha": "5b6088d8a98bf20c1479bd66efcc9c3e21685e7c", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2018-10-18T11:02:59.000Z", "max_forks_repo_forks_event_min_datetime": "2015-06-15T02:28:39.000Z", "max_forks_repo_head_hexsha": "d273dc1a8de770cb4c2ddee98c17ce60c657d6ca", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "jinq0123/rpcz", "max_forks_repo_path": "src/rpcz/worker/worker_cmd_ptr.hpp", "max_issues_count": 1, "max_issues_repo_head_hexsha": "d273dc1a8de770cb4c2ddee98c17ce60c657d6ca", "max_issues_repo_issues_event_max_datetime": "2015-11-12T10:38:21.000Z", "max_issues_repo_issues_event_min_datetime": "2015-06-19T07:54:53.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "jinq0123/rpcz", "max_issues_repo_path": "src/rpcz/worker/worker_cmd_ptr.hpp", "max_line_length": 68, "max_stars_count": 4, "max_stars_repo_head_hexsha": "d273dc1a8de770cb4c2ddee98c17ce60c657d6ca", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "jinq0123/rpcz", "max_stars_repo_path": "src/rpcz/worker/worker_cmd_ptr.hpp", "max_stars_repo_stars_event_max_datetime": "2020-11-07T02:29:59.000Z", "max_stars_repo_stars_event_min_datetime": "2015-06-14T13:38:40.000Z", "num_tokens": 140, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 511 }
""" grd2cpt(cmd0::String="", arg1=[], kwargs...) Make linear or histogram-equalized color palette table from grid Full option list at [`grd2cpt`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html) Parameters ---------- - **A** : **alpha** : **transparency** : -- Str -- Sets a constant level of transparency (0-100) for all color slices. [`-A`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#a) - $(GMT.opt_C) - **D** : -- Str or [] -- Flags = [i|o] Select the back- and foreground colors to match the colors for lowest and highest z-values in the output CPT. [`-D`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#d) - **E** : **nlevels** : -- Int or [] -- Flags = [nlevels] Create a linear color table by using the grid z-range as the new limits in the CPT. Alternatively, append nlevels and we will resample the color table into nlevels equidistant slices. [`-E`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#e) - **F** : **force_rgb** : -- Str or [] -- Flags = [R|r|h|c][+c]] Force output CPT to written with r/g/b codes, gray-scale values or color name. [`-F`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#f) - **G** : **truncate** : -- Str -- Flags = zlo/zhi Truncate the incoming CPT so that the lowest and highest z-levels are to zlo and zhi. [`-G`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#g) - **I** : **inverse** : **reverse** : -- Str -- Flags = [c][z] Reverse the sense of color progression in the master CPT. [`-I`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#i) - **L** : **range** : -- Str -- Flags = minlimit/maxlimit Limit range of CPT to minlimit/maxlimit, and don’t count data outside this range when estimating CDF(Z). [Default uses min and max of data.] [`-L`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#l) - **M** : **overrule_bg** -- Bool or [] -- Overrule background, foreground, and NaN colors specified in the master CPT with the values of the parameters COLOR_BACKGROUND, COLOR_FOREGROUND, and COLOR_NAN. 
[`-M`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#m) - **N** : **no_bg** -- Bool or [] -- Do not write out the background, foreground, and NaN-color fields. - **Q** : **log** : -- Bool or [] -- Selects a logarithmic interpolation scheme [Default is linear]. [`-Q`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#q) - **C** : **row_col** : -- Bool -- Replace the x- and y-coordinates on output with the corresponding column and row numbers. [`-C`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#c) - $(GMT.opt_R) - **S** : **steps** : -- Bool or [] or Str -- Flags = zstart/zstop/zinc or n Set steps in CPT. Calculate entries in CPT from zstart to zstop in steps of (zinc). Default chooses arbitrary values by a crazy scheme based on equidistant values for a Gaussian CDF. [`-S`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#s) - **T** : **symetric** : -- Str -- Flags = -|+|_|= Force the color table to be symmetric about zero (from -R to +R). [`-T`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#t) - $(GMT.opt_V) - **W** : **wrap** : **categorical** : -- Bool or Str or [] -- Flags = [w] Do not interpolate the input color table but pick the output colors starting at the beginning of the color table, until colors for all intervals are assigned. [`-W`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#w) - **Z** : **continuous** : -- Bool or [] -- Creates a continuous CPT [Default is discontinuous, i.e., constant colors for each interval]. [`-Z`](http://gmt.soest.hawaii.edu/doc/latest/grd2cpt.html#z) - $(GMT.opt_V) - $(GMT.opt_write) """ function grd2cpt(cmd0::String="", arg1=[]; kwargs...) 
length(kwargs) == 0 && occursin(" -", cmd0) && return monolitic("grd2cpt", cmd0, arg1) # Speedy mode d = KW(kwargs) cmd, = parse_R("", d) cmd = parse_V_params(cmd, d) cmd, arg1, arg2, = add_opt_cpt(d, cmd, [:C :color :cmap], 'C', 0, arg1) cmd = add_opt(cmd, 'A', d, [:A :alpha :transparency]) cmd = add_opt(cmd, 'D', d, [:D :bg :background]) cmd = add_opt(cmd, 'E', d, [:E :nlevels]) cmd = add_opt(cmd, 'F', d, [:F :force_rgb]) cmd = add_opt(cmd, 'G', d, [:G :truncate]) cmd = add_opt(cmd, 'I', d, [:I :inverse :reverse]) cmd = add_opt(cmd, 'L', d, [:L :limit]) cmd = add_opt(cmd, 'M', d, [:M :overrule_bg]) cmd = add_opt(cmd, 'N', d, [:N :no_bg]) cmd = add_opt(cmd, 'Q', d, [:Q :log]) cmd = add_opt(cmd, 'S', d, [:S :steps]) cmd = add_opt(cmd, 'T', d, [:T :symetric]) cmd = add_opt(cmd, 'W', d, [:W :no_interp]) cmd = add_opt(cmd, 'Z', d, [:Z :continuous]) cmd, got_fname, arg1 = find_data(d, cmd0, cmd, 1, arg1) if (isa(arg1, Array{<:Number})) arg1 = mat2grid(arg1) end return common_grd(d, cmd, got_fname, 1, "grd2cpt", arg1) # Finish build cmd and run it end # --------------------------------------------------------------------------------------------------- grd2cpt(arg1=[], cmd0::String=""; kw...) = grd2cpt(cmd0, arg1; kw...)
{ "alphanum_fraction": 0.6155665025, "author": null, "avg_line_length": 44.5175438596, "converted": null, "ext": "jl", "file": null, "hexsha": "d66acf55d340f000eee524a651958ff3166fc9e7", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "53d9c030559fe3f079f3b662ce262e7ed682d357", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "JuliaDocsForks/GMT.jl", "max_forks_repo_path": "src/grd2cpt.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "53d9c030559fe3f079f3b662ce262e7ed682d357", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "JuliaDocsForks/GMT.jl", "max_issues_repo_path": "src/grd2cpt.jl", "max_line_length": 108, "max_stars_count": null, "max_stars_repo_head_hexsha": "53d9c030559fe3f079f3b662ce262e7ed682d357", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "JuliaDocsForks/GMT.jl", "max_stars_repo_path": "src/grd2cpt.jl", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1549, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 5075 }
import numpy as np import torch import torch.nn.functional as F def cutmix(images, labels, alpha=1.0, beta=1.0, num_classes=10): """ Apply CutMix to a batch of images. Arguments: image (torch.FloatTensor): images. labels (torch.LongTensor): target labels. alpha (float): parameter for cut ratio. beta (float): parameter for cut ratio. num_classes (int): number of target classes. Returns: augmented batch of images and labels. """ batch_size, _, height, width = images.shape labels = F.one_hot(labels, num_classes) lam = np.random.beta(alpha, beta) cut_rat = np.sqrt(1. - lam) cut_w = np.array(width * cut_rat, dtype=np.int32) cut_h = np.array(height * cut_rat, dtype=np.int32) box_coords = _random_box(height, width, cut_h, cut_w) # Adjust lambda. lam = 1. - (box_coords[2] * box_coords[3] / (height * width)) idx = np.random.permutation(batch_size) def _cutmix(x, y): images_a = x images_b = x[idx, :, :, :] y = lam * y + (1. - lam) * y[idx, :] x = _compose_two_images(images_a, images_b, box_coords) return x, y return _cutmix(images, labels) def _random_box(height, width, cut_h, cut_w): """ Return a random box within the image size. """ minval_h = 0 minval_w = 0 maxval_h = height maxval_w = width i = np.random.randint(minval_h, maxval_h, dtype=np.int32) j = np.random.randint(minval_w, maxval_w, dtype=np.int32) bby1 = np.clip(i - cut_h // 2, 0, height) bbx1 = np.clip(j - cut_w // 2, 0, width) h = np.clip(i + cut_h // 2, 0, height) - bby1 w = np.clip(j + cut_w // 2, 0, width) - bbx1 return np.array([bby1, bbx1, h, w]) def _compose_two_images(images, image_permutation, bbox): """ Mix two images. """ def _single_compose_two_images(image1, image2): _, height, width = image1.shape mask = _window_mask(bbox, (height, width)) return image1 * (1. - mask) + image2 * mask new_images = [_single_compose_two_images(image1, image2) for image1, image2 in zip(images, image_permutation)] return torch.stack(new_images, dim=0) def _window_mask(destination_box, size): """ Compute window mask. 
""" height_offset, width_offset, h, w = destination_box h_range = np.reshape(np.arange(size[0]), [1, size[0], 1]) w_range = np.reshape(np.arange(size[1]), [1, 1, size[1]]) return np.logical_and( np.logical_and(height_offset <= h_range, h_range < height_offset + h), np.logical_and(width_offset <= w_range, w_range < width_offset + w) ).astype(np.float32)
{ "alphanum_fraction": 0.6261160714, "author": null, "avg_line_length": 32.3855421687, "converted": null, "ext": "py", "file": null, "hexsha": "39ad754b3e6254fa6a1de83885da72bfd7b22a7c", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 8, "max_forks_repo_forks_event_max_datetime": "2022-03-10T05:10:06.000Z", "max_forks_repo_forks_event_min_datetime": "2021-11-06T03:43:20.000Z", "max_forks_repo_head_hexsha": "bd82f9781b9f05937e62aff11b75237f1a6f9c88", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "shwinshaker/adversarial_robustness_pytorch", "max_forks_repo_path": "gowal21uncovering/utils/cutmix.py", "max_issues_count": 3, "max_issues_repo_head_hexsha": "bd82f9781b9f05937e62aff11b75237f1a6f9c88", "max_issues_repo_issues_event_max_datetime": "2022-03-24T15:18:26.000Z", "max_issues_repo_issues_event_min_datetime": "2021-10-16T08:51:05.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "shwinshaker/adversarial_robustness_pytorch", "max_issues_repo_path": "gowal21uncovering/utils/cutmix.py", "max_line_length": 114, "max_stars_count": 44, "max_stars_repo_head_hexsha": "bd82f9781b9f05937e62aff11b75237f1a6f9c88", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "shwinshaker/adversarial_robustness_pytorch", "max_stars_repo_path": "gowal21uncovering/utils/cutmix.py", "max_stars_repo_stars_event_max_datetime": "2022-03-31T01:07:26.000Z", "max_stars_repo_stars_event_min_datetime": "2021-07-05T07:30:31.000Z", "num_tokens": 768, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 2688 }
\chapter{Testing} \index{testing} Test jobs are generally contained in the source file of the package that they are testing and are extracted using a cradle composed of the package name appended by the letter T, e.g. {\bf hbookt} for HBOOK. The tests for {\bf KERNGEN} are contained in a separate PAM file, KERNGENT. \section{VM} The tests are run using e.g. \underline{{\bf SLIB MAKE TEST\_HBOOK}} \section{VMS} The tests are run using e.g. \underline{{\bf TESTPACK HBOOK}} \section{Unix} The tests are run using e.g. \underline{{\bf TESTPACK HBOOK}} \section{List of tests} \begin{DLtt}{1234567890} \item[BVSL] \item[COJETS] \item[EPT1L] \item[EPT1S] \item[EPT2L] \item[EPT2S] \item[EPT3L] \item[EPT3S] \item[EPT4L] \item[EPT4S] \item[EURODEC] \item[FFREAD] \item[FLOP] \item[FORCCR] \item[FRITIOF] \item[G321X1] \item[G321X2] \item[G321X3] \item[G321X4] \item[G321X5] \item[GARFIELD] \item[HBOOK] \item[hptGDDM] \item[hptGKS] \item[hptGL] \item[hptX11] \item[hptgks] \item[hptx11] \item[hztGDDM] \item[hztGKS] \item[hztGL] \item[hztX11] \item[hztgks] \item[hztx11] \item[ISAJET] \item[JETSET74] \item[KAPACK] \item[KERNASW] \item[KERNGEN] \item[KERNNUM] \item[KUIPC] \item[LATTCR] \item[LEPTO] \item[MINUIT] \item[MPA] \item[PAW] \item[PDFLIB] \item[PHOTOS] \item[POISCR] \item[TRIPCR] \item[ZBOOK] \item[zebfc1] \item[zebfc2] \item[zebfc3] \item[zebfz1] \item[zebfz2] \item[zebfz3] \item[zebfz4] \item[zebfz5] \item[zebfz6] \item[zebfz7] \item[zebfz8] \item[zebfz9] \item[zebjz1] \item[zebmz1] \item[zebrz1] \item[zebrz2] \item[zebtlib] \end{DLtt}
{ "alphanum_fraction": 0.7223288548, "author": null, "avg_line_length": 16.4526315789, "converted": null, "ext": "tex", "file": null, "hexsha": "bc37a630ebf847215ec737a79697ac6247c99947", "include": null, "lang": "TeX", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "berghaus/cernlib-docs", "max_forks_repo_path": "install/cerntest.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "berghaus/cernlib-docs", "max_issues_repo_path": "install/cerntest.tex", "max_line_length": 68, "max_stars_count": 1, "max_stars_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "berghaus/cernlib-docs", "max_stars_repo_path": "install/cerntest.tex", "max_stars_repo_stars_event_max_datetime": "2019-07-24T12:30:01.000Z", "max_stars_repo_stars_event_min_datetime": "2019-07-24T12:30:01.000Z", "num_tokens": 657, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 1563 }
# coding: utf-8 import numpy as np from scipy.sparse import diags from functools import singledispatch @singledispatch def mm(n_param : int, constraint="inc", dim=0): """Creates the mapping matrix for the constraint P-splines as in Fahrmeir, Regression p.436f, for the constraint. Paramters: ---------- n_param : int - Number of used B-spline basis functions. constraint : str - Type of constraint. Returns: -------- D : matrix - Finite difference matrix of shape (n_param-order x n_param) """ order = 1 if constraint in ["inc", "dec", "peak", "valley"] else 2 assert (n_param > order), "n_param needs to be larger than order!" if order == 1: d1 = np.array([-1*np.ones(n_param),np.ones(n_param)]) D = diags(d1,offsets=[0,1], shape=(n_param-order, n_param)).toarray() elif order == 2: d2 = np.array([np.ones(n_param),-2*np.ones(n_param),np.ones(n_param)]) D = diags(d2,offsets=[0,1,2], shape=(n_param-order, n_param)).toarray() if constraint == "none": D = np.zeros((n_param, n_param)) return D @mm.register def _(n_param : tuple, constraint="inc", dim=0): """Creates the mapping matrix for the constraint tensor-product P-splines as in Fahrmeir, Regression p.508 equation (8.27) for the constraint. Paramters: ---------- n_param : tuple - Numbers of used B-spline basis functions. constraint : str - Type of constraint. dim : int - Indicator for the dimension of the constraint, 0 for dimension 1, 1 for dimension 2, e.g. (10, "inc", 1) means 10 basis functions with increasing constraint in dimension 2 for the two-dimensional data X = [x_1, x_2] Returns: -------- D : matrix - Mapping matrix for the constraint and dimension. """ order = 1 if constraint in ["inc", "dec", "peak", "valley"] else 2 assert (dim in [0, 1]), "Argument 'dim' either 0 or 1." assert (n_param[0] > order and n_param[1] > order), "n_param needs to be larger than order of constraint!" 
if order == 1: d = np.array([-1*np.ones(n_param[dim]),np.ones(n_param[dim])]) D = diags(d,offsets=[0,1], shape=(n_param[dim]-order, n_param[dim])).toarray() elif order == 2: d = np.array([np.ones(n_param[dim]),-2*np.ones(n_param[dim]),np.ones(n_param[dim])]) D = diags(d,offsets=[0,1,2], shape=(n_param[dim]-order, n_param[dim])).toarray() if dim == 0: Dc = np.kron(np.eye(n_param[dim+1]), D) else: Dc = np.kron(D, np.eye(n_param[dim-1])) if constraint == "none": Dc = np.zeros((np.prod(n_param), np.prod(n_param))) return Dc def check_constraint(coef, constraint="inc", y=None, B=None): """Check whether the coefficients in coef hold true to the constraint for the B-spline coefficients. Parameters: ----------- coef : array - Array of coefficients to test against the constraint. constraint : str - Constraint type. y : array - Target data. B : matrix - B-spline basis matrix. Returns: -------- v : array - Diagonal elements of the weighting matrix V. """ threshold = 1e-4 if constraint not in ["inc", "dec", "conv","conc", "peak", "valley", "none"]: print(f"Constraint '{constraint}'' currently not available.") return if constraint == "inc": v = np.diff(coef) < threshold elif constraint == "dec": v = np.diff(coef) > -threshold elif constraint == "conv": v = np.diff(coef, 2) < threshold elif constraint == "conc": v = np.diff(coef, 2) > -threshold elif constraint == "peak": assert (np.all(y != None) and np.all(B != None)), "Include the output y and B-spline basis matrix B." peakidx = np.argmax(y) peak_spline_idx = np.argmax(B[peakidx,:]) v = list(np.diff(coef[:peak_spline_idx]) < threshold) + [0] + list(np.diff(coef[peak_spline_idx:]) > -threshold) v = np.array(v) elif constraint == "valley": assert (np.all(y != None) and np.all(B != None)), "Include the output y and B-spline basis matrix B." 
valleyidx = np.argmin(y) valley_spline_idx = np.argmax(B[valleyidx,:]) v = list(np.diff(coef[:valley_spline_idx]) > -threshold) + [0] + list(np.diff(coef[valley_spline_idx:]) < threshold) v = np.array(v) else: v = np.zeros(len(coef)) return v.astype(int) def check_constraint_full_model(model, coef=None, basis=0, y=0): """ Tests the whole model against all constraints given in the model. Parameters: ---------- model : dict - Model dictionary, output from stareg.create_model_from_description. coef : array - If None, uses coef_pls as test coefficients. basis : matrix - Basis matrix to evaluate peak/valley constraint. y : arra - Target data to evaluate peak/valley constraint Returns: -------- W : dict - Keys are the submodels, values are the weight vectors of the submodel. model : dict - Updated model, the weights are changed to be consisted with W. """ W = dict() coef_idx = 0 for submodel in model.keys(): type_ = model[submodel]["type"] len_submodel = len(model[submodel]["coef_pls"]) if coef is not None: test_coef = coef[coef_idx:len_submodel+coef_idx] #ic(test_coef.shape) else: test_coef = model[submodel]["coef_pls"] test_constraints = model[submodel]["constraint"] #ic(test_constraints) if type_.startswith("s"): v = list(check_constraint(test_coef, constraint=test_constraints, y=y, B=model[submodel]["B"])) elif type_.startswith("t"): v1 = list(check_constraint_dim1(test_coef, test_constraints[0], nr_splines=model[submodel]["nr_splines"])) v2 = list(check_constraint_dim2(test_coef, test_constraints[1], nr_splines=model[submodel]["nr_splines"])) v = dict(v1=v1, v2=v2) model[submodel]["weights"] = v W[submodel] = v coef_idx += len_submodel #ic(coef_idx) return W, model def check_constraint_full(coef_, descr, basis=0, y=0): """Checks the respective parts of the coef vector against the respective constraints. Paramters: ---------- coef_ : array - Vector of coefficients. descr : tuple - Model description. 
Returns: -------- v : list - Diagonal elements of the weighting matrix V. vc : list - Comparable version of the weighting matrix V. """ i, v, vc = 0, [], [] for e in descr: type_, nr_splines, constraints = e[0], e[1], e[2] if type_.startswith("s"): c = coef_[i:int(nr_splines)+i] vc += list(check_constraint(coef=c, constraint=constraints, y=y, B=basis[:, i:int(nr_splines)+i])) v.append(check_constraint(coef=c, constraint=constraints, y=y, B=basis[:, i:int(nr_splines)+i])) elif type_.startswith("t"): c = coef_[i:np.prod(nr_splines)+i] v1 = check_constraint_dim1(coef=c, nr_splines=nr_splines, constraint=constraints[0]) v2 = check_constraint_dim2(coef=c, nr_splines=nr_splines, constraint=constraints[1]) vc += list(v1) vc += list(v2) v.append((v1,v2)) else: print("Only B-splines (s) and tensor-product B-splines (t) are supported!") return i += np.prod(e[1]) return v, vc def check_constraint_dim2(coef, constraint="inc", nr_splines=(6,4)): """Compute the diagonal elements of the weighting matrix for SC-TP-P-splines Compute the diagonal elements of the weighting matrix for SC-TP-P-splines given the constraint for direction 2. According to the scheme given in the Master Thesis !! Parameters: ----------- coef : array - Coefficient vector to test against constraint. constraint : str - Specifies the constraint. nr_splines : list - Specifies the number of splines in each dimension Returns ------- v : array - Diagonal elements of the weighting matrix V. 
""" if constraint in ["inc", "dec"]: diff = 1 else: diff = 0 v2 = np.zeros(nr_splines[0]*(nr_splines[1]-diff)) for i in range(1, nr_splines[1]): for j in range(1, nr_splines[0]+1): # print(j+(i-1)*nr_splines[0]-1, "->", j+i*nr_splines[0]-1, "-", j+i*nr_splines[0]-nr_splines[0]-1) v2[j+(i-1)*nr_splines[0]-1] = coef[j+i*nr_splines[0]-1] - coef[j+i*nr_splines[0]-nr_splines[0]-1] if constraint == "inc": v2 = v2 < 0 elif constraint == "dec": v2 = v2 > 0 elif constraint == "none": v2 = np.zeros(v2.shape) return v2.astype(int) def check_constraint_dim1(coef, constraint="inc", nr_splines=(6,4)): """Compute the diagonal elements of the weighting matrix for SC-TP-P-splines Compute the diagonal elements of the weighting matrix for SC-TP-P-splines given the constraint for direction 1. According to the scheme given in the Master Thesis !! Parameters: ----------- coef : array - Coefficient vector to test against constraint. constraint : str - Specifies the constraint. nr_splines : list - Specifies the number of splines in each dimension Returns ------- v : array - Diagonal elements of the weighting matrix V. """ if constraint in ["inc", "dec"]: diff = 1 else: diff = 0 # first constraint in dim 1 v1 = np.zeros((nr_splines[0]-diff)*nr_splines[1]) for i in range(1,nr_splines[1]+1): for j in range(nr_splines[0]-1): # print(j+(i-1)*(nr_splines[0]-1), "->", j+(i-1)*nr_splines[0] + 1, "-", j+(i-1)*nr_splines[0]) v1[j+(i-1)*(nr_splines[0]-1)] = coef[j+(i-1)*nr_splines[0] + 1] - coef[j+(i-1)*nr_splines[0]] if constraint == "inc": v1 = v1 < 0 elif constraint == "dec": v1 = v1 > 0 elif constraint == "none": v1 = np.zeros(v1.shape) return v1.astype(int)
{ "alphanum_fraction": 0.5950261273, "author": null, "avg_line_length": 38.2740740741, "converted": null, "ext": "py", "file": null, "hexsha": "1bb8ebd06dd3c1796617ab462ae17f41e2f4690d", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e81eae3c72a452a820988f56730f1bcb72a22b65", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "j-cap/stareg", "max_forks_repo_path": "stareg/utils.py", "max_issues_count": 1, "max_issues_repo_head_hexsha": "e81eae3c72a452a820988f56730f1bcb72a22b65", "max_issues_repo_issues_event_max_datetime": "2020-07-20T17:15:15.000Z", "max_issues_repo_issues_event_min_datetime": "2020-07-20T17:15:15.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "j-cap/stareg", "max_issues_repo_path": "stareg/utils.py", "max_line_length": 124, "max_stars_count": 1, "max_stars_repo_head_hexsha": "e81eae3c72a452a820988f56730f1bcb72a22b65", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "j-cap/stareg", "max_stars_repo_path": "stareg/utils.py", "max_stars_repo_stars_event_max_datetime": "2022-01-03T19:37:47.000Z", "max_stars_repo_stars_event_min_datetime": "2022-01-03T19:37:47.000Z", "num_tokens": 2826, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 10334 }
// Boost.Geometry (aka GGL, Generic Geometry Library) // Copyright (c) 2007-2012 Barend Gehrels, Amsterdam, the Netherlands. // This file was modified by Oracle on 2017. // Modifications copyright (c) 2017 Oracle and/or its affiliates. // Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle // Use, modification and distribution is subject to the Boost Software License, // Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_GEOMETRY_ALGORITHMS_DETAIL_OVERLAY_GET_INTERSECTION_POINTS_HPP #define BOOST_GEOMETRY_ALGORITHMS_DETAIL_OVERLAY_GET_INTERSECTION_POINTS_HPP #include <cstddef> #include <boost/mpl/if.hpp> #include <boost/range.hpp> #include <boost/geometry/algorithms/convert.hpp> #include <boost/geometry/algorithms/detail/overlay/get_turns.hpp> #include <boost/geometry/policies/robustness/rescale_policy_tags.hpp> #include <boost/geometry/geometries/segment.hpp> namespace boost { namespace geometry { #ifndef DOXYGEN_NO_DETAIL namespace detail { namespace get_intersection_points { template < typename Point1, typename Point2, typename TurnInfo > struct get_turn_without_info { template < typename UniqueSubRange1, typename UniqueSubRange2, typename Strategy, typename RobustPolicy, typename OutputIterator > static inline OutputIterator apply(UniqueSubRange1 const& range_p, UniqueSubRange2 const& range_q, TurnInfo const& , Strategy const& strategy, RobustPolicy const& , OutputIterator out) { // Make sure this is only called with no rescaling BOOST_STATIC_ASSERT((boost::is_same < no_rescale_policy_tag, typename rescale_policy_type<RobustPolicy>::type >::value)); typedef typename TurnInfo::point_type turn_point_type; typedef policies::relate::segments_intersection_points < segment_intersection_points<turn_point_type> > policy_type; typename policy_type::return_type const result = strategy.apply(range_p, range_q, policy_type()); for (std::size_t i = 0; i < result.count; i++) { TurnInfo tp; 
geometry::convert(result.intersections[i], tp.point); *out++ = tp; } return out; } }; }} // namespace detail::get_intersection_points #endif // DOXYGEN_NO_DETAIL template < typename Geometry1, typename Geometry2, typename RobustPolicy, typename Turns, typename Strategy > inline void get_intersection_points(Geometry1 const& geometry1, Geometry2 const& geometry2, RobustPolicy const& robust_policy, Turns& turns, Strategy const& strategy) { concepts::check_concepts_and_equal_dimensions<Geometry1 const, Geometry2 const>(); typedef detail::get_intersection_points::get_turn_without_info < typename point_type<Geometry1>::type, typename point_type<Geometry2>::type, typename boost::range_value<Turns>::type > TurnPolicy; detail::get_turns::no_interrupt_policy interrupt_policy; boost::mpl::if_c < reverse_dispatch<Geometry1, Geometry2>::type::value, dispatch::get_turns_reversed < typename tag<Geometry1>::type, typename tag<Geometry2>::type, Geometry1, Geometry2, false, false, TurnPolicy >, dispatch::get_turns < typename tag<Geometry1>::type, typename tag<Geometry2>::type, Geometry1, Geometry2, false, false, TurnPolicy > >::type::apply( 0, geometry1, 1, geometry2, strategy, robust_policy, turns, interrupt_policy); } }} // namespace boost::geometry #endif // BOOST_GEOMETRY_ALGORITHMS_DETAIL_OVERLAY_GET_INTERSECTION_POINTS_HPP
{ "alphanum_fraction": 0.6309495897, "author": null, "avg_line_length": 27.8758169935, "converted": null, "ext": "hpp", "file": null, "hexsha": "4b0e9ed794a52b9baac5fbc21616e55663284943", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 15, "max_forks_repo_forks_event_max_datetime": "2022-02-12T18:53:47.000Z", "max_forks_repo_forks_event_min_datetime": "2019-05-11T04:03:35.000Z", "max_forks_repo_head_hexsha": "38c6c8c07f2fcc42d573b10807fef27ec14930f8", "max_forks_repo_licenses": [ "BSL-1.0" ], "max_forks_repo_name": "cpp-pm/boost", "max_forks_repo_path": "boost/geometry/algorithms/detail/overlay/get_intersection_points.hpp", "max_issues_count": 157, "max_issues_repo_head_hexsha": "38c6c8c07f2fcc42d573b10807fef27ec14930f8", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:50:28.000Z", "max_issues_repo_issues_event_min_datetime": "2019-02-06T05:04:20.000Z", "max_issues_repo_licenses": [ "BSL-1.0" ], "max_issues_repo_name": "cpp-pm/boost", "max_issues_repo_path": "boost/geometry/algorithms/detail/overlay/get_intersection_points.hpp", "max_line_length": 86, "max_stars_count": 101, "max_stars_repo_head_hexsha": "38c6c8c07f2fcc42d573b10807fef27ec14930f8", "max_stars_repo_licenses": [ "BSL-1.0" ], "max_stars_repo_name": "cpp-pm/boost", "max_stars_repo_path": "boost/geometry/algorithms/detail/overlay/get_intersection_points.hpp", "max_stars_repo_stars_event_max_datetime": "2022-03-14T14:14:38.000Z", "max_stars_repo_stars_event_min_datetime": "2019-02-12T12:53:18.000Z", "num_tokens": 881, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 4265 }
''' 1. normalize by mean of each diagonal - args: mat: numpy.ndarray norm = normalizebydistance(mat) 2. calculate correlation between each column corr = correlation(norm, method='pearson', center=True) 3. perform eigenvalue decomposition - args: nc: number of eigenvectors returned pc = decomposition(corr, method='eig', nc=nc) 4. plot AB compartment fig = plot(corr, pc) fig.savefig(...) ''' import os import argparse import numpy as np import matplotlib.pyplot as plt from scipy.stats import spearmanr from scipy.stats import pearsonr from scipy.linalg import eig from sklearn.decomposition import PCA from mpl_toolkits.axes_grid1 import make_axes_locatable from scipy.optimize import curve_fit from scipy.spatial.distance import pdist, squareform def fill_diagonal(mat, k, val): ''' Fill the k-th diagonal of the given 2-d square array. :param mat: array. :param k: int. if positive, above the main diagonal, else, below the main diagonal. :param val: scalar. ''' if mat.ndim != 2 or mat.shape[0] != mat.shape[1]: raise ValueError("mat should be a 2-d square array.") n = mat.shape[1] if abs(k) > n - 1: raise ValueError("k should not larger than n-1.") if k >= 0: start = k end = n * n - k * n else: start = (-k) * n end = n * n + k step = n + 1 mat.flat[start:end:step] = val def normalizebydistance_(mat): x = np.zeros_like(mat, dtype=float) n = x.shape[0] margin = mat.sum(axis=0) # fill diagonal with np.nan np.fill_diagonal(x, np.nan) # fill the first diagonal with np.nan if all zeros if np.nansum(np.diagonal(mat, offset=1)) == 0: fill_diagonal(x, k=1, val=np.nan) fill_diagonal(x, k=-1, val=np.nan) # fill row/col with np.nan if all zeros x[margin==0, :] = np.nan x[:, margin==0] = np.nan diagmean = np.ones(n, dtype=float) for d in range(1, n): diag = np.diagonal(mat, offset=d) m = np.nanmean(diag) if ~np.isnan(m) and m > 0: diagmean[d] = m for i in range(n): for j in range(i+1, n): x[i, j] = mat[i, j] / diagmean[abs(i-j)] x[j, i] = x[i, j] return x def fit_genomic_spatial_func(x, 
a, b): return (x**b)/a def normalizebydistance(mat, genomic_index=None): # mtype='3d' or 'fish' if genomic_index is None: return normalizebydistance_(mat) gen_idx = np.zeros(len(genomic_index)) for i, index in enumerate(genomic_index): gen_idx[i] = np.nanmean(index) genomic_dis = pdist(gen_idx.reshape(-1,1)) msku = np.zeros_like(mat) msku[np.triu_indices_from(msku, k=1)] = True triu_mat = mat[msku.astype(bool)].flatten() popt, pcov = curve_fit(fit_genomic_spatial_func, genomic_dis, triu_mat) print('power law parameters: ', popt) a = fit_genomic_spatial_func(genomic_dis, *popt) expected_triu = squareform(a) np.fill_diagonal(expected_triu, 1) res = mat.astype(float)/expected_triu.astype(float) return res, popt def centering(mat): x = np.array(mat, copy=True, dtype=float) n = x.shape[0] # substract row mean from each row for i in range(n): m = np.nanmean(x[i, :]) x[i, :] = x[i, :] - m return x """def correlation(mat, method='pearson', center=True): x = np.array(mat, copy=True, dtype=float) n = x.shape[0] # substract row mean from each row if center: # for i in range(n): # m = np.nanmean(x[i, :]) # x[i, :] = x[i, :] - m m = np.nanmean(x, axis=1, keepdims=True) x = x - m # corr = np.zeros((n, n), dtype=float) # for i in range(n): # for j in range(i, n): # nas = np.logical_or(np.isnan(x[:, i]), np.isnan(x[:, j])) # if method == 'spearman': # corr[i, j], _ = spearmanr(x[~nas, i], x[~nas, j]) # else: # corr[i, j], _ = pearsonr(x[~nas, i], x[~nas, j]) # corr[j, i] = corr[i, j] pearson = 1 - pdist(x, 'correlation') corr = squareform(pearson).astype(float) # keep NAs # corr[np.isnan(corr)] = 0 return corr""" def correlation(mat, center=True): # 'pearson' correlation x = np.array(mat, copy=True, dtype=float) n = x.shape[0] if center: m = np.nanmean(x, axis=1, keepdims=True) x = x - m pearson = 1 - pdist(x, 'correlation') corr = squareform(pearson).astype(float) return corr def decomposition(mat, method='eig', nc=2): # remove row/col with NAs n = mat.shape[0] nas = 
(~np.isnan(mat)).sum(axis=0) == 0 if method == 'eig': _, v = eig(mat[~nas, :][:, ~nas]) eigenvec = np.full((n, nc), np.nan, dtype=float) eigenvec[~nas, :] = v[:, :nc] return eigenvec else: pca = PCA(n_components=nc) # nas = np.isnan(mat).sum(axis=0) > 0 pc = pca.fit_transform(mat[~nas, :][:, ~nas]) eigenvec = np.full((n, nc), np.nan, dtype=float) eigenvec[~nas, :] = pc return eigenvec def plot(mat, pc, title=None, start=0, locs=None, figure=(5, 5) ): fig = plt.figure(figsize=figure) axmatrix = plt.subplot(111) n, nc = pc.shape im = axmatrix.matshow(mat, vmin=-1, vmax=1, cmap='bwr') if title is not None: plt.title(title) divider = make_axes_locatable(axmatrix) cax = divider.append_axes("right", size="5%", pad=0.05) plt.colorbar(im, cax=cax) for i in range(nc): cax = divider.append_axes("bottom", size="6%", pad=0.25) cax.set_xlim([start, start+n]) cax.set_yticks([0]) rg = np.arange(start, start+n) pos = pc[:, i] > 0 neg = pc[:, i] < 0 cax.bar(rg[pos], pc[pos, i], color='red', width=1) cax.bar(rg[neg], pc[neg, i], color='blue', width=1) if locs is not None: for loc in locs: if loc[0] >= start and loc[0] < start+n: cax.axvspan(loc[0], min(loc[1]+1, start+n-1), facecolor='black', alpha=0.3) return fig
{ "alphanum_fraction": 0.5816811404, "author": null, "avg_line_length": 31.2974358974, "converted": null, "ext": "py", "file": null, "hexsha": "133960b0ef13c81e7a5410ddfd4810463c3d7693", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8b22675827c3df34759424d0b3dd776ad7466775", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "wmalab/GIST", "max_forks_repo_path": "validation/ab.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "8b22675827c3df34759424d0b3dd776ad7466775", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "wmalab/GIST", "max_issues_repo_path": "validation/ab.py", "max_line_length": 95, "max_stars_count": null, "max_stars_repo_head_hexsha": "8b22675827c3df34759424d0b3dd776ad7466775", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "wmalab/GIST", "max_stars_repo_path": "validation/ab.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1840, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 6103 }
import os
import shutil
import numpy as np
import pandas as pd
from collections import defaultdict

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator


def tabulate_events(dpath):
    """Load scalar summaries from the single TensorBoard event file under `dpath`.

    Parameters:
        dpath (str): directory containing exactly one file whose name starts
            with 'events'.

    Returns:
        (out, steps): `out` maps each scalar tag to a list of per-step value
        lists; `steps` is the list of step indices of the last tag iterated.

    Raises:
        AssertionError: if `dpath` does not contain exactly one event file,
            or if steps disagree across accumulators for the same tag.
    """
    summary_iterators = [EventAccumulator(os.path.join(dpath, dname)).Reload()
                         for dname in os.listdir(dpath) if dname.startswith('events')]
    assert len(summary_iterators) == 1

    tags = set(*[si.Tags()['scalars'] for si in summary_iterators])
    out = defaultdict(list)
    steps = []
    for tag in tags:
        steps = [e.step for e in summary_iterators[0].Scalars(tag)]
        for events in zip(*[acc.Scalars(tag) for acc in summary_iterators]):
            # All accumulators must report the same step for aligned events.
            assert len(set(e.step for e in events)) == 1
            out[tag].append([e.value for e in events])
    return out, steps


def to_csv(dpath):
    """Convert the event file in `dpath` into `dpath`/logger.csv (one column per tag)."""
    d, steps = tabulate_events(dpath)
    tags, values = zip(*d.items())
    np_values = np.array(values)
    # Column i holds the first (only) accumulator's values for tags[i].
    df = pd.DataFrame(dict((f"{tags[i]}", np_values[i][:, 0]) for i in range(np_values.shape[0])),
                      index=steps, columns=tags)
    df.to_csv(os.path.join(dpath, "logger.csv"))


def read_event(path):
    """Convert the event file under `path` to CSV and return it as a DataFrame."""
    to_csv(path)
    return pd.read_csv(os.path.join(path, "logger.csv"), index_col=0)


def empty_dir(folder):
    """Delete every file, symlink, and subdirectory inside `folder` (best effort).

    `folder` itself is kept. Failures are reported to stdout rather than raised,
    preserving the original best-effort semantics.
    """
    if os.path.exists(folder):
        for filename in os.listdir(folder):
            file_path = os.path.join(folder, filename)
            try:
                if os.path.isfile(file_path) or os.path.islink(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    # Bug fix: shutil was used here without being imported,
                    # which raised NameError on the first subdirectory.
                    shutil.rmtree(file_path)
            except Exception as e:
                print('Failed to delete %s. Reason: %s' % (file_path, e))
{ "alphanum_fraction": 0.6419036564, "author": null, "avg_line_length": 38.2888888889, "converted": null, "ext": "py", "file": null, "hexsha": "e013fa846f3dab48cba955c8f84ab5aa0c7ee097", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2022-01-16T11:35:45.000Z", "max_forks_repo_forks_event_min_datetime": "2022-01-16T11:35:45.000Z", "max_forks_repo_head_hexsha": "1b5077342756ba6dc587a2af49abd2451319e5df", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "super-dainiu/DATA130007.01-Community-Detection-Link-Prediction-and-Node-Classification-on-Ego-Facebook-and-Cites", "max_forks_repo_path": "node-classification/SSP/ssp-master/experiments/utils.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "1b5077342756ba6dc587a2af49abd2451319e5df", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "super-dainiu/DATA130007.01-Community-Detection-Link-Prediction-and-Node-Classification-on-Ego-Facebook-and-Cites", "max_issues_repo_path": "node-classification/SSP/ssp-master/experiments/utils.py", "max_line_length": 140, "max_stars_count": 3, "max_stars_repo_head_hexsha": "1b5077342756ba6dc587a2af49abd2451319e5df", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Super-Dainiu/DATA130007.01-Community-Detection-Link-Prediction-and-Node-Classification-on-Ego-Facebook-and-Cites", "max_stars_repo_path": "node-classification/SSP/ssp-master/experiments/utils.py", "max_stars_repo_stars_event_max_datetime": "2022-01-14T08:36:02.000Z", "max_stars_repo_stars_event_min_datetime": "2021-07-04T04:32:33.000Z", "num_tokens": 418, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1723 }
using MeshArrays, OceanStateEstimation, NetCDF

# Pull in the demo helpers (demo2, simple_periodic_domain, ...) shipped with MeshArrays.
p=dirname(pathof(MeshArrays))
include(joinpath(p,"../examples/Demos.jl"))

"""
    random_flow_field(;np=12,nq=18)

Set up a random flow field over a gridded domain of size np,nq

```
ϕ,u,v=random_flow_field()
```
"""
function random_flow_field(;np=12,nq=18)
    Γ=simple_periodic_domain(np,nq)

    # demo2 returns a randomly generated potential field ϕ; halve its amplitude.
    (_,ϕ,_,_)=demo2(Γ)
    ϕ .*= 0.5

    #For the convergent / scalar potential case, ϕ is interpreted as being
    #on center points -- hence the standard gradient function readily gives
    #what we need
    #(u,v)=gradient(ϕ,Γ)
    #return u[1],v[1],ϕ[1]

    #For the rotational / streamfunction case, ϕ is interpreted as being
    #on S/W corner points -- this is ok since the grid is homogeneous,
    #and conveniently yields an adequate substitution u,v <- -v,u; but note
    #that doing the same with gradient() would shift indices inconsistently
    u=-(circshift(ϕ[1], (0,-1))-ϕ[1])
    v=(circshift(ϕ[1], (-1,0))-ϕ[1])

    return u,v,ϕ[1]
end

"""
    solid_body_rotation(np,nz)

Set up an idealized flow field which consists of
[rigid body rotation](https://en.wikipedia.org/wiki/Rigid_body),
plus a convergent term, plus a sinking term.

```
u,v,w=solid_body_rotation(12,4)
```
"""
function solid_body_rotation(np,nz)
    Γ=simple_periodic_domain(np);
    γ=Γ.XC.grid;

    #Solid-body rotation around central location ...
    i=Int(np/2+1)
    u=-(Γ.YG.-Γ.YG[1][i,i])
    v=(Γ.XG.-Γ.XG[1][i,i])

    #... plus a convergent term to / from central location
    d=-0.01
    u=u+d*(Γ.XG.-Γ.XG[1][i,i])
    v=v+d*(Γ.YG.-Γ.YG[1][i,i])

    #Replicate u,v in vertical dimension
    uu=MeshArray(γ,γ.ioPrec,nz)
    [uu[k]=u[1] for k=1:nz]
    vv=MeshArray(γ,γ.ioPrec,nz)
    [vv[k]=v[1] for k=1:nz]

    #Vertical velocity component w (uniform sinking)
    w=fill(-0.01,MeshArray(γ,γ.ioPrec,nz+1))

    # write() flattens the MeshArrays into plain arrays for the caller.
    return write(uu),write(vv),write(w)
end

"""
    global_ocean_circulation(;k=10,ny=2)

Set up Global Ocean particle simulation in 2D with seasonally varying flow field.

```
𝑃,𝐷=global_ocean_circulation(k=10,ny=2);
```
"""
function global_ocean_circulation(;k=1,ny=2)
    # NOTE(review): the docstring example uses k=10 but the declared default is
    # k=1, and the keyword `ny` is not used inside this body — confirm intent.

    #k=10 #choice of vertical level
    #ny=2 #number of simulated years (20 for k>20)
    r_reset = 0.01 #fraction of the particles reset per month (0.05 for k<=10)

    #read grid and set up connections between subdomains
    p=dirname(pathof(IndividualDisplacements))
    γ=GridSpec("LatLonCap",MeshArrays.GRID_LLC90)
    Γ=GridLoad(γ)
    Γ=merge(Γ,NeighborTileIndices_cs(Γ))
    # NOTE(review): this closure captures 𝐷, which is only bound further down by
    # set_up_FlowFields — it works because Julia closures capture the binding,
    # but the closure must not be invoked before 𝐷 is assigned.
    func=(u -> update_location_llc!(u,𝐷))
    Γ=merge(Γ,(; update_location! = func))

    #initialize u0,u1 etc
    𝑃,𝐷=set_up_FlowFields(k,Γ,ECCOclim_path);

    #add parameters for use in reset!
    tmp=(frac=r_reset, Γ=Γ)
    𝐷=merge(𝐷,tmp)

    return 𝑃,𝐷
end

"""
    OCCA_FlowFields(;backward_in_time::Bool=false,nmax=Inf)

Define gridded variables and return result as NamedTuple
"""
function OCCA_FlowFields(;backward_in_time::Bool=false,nmax=Inf)
    γ=GridSpec("PeriodicChannel",MeshArrays.GRID_LL360)
    Γ=GridLoad(γ)
    # n = number of vertical levels actually used (capped by nmax when finite).
    n=length(Γ.RC)
    isfinite(nmax) ? n=min(n,Int(nmax)) : nothing

    g=Γ.XC.grid
    func=(u -> IndividualDisplacements.update_location_dpdo!(u,g))

    # Drop grid variables that are not needed downstream.
    jj=[:hFacC, :hFacW, :hFacS, :DXG, :DYG, :RAC, :RAZ, :RAS]
    ii=findall([!in(i,jj) for i in keys(Γ)])
    Γ=(; zip(Symbol.(keys(Γ)[ii]), values(Γ)[ii])...)

    # s flips the sign of all velocities for backward-in-time integration.
    backward_in_time ? s=-1.0 : s=1.0
    s=Float32(s)

    # rd: read the first n levels of `varname` and average the 12 monthly
    # records into an annual climatology; large negative fill values -> 0.
    function rd(filename, varname,n)
        fil = NetCDF.open(filename, varname)
        siz = size(fil)
        tmp = zeros(siz[1:2]...,n)
        [tmp .+= fil[:,:,1:n,t] for t=1:12]
        tmp ./= 12.0
        tmp[findall(tmp.<-1e22)] .= 0.0
        return tmp
    end

    fileIn=OCCAclim_path*"DDuvel.0406clim.nc"
    u=s*read(rd(fileIn,"u",n),MeshArray(γ,Float32,n))

    fileIn=OCCAclim_path*"DDvvel.0406clim.nc"
    v=s*read(rd(fileIn,"v",n),MeshArray(γ,Float32,n))

    fileIn=OCCAclim_path*"DDwvel.0406clim.nc"
    w=s*rd(fileIn,"w",n)
    # Append a zero bottom level and zero out the surface level of w.
    w=-cat(w,zeros(360, 160),dims=3)
    w[:,:,1] .=0.0
    w=read(w,MeshArray(γ,Float32,n+1))

    fileIn=OCCAclim_path*"DDtheta.0406clim.nc"
    θ=read(rd(fileIn,"theta",n),MeshArray(γ,Float32,n))

#   fileIn=OCCAclim_path*"DDsalt.0406clim.nc"
#   𝑆=read(rd(fileIn,"salt",n),MeshArray(γ,Float64,n))

    # Normalize velocities to "grid units per second" by dividing by cell spacing.
    for i in eachindex(u)
        u[i]=u[i]./Γ.DXC[1]
        v[i]=v[i]./Γ.DYC[1]
    end

    # Recenter all fields by 180 degrees of longitude.
    for i in eachindex(u)
        u[i]=circshift(u[i],[-180 0])
        v[i]=circshift(v[i],[-180 0])
        θ[i]=circshift(θ[i],[-180 0])
#       𝑆[i]=circshift(𝑆[i],[-180 0])
    end

    for i in eachindex(w)
        w[i]=w[i]./Γ.DRC[min(i[2]+1,n)]
        w[i]=circshift(w[i],[-180 0])
    end

    # Shift grid coordinates accordingly so longitudes run from -180 to 180.
    tmpx=circshift(Γ.XC[1],[-180 0])
    tmpx[1:180,:]=tmpx[1:180,:] .- 360.0
    Γ.XC[1]=tmpx

    tmpx=circshift(Γ.XG[1],[-180 0])
    tmpx[1:180,:]=tmpx[1:180,:] .- 360.0
    Γ.XG[1]=tmpx
    Γ.Depth[1]=circshift(Γ.Depth[1],[-180 0])

    # Two-year time span (seconds); 366-day years per the constant below.
    t0=0.0; t1=86400*366*2.0;

    # Exchange halo regions so interpolation near tile edges works.
    for k=1:n
        (tmpu,tmpv)=exchange(u[:,k],v[:,k],1)
        u[:,k]=tmpu
        v[:,k]=tmpv
    end
    for k=1:n+1
        tmpw=exchange(w[:,k],1)
        w[:,k]=tmpw
    end

    # Steady flow: identical fields at both ends of the time interval.
    𝑃=FlowFields(u,u,v,v,w,w,[t0,t1],func)

    𝐷 = (θ0=θ, θ1=θ, XC=exchange(Γ.XC), YC=exchange(Γ.YC),
         RF=Γ.RF, RC=Γ.RC,ioSize=(360,160,n))

    return 𝑃,𝐷,Γ
end

"""
    test1_setup()

Call `gcmgrid`, initialize a single point,
rely on `dxdt!`, and just output `sol` at the end.

```
using IndividualDisplacements, MeshArrays, OrdinaryDiffEq
𝑃,sol=test1_setup()
```
"""
function test1_setup()
    # 80x42 lat-lon grid with 5 km spacing, mirroring MITgcm's flt_example.
    mygrid=gcmgrid("flt_example/","ll",1,[(80,42)], [80 42], Float32, read, write)
    XC=MeshArray(mygrid,Float32);
    XC[1]=vec(2500.:5000.:397500.0)*ones(1,42);
    XG=MeshArray(mygrid,Float32);
    XG[1]=vec(0.:5000.:395000.0)*ones(1,42);
    YC=MeshArray(mygrid,Float32);
    YC[1]=ones(80,1)*transpose(vec(2500.:5000.:207500.0));
    YG=MeshArray(mygrid,Float32);
    YG[1]=ones(80,1)*transpose(vec(0.:5000.:205000.0));

    dx=5000.0
    t0=0.0; t1=18001.0*3600.0
    # Solid-body rotation about grid cell (40,21), converted to grid units.
    u=-(YG.-YC[1][40,21])/2000000.
    v=(XG.-XC[1][40,21])/2000000.
    u0=u./dx; u1=u./dx
    v0=v./dx; v1=v./dx

    𝑃=FlowFields(u0[1], u1[1], v0[1], v1[1], [t0,t1])

    # Single-particle initial position (in grid units).
    u0=[200000.0;0.0]./dx
    du=fill(0.0,2);  # NOTE(review): `du` appears unused here — confirm
    prob = ODEProblem(dxdt!,u0,[0.0,2998*3600.0],𝑃)
    sol = solve(prob,Tsit5(),reltol=1e-8,abstol=1e-8)

    return 𝑃,sol
end

"""
    test2_periodic_domain(np = 12, nq = 12)

Call `simple_periodic_domain`, initialize 6x6 point cloud,
rely on `dxdt!`, and call `postprocess_xy` at the end.

```
using IndividualDisplacements, MeshArrays, OrdinaryDiffEq
df,𝑃=test2_periodic_domain()

using Plots
@gif for t in 𝑃.t0:1.0:𝑃.t1
   scatter_subset(𝑃,df,t)
end
```
"""
function test2_periodic_domain(np = 12, nq = 12)
    #domain and time parameters
    Γ = simple_periodic_domain(np, nq)

    # Uniform flow in grid units; exchange fills halo regions.
    u = 0.1 ./ Γ.DXC
    v = 0.3 ./ Γ.DYC
    (u, v) = exchange(u, v, 1)

    f = (u -> IndividualDisplacements.update_location_dpdo!(u,Γ.XC.grid))
    𝑃=FlowFields(u,u,v,v,[0.0,400.0],f)

    #initial conditions: regular cloud of points in the domain interior
    x0 = np * (0.4:0.04:0.6)
    y0 = nq * (0.4:0.04:0.6)
    x0 = vec(x0) * ones(1, length(y0))
    y0 = ones(size(x0, 1), 1) * transpose(vec(y0))

    # Each column is [x; y; subdomain index].
    u0 = permutedims([[x0[i];y0[i];1.0] for i in eachindex(x0)])
    du=0*u0  # NOTE(review): `du` appears unused here — confirm

    #solve for trajectories
    prob = ODEProblem(dxdt!, u0, 𝑃.𝑇, 𝑃)
    sol = solve(prob,Euler(),dt=0.1)

    return postprocess_xy(sol, 𝑃),𝑃
end
{ "alphanum_fraction": 0.6327186365, "author": null, "avg_line_length": 25.3829787234, "converted": null, "ext": "jl", "file": null, "hexsha": "e9a15ac8d09950fa2715d62b3b1d718a6c885cc8", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "ffa890fd25897ae0d32af4c67741fab0b086592e", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "hdrake/IndividualDisplacements.jl", "max_forks_repo_path": "examples/flow_fields.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "ffa890fd25897ae0d32af4c67741fab0b086592e", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "hdrake/IndividualDisplacements.jl", "max_issues_repo_path": "examples/flow_fields.jl", "max_line_length": 88, "max_stars_count": null, "max_stars_repo_head_hexsha": "ffa890fd25897ae0d32af4c67741fab0b086592e", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "hdrake/IndividualDisplacements.jl", "max_stars_repo_path": "examples/flow_fields.jl", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2841, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 7158 }
import argparse
import numpy as np
import os
import matplotlib as mpl
mpl.use('Agg')  # BUG FIX: select the non-interactive backend BEFORE importing pyplot
import matplotlib.pyplot as plt
import pandas

"""IMPORTANT"""
# NOTE: this script assumes it is run in DReyeVR/Diagnostics/
assert("Diagnostics" in os.getcwd())


def plot_many_versus(data_x, data_ys, units="", name_x="X", name_y="Y", trim=(0, 0),
                     points=True, lines=False,
                     colours=["r", "g", "b", "c", "m", "y", "k"],
                     xbounds=None, ybounds=None, dir_path="results",
                     t_carla=(None, None)):
    """Plot one or more y-series against a common x-series and save to a PNG.

    data_x     -- common x values (sequence)
    data_ys    -- list of y-series, plotted in rotating colours
    trim       -- (start, end) number of samples to drop from both axes
    t_carla    -- (t_start, t_end); vertical cyan lines marking CARLA's lifetime
    The figure is written to dir_path/<name_x>_vs_<name_y>.png.
    """
    # trim the starts and end of data
    trim_start, trim_end = trim
    max_len = min(len(data_x), min(len(d) for d in data_ys))
    data_x = data_x[trim_start:max_len - trim_end]
    # BUG FIX: the original rebound the loop variable (`for data_y in data_ys:
    # data_y = data_y[...]`) which never trimmed the series, leaving them a
    # different length than the trimmed data_x.
    data_ys = [d[trim_start:max_len - trim_end] for d in data_ys]

    fig = plt.figure()

    # the axis limits and grid lines
    plt.grid(True)
    units_str = " (" + units + ")" if units != "" else ""
    trim_str = " [" + str(trim_start) + ", " + str(trim_end) + "]"

    # label your graph, axes, and ticks on each axis
    plt.xlabel(name_x + units_str, fontsize=16)
    plt.ylabel(name_y + units_str, fontsize=16)
    if ybounds:
        plt.ylim(ybounds)
    if xbounds:
        plt.xlim(xbounds)
    plt.xticks()
    plt.yticks()
    plt.tick_params(labelsize=15)
    if(name_x == ""):
        plt.title(name_y + trim_str, fontsize=18)
    else:
        plt.title(name_x + " versus " + name_y + trim_str, fontsize=18)

    # plot data
    for i, data_y in enumerate(data_ys):
        colour = colours[i % len(colours)]
        if points:
            plt.plot(data_x, data_y, colour + "o")
        if lines:
            plt.plot(data_x, data_y, color=colour, linewidth=1)

    # add vertical markers for when CARLA started/ended
    t_carla_start, t_carla_end = t_carla
    if(t_carla_start is not None):
        y = np.arange(int(np.max(data_ys)) + 5)
        x = np.ones_like(y) * t_carla_start
        plt.plot(x, y, color='c', linewidth=1)
    if(t_carla_end is not None):
        y = np.arange(np.max(data_ys) + 5)
        x = np.ones_like(y) * t_carla_end
        plt.plot(x, y, color='c', linewidth=1)

    # complete the layout, save figure to disk, and close it
    plt.tight_layout()
    if not os.path.exists(os.path.join(os.getcwd(), dir_path)):
        os.makedirs(dir_path, exist_ok=True)
    filename = name_x + "_vs_" + name_y + '.png' if name_x != "" else name_y + ".png"
    fig.savefig(os.path.join(dir_path, filename))
    plt.close(fig)
    print("Plotted", filename)


def read_from_file(datadir, filename):
    """Parse a collectl CSV in `datadir` into a DataFrame; exits if missing."""
    print("Looking for", filename)
    if not os.path.exists(os.path.join(os.getcwd(), datadir)):
        print("ERROR: could not find", datadir, "in cwd")
        os._exit(1)
    filepath = os.path.join(os.getcwd(), datadir, filename)
    with open(filepath, "r") as file:
        # the command 'collectl -sCm -oT -P -i 0.5 --sep ","' prints
        # and if closed via ctrl+C, outputs "Ouch!" at the end
        # simple fix: skip first line (header) and last line (footer)
        df = pandas.read_csv(file, skipfooter=1, engine='python')
        print("Successfully found and parsed", filename)
        return df


def convert_to_seconds(data_str):
    """Convert an iterable of 'HH:MM:SS[.fff]' strings into seconds (np.array)."""
    arr = []
    for x in data_str:
        (h, m, s) = x.split(':')
        t = int(h) * (60 * 60) + int(m) * 60 + float(s)
        arr.append(t)
    return np.array(arr)


def get_cpu_data_from(df, bounds):
    """Extract per-core '[CPU:i]Totl%' columns for i in range(*bounds)."""
    start, end = bounds
    cpus = []
    for i in range(start, end, 1):
        cpu_str = "[CPU:" + str(i) + "]Totl%"
        cpus.append(df[cpu_str].to_numpy())
    return cpus


def main(sys_data_df, dir_path):
    """Plot fps, CPU, memory and GPU diagnostics from a collectl DataFrame."""
    # get system time (relative, in seconds)
    sys_time = sys_data_df["Time"].to_numpy()
    sys_time = convert_to_seconds(sys_time)
    # get cpu usages
    cpu_usages = get_cpu_data_from(sys_data_df, (0, 7))
    time = sys_time - sys_time[0]  # zero out (relative time)

    # time when carla started/ended; run_collectl.sh makes entire columns for these
    t_start_str = sys_data_df["[CARLA]t_start"].to_numpy()[0]
    t_end_str = sys_data_df["[CARLA]t_end"].to_numpy()[0]
    t_carla_start = convert_to_seconds([t_start_str])[0] - sys_time[0]
    t_carla_end = convert_to_seconds([t_end_str])[0] - sys_time[0]
    t_carla = (t_carla_start, t_carla_end)

    average_cpu = np.mean(cpu_usages, axis=0)

    # plot carla fps
    carla_fps = sys_data_df["[CARLA]Fps"].to_numpy()
    carla_fps = carla_fps[~np.isnan(carla_fps)]  # strip away the np.nan's
    # pad the carla fps data to start when the t_carla_end occurs
    start_pads = len(time) - len(carla_fps) - len(time[time > t_carla_end])
    carla_fps = np.pad(carla_fps, (start_pads, 0), 'constant', constant_values=0)
    plot_many_versus(time, [carla_fps], name_x="time", name_y="fps",
                     dir_path=dir_path, lines=True, points=False, t_carla=t_carla)

    # plot individual core usage
    plot_many_versus(time, cpu_usages, name_x="time", name_y="cores",
                     dir_path=dir_path, lines=True, points=False, t_carla=t_carla)

    # plot cpu average
    plot_many_versus(time, [average_cpu], name_x="time", name_y="avg_cpu",
                     dir_path=dir_path, lines=True, points=False, t_carla=t_carla)

    # plot memory usage (bytes -> GB-ish scale via 1e6 KB)
    mem_usage = sys_data_df["[MEM]Used"].to_numpy() / 1000000
    plot_many_versus(time, [mem_usage], name_x="time", name_y="mem_usage",
                     dir_path=dir_path, lines=True, points=False, t_carla=t_carla)

    # plot gpu usage
    gpu_usage = sys_data_df["[NVIDIA]Gpu"].to_numpy()
    plot_many_versus(time, [gpu_usage], name_x="time", name_y="gpu_usage",
                     dir_path=dir_path, lines=True, points=False, t_carla=t_carla)

    # plot gpu memory
    gpu_memory = sys_data_df["[NVIDIA]Mem"].to_numpy()
    plot_many_versus(time, [gpu_memory], name_x="time", name_y="gpu_memory",
                     dir_path=dir_path, lines=True, points=False, t_carla=t_carla)

    print("Successfully plotted everything to", dir_path)


def get_from_data(data_folder, filename):
    """Load data/<data_folder>/<filename> and derive the results output path."""
    datadir = os.path.join(os.getcwd(), "data", data_folder)
    # output directory
    dir_path = os.path.join(os.getcwd(), "results", data_folder)
    # read from csv
    sys_data_df = read_from_file(datadir, filename)
    return sys_data_df, dir_path


if __name__ == '__main__':
    print("\nPlotting data:\n")
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument(
        '-d', '--dir',
        metavar='D',
        default="collectl",  # cwd
        type=str,
        help='data directory name for outputs')
    args = argparser.parse_args()
    data_folder = args.dir
    # (removed an unused `output_path = os.path.join("results")` assignment)
    sys_data_df, dir_path = get_from_data(data_folder=data_folder,
                                          filename="combined.csv")
    main(sys_data_df, dir_path)
{ "alphanum_fraction": 0.616079989, "author": null, "avg_line_length": 34.4386792453, "converted": null, "ext": "py", "file": null, "hexsha": "e33a22f6b3c30bac47df09544cfbfed8406903df", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 5, "max_forks_repo_forks_event_max_datetime": "2022-03-08T08:36:07.000Z", "max_forks_repo_forks_event_min_datetime": "2022-01-10T06:40:07.000Z", "max_forks_repo_head_hexsha": "f6215cd8459f02fef204e08757f9a67b69232e0b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "HARPLab/DReyeVR", "max_forks_repo_path": "Tools/Diagnostics/python/graph_sys_diagnostics.py", "max_issues_count": 6, "max_issues_repo_head_hexsha": "f6215cd8459f02fef204e08757f9a67b69232e0b", "max_issues_repo_issues_event_max_datetime": "2022-03-29T19:51:07.000Z", "max_issues_repo_issues_event_min_datetime": "2022-02-15T04:31:29.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "HARPLab/DReyeVR", "max_issues_repo_path": "Tools/Diagnostics/python/graph_sys_diagnostics.py", "max_line_length": 87, "max_stars_count": 13, "max_stars_repo_head_hexsha": "f6215cd8459f02fef204e08757f9a67b69232e0b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "HARPLab/DReyeVR", "max_stars_repo_path": "Tools/Diagnostics/python/graph_sys_diagnostics.py", "max_stars_repo_stars_event_max_datetime": "2022-03-02T02:59:14.000Z", "max_stars_repo_stars_event_min_datetime": "2022-01-07T05:55:42.000Z", "num_tokens": 1986, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 7301 }
import os
import numpy as np
import subprocess
import tempfile
from hqca.tools import *
from copy import deepcopy as copy

def purify(rdm,quantstore):
    """Purify a 2-RDM via Maple's QuantumChemistry `Purify2RDM`.

    Saves `rdm` to disk, generates a Maple script that loads it, purifies it,
    and exports the flattened result to CSV, then reloads the CSV into a new
    RDM object. If the purified RDM violates trace/spin checks, the original
    `rdm` is saved under the temp-file name for inspection.
    """
    cdir = os.getcwd()
    # Write the RDM to <cwd>/_temp.rdm for Maple to read back.
    rdm.save(name=cdir+'/_temp',spin=quantstore.spin_rdm)
    rdm.contract()
    path_to_maple = quantstore.path_to_maple
    print('Purifying 2-RDM...')
    print('(with eigenvalues: )')
    print(np.linalg.eigvalsh(rdm.rdm))
    print('-----------------------------')
    print('-----------------------------')
    r = quantstore.No_as  # number of active-space orbitals (array dimension)
    # Build the Maple script text line by line.
    blank = 'cdir := \"{}/\":\n'.format(cdir)
    blank+='with(LinearAlgebra): with(ArrayTools): with(QuantumChemistry):\n'
    blank+='loaddata := readdata(cat(cdir,\"_temp.rdm\"), 8):\n'
    # Index order differs between spin and spatial (chemists') conventions.
    if quantstore.spin_rdm:
        order = ['i','j','k','l']
    else:
        order = ['i','k','j','l']
    blank+='Flatten := proc(x) local n, a, i, j, k, l; `local`(a, n, i, j, k, l); `description`("convert chemists to numpy and flatten an array to form a matrix"); n := Size(x); a := Matrix(1 .. n[1]*n[2], 1 .. n[3]*n[4], datatype = float[8]); for i to round(n[1]) do for j to round(n[2]) do for k to round(n[3]) do for l to round(n[4]) do a[(i - 1)*n[1] + j, (k - 1)*n[3] + l] := x[{}, {}, {}, {}]; end do; end do; end do; end do; return a; end proc:\n'.format(order[0],order[1],order[2],order[3])
    blank+='New := Array(1 .. {}, 1 .. {}, 1 .. {}, 1 .. {}, datatype = float[8]):\n'.format(
            str(r),str(r),str(r),str(r))
    # Skip the 2 header rows of the .rdm file, then fill the 4-index array.
    blank+='for i in loaddata[3 .. ()] do\n'
    if quantstore.spin_rdm:
        blank+='    New[round(i[1]), round(i[2]), round(i[3]), round(i[4])] := i[5]:\n'
    else:
        blank+='    New[round(i[1]), round(i[3]), round(i[2]), round(i[4])] := i[5]:\n'
    blank+='end do:\n'
    if quantstore.spin_rdm==True:
        kw = 'false'
        state = 'alpha-beta'
    else:
        kw = 'true'
        state = 'spatial'
    blank+='pure := Purify2RDM(New, spin_free = {}, electron_number={}, conv_tol = 0.00000001):\n'.format(kw,quantstore.Ne)
    blank+='ExportMatrix(cat(cdir, \"_temp_purified.csv\"), Flatten(pure[rdm2])):\n'
    #blank+='Transpose(Re(Eigenvalues(Flatten(pure[rdm2]))));'
    # Write the Maple script to a temp file and run it.
    # NOTE(review): if anything below raises, the temp file is never removed.
    temp = tempfile.NamedTemporaryFile(mode='w+',dir=cdir,delete=False)
    temp.write(blank)
    temp.close()
    # NOTE(review): shell=True with string concatenation — paths containing
    # spaces or shell metacharacters would break/inject; confirm inputs.
    subprocess.run([path_to_maple+' '+temp.name],shell=True)
    # Reload Maple's flattened output and restore the 4-index shape.
    test = np.loadtxt(cdir+'/_temp_purified.csv',delimiter=',')
    test = np.reshape(test, (r,r,r,r))
    print('-----------------------------')
    print('-----------------------------')
    # Wrap the purified array back into an RDM object (from hqca.tools).
    pure = RDM(order=2,
            alpha=quantstore.groups[0],
            beta=quantstore.groups[1],
            Ne=quantstore.Ne,
            S=quantstore.Ne_alp-quantstore.Ne_bet,
            S2=0,
            state=state,
            rdm=test,
            )
    pure.get_spin_properties()
    pure.contract()
    tr = pure.trace()
    sz = pure.sz
    s2 = pure.s2
    name = copy(temp.name)
    print('Eigenvalues of purified 2-RDM...')
    print(np.linalg.eigvalsh(pure.rdm))
    print('Trace of 2-RDM: {}'.format(pure.trace()))
    print('Projected spin: {}'.format(sz))
    print('Total spin: {}'.format(s2))
    pure.expand()
    # Sanity checks: trace should equal Ne*(Ne-1); Sz should match the spin
    # imbalance; S^2 should vanish (singlet target).
    cn = abs(abs(tr-quantstore.Ne*(quantstore.Ne-1)))>0.01
    csz = abs(abs(sz)-abs(quantstore.Ne_alp-quantstore.Ne_bet))>0.01
    cs2 = s2>0.01
    # Clean up the script and intermediate files.
    if os.path.exists(temp.name):
        os.remove(temp.name)
    if os.path.exists(cdir+'/_temp_purified.csv'):
        os.remove(cdir+'/_temp_purified.csv')
    if os.path.exists(cdir+'/_temp.rdm'):
        os.remove(cdir+'/_temp.rdm')
    if cn or csz or cs2:
        print('RDM violated some property after purification, saving to: {}'.format(
            name+'.rdm')
            )
        rdm.save(name=name,spin=quantstore.spin_rdm)
    return pure

def purify_rdm(name,quantstore):
    """Purify a 2-RDM already stored on disk as `<name>.rdm` via Maple.

    Same pipeline as `purify` (NOTE(review): the two functions are near
    duplicates and could share a helper), but reads an existing .rdm file,
    prints the pre-purification eigenvalues from within Maple, and uses a
    tighter convergence tolerance. Returns the purified RDM.
    """
    cdir = os.getcwd()
    path_to_maple = quantstore.path_to_maple
    print('Purifying 2-RDM: {}'.format(name))
    print('-----------------------------')
    print('-----------------------------')
    r = quantstore.No_as  # number of active-space orbitals (array dimension)
    # Build the Maple script text line by line.
    blank = 'cdir := \"{}/\":\n'.format(cdir)
    blank+='with(LinearAlgebra): with(ArrayTools): with(QuantumChemistry):\n'
    blank+='loaddata := readdata(cat(cdir,\"{}.rdm\"), 8):\n'.format(name)
    # Index order differs between spin and spatial (chemists') conventions.
    if quantstore.spin_rdm:
        order = ['i','j','k','l']
    else:
        order = ['i','k','j','l']
    blank+='Flatten := proc(x) local n, a, i, j, k, l; `local`(a, n, i, j, k, l); `description`("convert chemists to numpy and flatten an array to form a matrix"); n := Size(x); a := Matrix(1 .. n[1]*n[2], 1 .. n[3]*n[4], datatype = float[8]); for i to round(n[1]) do for j to round(n[2]) do for k to round(n[3]) do for l to round(n[4]) do a[(i - 1)*n[1] + j, (k - 1)*n[3] + l] := x[{}, {}, {}, {}]; end do; end do; end do; end do; return a; end proc:\n'.format(order[0],order[1],order[2],order[3])
    blank+='New := Array(1 .. {}, 1 .. {}, 1 .. {}, 1 .. {}, datatype = float[8]):\n'.format(
            str(r),str(r),str(r),str(r))
    # Skip the 2 header rows of the .rdm file, then fill the 4-index array.
    blank+='for i in loaddata[3 .. ()] do\n'
    if quantstore.spin_rdm:
        blank+='    New[round(i[1]), round(i[2]), round(i[3]), round(i[4])] := i[5]:\n'
    else:
        blank+='    New[round(i[1]), round(i[3]), round(i[2]), round(i[4])] := i[5]:\n'
    blank+='end do:\n'
    if quantstore.spin_rdm==True:
        kw = 'false'
        state = 'alpha-beta'
    else:
        kw = 'true'
        state = 'spatial'
    # Print pre-purification eigenvalues from inside Maple for diagnostics.
    blank+='Transpose(Re(Eigenvalues(Flatten(New))));\n'
    blank+='pure := Purify2RDM(New, spin_free = {}, electron_number={}, conv_tol = 0.00000000001):\n'.format(kw,quantstore.Ne)
    blank+='ExportMatrix(cat(cdir, \"_temp_purified.csv\"), Flatten(pure[rdm2])):\n'
    #blank+='Transpose(Re(Eigenvalues(Flatten(pure[rdm2]))));'
    # Write the Maple script to a temp file and run it.
    # NOTE(review): if anything below raises, the temp file is never removed.
    temp = tempfile.NamedTemporaryFile(mode='w+',dir=cdir,delete=False)
    temp.write(blank)
    temp.close()
    # NOTE(review): shell=True with string concatenation — paths containing
    # spaces or shell metacharacters would break/inject; confirm inputs.
    subprocess.run([path_to_maple+' '+temp.name],shell=True)
    # Reload Maple's flattened output and restore the 4-index shape.
    test = np.loadtxt(cdir+'/_temp_purified.csv',delimiter=',')
    test = np.reshape(test, (r,r,r,r))
    print('-----------------------------')
    print('-----------------------------')
    # Wrap the purified array back into an RDM object (from hqca.tools).
    pure = RDM(order=2,
            alpha=quantstore.groups[0],
            beta=quantstore.groups[1],
            Ne=quantstore.Ne,
            S=quantstore.Ne_alp-quantstore.Ne_bet,
            S2=0,
            state=state,
            rdm=test,
            )
    pure.get_spin_properties()
    pure.contract()
    tr = pure.trace()
    sz = pure.sz
    s2 = pure.s2
    name = copy(temp.name)
    print('Eigenvalues of purified 2-RDM...')
    print(np.linalg.eigvalsh(pure.rdm))
    print('Trace of 2-RDM: {}'.format(pure.trace()))
    print('Projected spin: {}'.format(sz))
    print('Total spin: {}'.format(s2))
    pure.expand()
    # Sanity checks computed but (unlike `purify`) not acted upon below.
    cn = abs(abs(tr-quantstore.Ne*(quantstore.Ne-1)))>0.01
    csz = abs(abs(sz)-abs(quantstore.Ne_alp-quantstore.Ne_bet))>0.01
    cs2 = s2>0.01
    # Clean up the script and intermediate files.
    if os.path.exists(temp.name):
        os.remove(temp.name)
    if os.path.exists(cdir+'/_temp_purified.csv'):
        os.remove(cdir+'/_temp_purified.csv')
    if os.path.exists(cdir+'/_temp.rdm'):
        os.remove(cdir+'/_temp.rdm')
    #if cn or csz or cs2:
    #    print('RDM violated some property after purification, saving to: {}'.format(
    #        name+'.rdm')
    #        )
    #    rdm.save(name=name,spin=quantstore.spin_rdm)
    return pure
% !TeX spellcheck = en_GB \documentclass[12pt]{beamer} \usetheme[sectionpage=none, subsectionpage=progressbar, progressbar=foot, numbering=fraction]{metropolis} \makeatletter \setlength{\metropolis@frametitle@padding}{1.6ex}% <- default 2.2 ex \setbeamertemplate{footline}{% \begin{beamercolorbox}[wd=\textwidth, sep=1.5ex]{footline}% <- default 3ex \usebeamerfont{page number in head/foot}% \usebeamertemplate*{frame footer} \hfill% \usebeamertemplate*{frame numbering} \end{beamercolorbox}% } \makeatother \AtBeginSubsection { \begin{frame}{Where are we?} \tableofcontents[sectionstyle=show/shaded, subsectionstyle=show/shaded/hide] \end{frame} } \makeatletter \setbeamertemplate{headline}{ \begin{beamercolorbox}{upper separation line head} \end{beamercolorbox} \begin{beamercolorbox}{section in head/foot} \vskip2pt\insertsectionnavigationhorizontal{\paperwidth}{}{}\vskip2pt \end{beamercolorbox} \begin{beamercolorbox}{lower separation line head} \end{beamercolorbox} } \makeatother \setbeamercolor{section in head/foot}{fg=normal text.bg, bg=structure.fg} \setbeamertemplate{itemize items}[square] \usepackage{menukeys} \usepackage{minted} \setminted[bash]{fontsize=\footnotesize, tabsize=2, breaklines} \setminted[elixir]{fontsize=\footnotesize, tabsize=2, breaklines} \setminted[erlang]{fontsize=\footnotesize, tabsize=2, breaklines} \title{Elixir Workshop} \author{Julius Putra Tanu Setiaji} \date{13 May 2019} \begin{document} \frame[plain]{\titlepage} \section{First Steps} \subsection{Erlang} \begin{frame}{About Erlang} \begin{center} \includegraphics[width=0.2\textwidth]{erlang} \end{center} \begin{itemize} \item A development platform for building \textbf{scalable} and \textbf{reliable} systems. \item Built in Ericsson\footnote{Ericsson was and is one of the largest telecom infrastructure companies in the world.} in the 1990s \item Runs on BEAM (Bj\"{o}rn's Erlang Abstract Machine)\footnote{Similar idea: Java runs on the JVM (Java Virtual Machine)}. 
\end{itemize}
\end{frame}

\begin{frame}{High Availability}
	\begin{itemize}
		\item Fault tolerance
		\item Scalability
		\item Distribution
		\item Responsiveness
		\item Live update
	\end{itemize}
\end{frame}

\begin{frame}{A tribute to Prof. Joe Armstrong}
	\textbf{Prof. Joe Armstrong} was behind the development of Erlang. He passed away recently on 20th April 2019.
	\begin{center}
		\includegraphics[width=0.4\linewidth]{joearmstrong}
	\end{center}
\end{frame}

\begin{frame}[fragile]{Sample Erlang}
	Based on \textbf{Prolog}: its syntax borrows heavily from it and the first Erlang compiler\footnote{\url{https://www.erlang.se/publications/prac_appl_prolog.ps}} was written in it too.
	\begin{minted}{erlang}
-module(fact).
-export([fac/1]).

fac(0) -> 1;
fac(N) when N > 0, is_integer(N) -> N * fac(N - 1).
	\end{minted}
\end{frame}

\subsection{Elixir}
\begin{frame}{About Elixir}
	\begin{center}
		\includegraphics[width=0.4\textwidth]{elixir}
	\end{center}
	\begin{itemize}
		\item Targets BEAM, with full Erlang interoperability\footnote{It means that we can use Erlang libraries and tooling}.
		\item Started by Jos\'{e} Valim in 2012 -- he was involved heavily in the Ruby on Rails core team before.
		\item Provides almost one-to-one mapping to Erlang constructs but with additions to reduce boilerplate and duplication.
	\end{itemize}
\end{frame}

\begin{frame}[fragile]{Sample Elixir}
	The syntax is heavily borrowed from Ruby.
	\begin{minted}{elixir}
defmodule Fact do
  def fac(0) do
    1
  end

  def fac(n) when n > 0 and is_integer(n) do
    n * fac(n - 1)
  end
end
	\end{minted}
\end{frame}

\begin{frame}{Phoenix Framework}
	\begin{center}
		\includegraphics[width=0.2\textwidth]{phoenix}
	\end{center}
	\begin{itemize}
		\item Elixir's web framework, just like RoR in Ruby, or Django in Python.
		\item Initially it was heavily based on Ruby on Rails.
		\item Over time, Phoenix has diverged from its Rails roots and developed its own unique ideas.
\end{itemize} \end{frame} \begin{frame}{Ecto} \begin{center} \includegraphics[width=0.15\textwidth]{ecto} \end{center} \begin{itemize} \item A database wrapper and language integrated query for Elixir. \item Data mapping and validation, with a SQL adapter. \item Conceptually, this is the Model in MVC architecture. Similar to ActiveRecord in Ruby on Rails. \end{itemize} \end{frame} \subsection{Source Academy} \begin{frame}{Roles of Elixir in Source Academy} \begin{itemize} \item Source Academy has a separate \textbf{backend} and \textbf{frontend}. \item \textbf{Backend} stores data and does the business logic. \item \textbf{Frontend} is what the user interacts with and makes requests to the backend. \item Phoenix Framework is used to write the Source Academy's backend. \end{itemize} \end{frame} \begin{frame}{Why Elixir?} \begin{itemize} \item Elixir has good performance due to BEAM (compared to the alternatives, such as Ruby on Rails, Express on node.js, etc.). \item Elixir is a functional language, in line with what CS1101S and SICP taught. \item Elixir has more familiar syntax than Erlang. \item Good package manager (\texttt{hex} and \texttt{rebar}), which provides ability to use Erlang and Elixir libraries. \end{itemize} \end{frame} \subsection{Preparing Environment} \begin{frame}{Steps of Installing Elixir} \begin{enumerate} \item \textbf{Install Erlang}: I prefer using the native package manager for this (whatever the latest OTP version is) \item \textbf{Install Elixir}: I prefer using \texttt{asdf} so I can manage more than 1 version of Elixir at the same time \end{enumerate} \end{frame} \begin{frame}[fragile]{Installing Erlang} On Mac: \url{https://is.gd/install_erlang_mac} On Ubuntu: \url{https://is.gd/install_erlang_ubuntu} Check if you have a working Erlang/OTP 21 installation: \begin{minted}{bash} $ erl Erlang/OTP 21 ... Eshell ... \end{minted} \begin{minted}{erlang} > io:fwrite("Hello, world!~n"). Hello, world! 
ok \end{minted} \end{frame} \begin{frame}[fragile]{Installing asdf and Elixir} Go to \url{https://asdf-vm.com/}, click on ``Get Started'' and follow the instructions. Afterwards: \begin{minted}{bash} asdf plugin-add elixir asdf install elixir 1.8.1-otp-21 \end{minted} Check if you have a working Elixir 1.8.1 installation: \begin{minted}{bash} $ iex Erlang/OTP 21 ... Interactive Elixir (1.8.1) ... \end{minted} \begin{minted}{elixir} iex(1)> IO.puts("Hello, world!") Hello, world! :ok \end{minted} \end{frame} \begin{frame}{Recommended Reading} \begin{center} \textbf{Elixir in Action by Sasa Juric} \includegraphics[height=0.5\textheight]{sasajuric} The first part would do (first 4 chapters) \end{center} \end{frame} \section{Building Blocks} \subsection{The Interactive Shell} \begin{frame}[fragile]{Elixir Interactive Shell (REPL)} \begin{itemize} \item You can enter Elixir's REPL\footnote{Read-Evaluate-Print-Loop} by running \mintinline{bash}{iex} from terminal. \item \mintinline{elixir}{i}: inspect a value. Example: \mintinline{elixir}{i 5} \item \mintinline{elixir}{h}: get help about a command. Examples: \begin{minted}{elixir} h Enum.reduce h Enum.reduce/2 h Enum.reduce/3 \end{minted} \item Note that in Elixir, everything is an expression and thus will return something. \end{itemize} \end{frame} \subsection{Variables} \begin{frame}[fragile]{Variables} \begin{itemize} \item Elixir is a \textit{dynamic} language, thus there is no explicit type declaration. \item In Elixir, mapping a value to a variable is called \textbf{binding}. \item To do this, we use the match operator \mintinline{elixir}{=} \item Variable names uses snake\_case style. \item Example: \begin{minted}{elixir} x = 1 x y = "a" y x = 2 x \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{Immutability} \begin{itemize} \item Elixir is a functional language where everything is immutable. \item Re-binding a variable changes the value that a variable is pointing to but never the value itself. 
		\item Let's test it with anonymous function:
		\begin{minted}{elixir}
x = 42
foo = fn -> IO.puts(x) end
x = 0
IO.puts(x)
foo.()
		\end{minted}
	\end{itemize}
\end{frame}

\subsection{Organising Your Code}
\begin{frame}{Organising Your Code}
	\begin{itemize}
		\item As a functional language, Elixir relies heavily on functions.
		\item Due to immutable nature of data, typical Elixir program consists of many small functions.
		\item Multiple functions are grouped together into modules.
	\end{itemize}
\end{frame}

\begin{frame}[fragile]{Module and Functions}
	\begin{itemize}
		\item A collection of functions -- similar to namespace in other languages.
		\item Every named function in Elixir must be defined inside a module.
		\item Syntax: \texttt{ModuleName.function\_name(args)}
		\item Module names use CamelCase style. It can also contain the dot character to organise modules hierarchically.
		\item Module definition starts with the \mintinline{elixir}{defmodule} construct, followed by a \mintinline{elixir}{do}-\mintinline{elixir}{end} block.
		\item Nested modules or modules whose names contain a dot have no special relation -- all modules are independent of one another (for now).
	\end{itemize}
\end{frame}

\begin{frame}{Module and Functions (cont.)}
	\begin{itemize}
		\item Function names use snake\_case style.
		\item Function name conventions: \texttt{?} suffix means the function returns a boolean, \texttt{!} suffix means the function may raise a runtime error.
		\item Function definition starts with the \mintinline{elixir}{def} construct, followed by a \mintinline{elixir}{do}-\mintinline{elixir}{end} block.
		\item Parentheses can be omitted in definition of 0-arity functions.
		\item No explicit return value -- instead, last expression is the return value.
\end{itemize} \end{frame} \begin{frame}{Modules and Functions (cont.)} \begin{itemize} \item Default argument is declared using \mintinline{elixir}{\\} -- this creates functions with the same name but different arities \item In general, ignored variable are prefixed with underscores. \end{itemize} \end{frame} \begin{frame}[fragile]{Module and Functions (cont.)} Save as \texttt{Geometry.ex}, run as \texttt{iex Geometry.ex}: \begin{minted}{elixir} defmodule Geometry do def foo, do: "Hello, world!" defmodule Square do def area(side \\ 2), do: Geometry.Rectangle.area(side, side) end end defmodule Geometry.Rectangle do def area(length, width) do length * width end end \end{minted} \end{frame} \begin{frame}[fragile]{Module and Functions (cont.)} \begin{minted}{elixir} Geometry.Rectangle.area(2, 5) Geometry.Square.area(3) Geometry.foo() \end{minted} \end{frame} \begin{frame}{Function visibility} \begin{itemize} \item Functions defined using \mintinline{elixir}{def} is public (exported) and can be used by any module. \item To make a function private, define using \mintinline{elixir}{defp} -- private functions can only be invoked inside the module where it is defined. \end{itemize} \end{frame} \begin{frame}[fragile]{Imports and Aliases} \begin{itemize} \item \mintinline{elixir}{import} allows calling public functions of a module without prefixing with the module name. \item \mintinline{elixir}{alias} allows referencing a module under a different name. 
\item Example:
\begin{minted}{elixir}
import IO
puts("Hello, world!")
alias Geometry.Square
Square.area(3)
alias Geometry.Square, as: MySquare
MySquare.area(3)
\end{minted}
\end{itemize}
\end{frame}
\begin{frame}{Module Attributes}
Has 3 uses:
\begin{itemize}
\item As annotations
\item As constants at run-time
\item As temporary storage at compile-time
\end{itemize}
\end{frame}
\begin{frame}[fragile]{Module Attributes: As annotation}
Save as \texttt{Foo.ex}, then compile by running \texttt{elixirc Foo.ex}, then run \texttt{iex}
\begin{minted}{elixir}
defmodule Foo do
  @moduledoc """
  This is the documentation for the Foo module.
  """
  @doc "bar/0 returns the number 5"
  def bar, do: 5
end
\end{minted}
\end{frame}
\begin{frame}[fragile]{Module Attributes: As constants at run-time}
\begin{minted}{elixir}
defmodule Geometry.Circle do
  @pi 3.14159
  def area(radius), do: @pi * radius * radius
end
\end{minted}
\end{frame}
\begin{frame}{Comments}
\begin{itemize}
\item Comments start with the character \texttt{\#}
\item Block comments are not supported -- instead prefix each one with \texttt{\#}
\end{itemize}
\end{frame}
\subsection{Types}
\begin{frame}[fragile]{Identifying functions}
\begin{itemize}
\item Functions in Elixir are identified by their name and arity.
\item The arity of a function describes the number of arguments that the function takes.
\item Example: \mintinline{elixir}{Enum.reduce/2} identifies a function from the \mintinline{elixir}{Enum} module, function name is \mintinline{elixir}{reduce}, and the arity is 2, while \mintinline{elixir}{Enum.reduce/3} describes the same module, the same function name, but with arity 3.
\end{itemize}
\end{frame}
\begin{frame}[fragile]{Integer and Float}
\begin{itemize}
\item In Elixir, just like in Erlang and Ruby, Integer has arbitrary precision, while float has 64-bit double precision.
\item Predicate functions: \mintinline{elixir}{is_integer/1} and \mintinline{elixir}{is_float/1}
\item Note that the \mintinline{elixir}{/} operator always returns a float.
\item To get integer division, use \mintinline{elixir}{div/2} instead.
\item To get remainder, use \mintinline{elixir}{rem/2}
\end{itemize}
\end{frame}
\begin{frame}[fragile]{Integer and Float (cont.)}
\begin{itemize}
\item Examples:
\begin{minted}{elixir}
1 + 2
5 * 5
10 / 2
div(10, 2)
rem(10, 3)
rem 10, 3
\end{minted}
\item Note that in Elixir, you can drop parentheses when invoking named functions, just like Ruby (generally discouraged)
\end{itemize}
\end{frame}
\begin{frame}[fragile]{Integer and Float (cont.)}
\begin{itemize}
\item Elixir also provides shortcut notation to enter binary, octal, and hexadecimal numbers:
\begin{minted}{elixir}
0b1010
0o777
0x1F
\end{minted}
\item Float requires a dot followed by at least 1 digit.
\item It also supports \texttt{e} for scientific notation.
\item You can use \mintinline{elixir}{floor/1}, \mintinline{elixir}{ceil/1}, \mintinline{elixir}{trunc/1}, \mintinline{elixir}{round/1}.
\begin{minted}{elixir}
1.0
1.0e-10
round(-1.5)
trunc(-1.5)
floor(-1.5)
ceil(-1.5)
\end{minted}
\end{itemize}
\end{frame}
\begin{frame}[fragile]{Boolean}
\begin{itemize}
\item Either the value \mintinline{elixir}{true} or \mintinline{elixir}{false}
\item Predicate function: \mintinline{elixir}{is_boolean/1}
\item Examples:
\begin{minted}{elixir}
is_boolean(true)
is_boolean(false)
is_boolean(5)
\end{minted}
\end{itemize}
\end{frame}
\begin{frame}[fragile]{Atom}
\begin{itemize}
\item A constant whose name is its own value. Similar to Symbols in Ruby or Lisp.
\item Comparison is $O(1)$, while keeping the value named instead of an integer constant.
\item Syntactically, written with a colon \texttt{:} prefix (like Ruby) \item Predicate function: \mintinline{elixir}{is_atom/1} \item Example: \begin{minted}{elixir} :test is_atom(:test) \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{Atom (cont.)} \begin{itemize} \item Note that in Elixir (and Erlang), booleans are implemented as atoms \mintinline{elixir}{:true} and \mintinline{elixir}{:false}, and \mintinline{elixir}{nil} as \mintinline{elixir}{:nil} too. \item Module names are atoms too. \item You can also specify atom name that contains special characters (such as dot or colon) by delimiting them with double-quotes. \begin{minted}{elixir} is_atom(true) is_atom(Tuple) :"Elixir.Tuple" :"asd:a.sdd" \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{String} \begin{itemize} \item Delimited by double quotes, encoded in UTF-8 \item Represented internally by binaries (sequence of bytes). Binaries are delimited with \texttt{<<} and \texttt{>>} \item Predicate function: \mintinline{elixir}{is_binary/1} \item Examples: \begin{minted}{elixir} "Hello, world!" 
<<104, 101, 108, 108, 111>> is_binary("Hi!") \end{minted} \item Elixir supports string interpolation: \begin{minted}{elixir} "Hello, #{:world}" \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{String (cont.)} \begin{itemize} \item Get number of bytes in a string using \mintinline{elixir}{byte_size/1} \item Get length of string using \mintinline{elixir}{String.length/1} \item However, they might not be the same as the length of the string due to UTF-8 encoding: \begin{minted}{elixir} byte_size("hellö") String.length("hellö") \end{minted} \item \mintinline{elixir}{String} module contains helpful functions to manipulate string: \begin{minted}{elixir} String.upcase("hellö") \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{Anonymous functions (lambda)} \begin{itemize} \item Delimited by keywords \mintinline{elixir}{fn} and \mintinline{elixir}{end} \item Functions are first-class citizens: they can be passed as arguments to other functions. \item Note that a dot between the variable and parantheses is required to invoke an anonymous function. \item Predicate functions: \mintinline{elixir}{is_function/1}, \mintinline{elixir}{is_function/2} \item Examples: \begin{minted}{elixir} add = fn a, b -> a + b end add.(1, 2) is_function(add) is_function(add, 2) is_function(add, 1) Enum.each('hello', fn x -> IO.puts(x) end) \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{The Capture Operator} \begin{itemize} \item Ampersand \mintinline{elixir}{&} is the capture operator. \item It can be used to create anonymous function \item Examples: \begin{minted}{elixir} Enum.each('hello', &IO.puts/1) Enum.each('hello', &IO.puts(&1)) (&IO.puts(&1)) == &IO.puts(&1) \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{(Linked) Lists} \begin{itemize} \item Dynamic, variable-sized collections of data. \item The syntax might look like an array, but it actually is a linked list with $O(n)$ complexity for most functions. 
\item It uses Lisp-y list (SICP list): the lists are built from pairs \item Syntax for pair: \mintinline{elixir}{[head | tail]} \item To get head and tail, use \mintinline{elixir}{hd/1} and \mintinline{elixir}{tl/1} respectively. \item Predicate functions: \mintinline{elixir}{is_list/1} \end{itemize} \end{frame} \begin{frame}[fragile]{(Linked) Lists} \begin{itemize} \item Examples: \begin{minted}{elixir} [1 | 2] [1 | [2 | []]] [1, 2] is_list([1, 2, 3, 4]) length([1, 2, 3, 4]) \end{minted} \item Concatenate using the \mintinline{elixir}{++/2} operator, subtract using the \mintinline{elixir}{--/2} operator: \begin{minted}{elixir} [1, 2, 3] ++ [4, 5, 6] [1, true, 2, false, 3, true] -- [true, false] \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{(Linked) Lists (cont.)} \begin{itemize} \item Note that in Erlang, strings are usually represented as charlist (list of characters) instead of binaries. \item Charlist is written in Elixir delimited single quote. \item Examples: \begin{minted}{elixir} 'hello' [104, 101, 108, 108, 111] "hello" <<104, 101, 108, 108, 111>> 'hello' == "hello" \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{Tuple} \begin{itemize} \item Delimited by curly braces. \item Group a fixed number of elements, stored contiguously in memory. Thus, most operations are $O(1)$ \item Examples: \begin{minted}{elixir} tuple = {:ok, "world"} tuple_size({:ok, "world"}) elem(tuple, 1) put_elem(tuple, 1, "world") \end{minted} \end{itemize} \end{frame} \begin{frame}{List vs Tuple} \begin{itemize} \item Appending lists is $O(n)$ \item Tuples are stored contiguously in memory, thus updating a value in tuple is expensive as a new tuple has to be created. \item Tuples are typically used to return more than 1 data from a function: \mintinline{elixir}{{:ok, data}}, \mintinline{elixir}{{:error, :reason}} \item Usually, Elixir will guide you to do the right thing: \mintinline{elixir}{elem/1} exists but no built-in equivalent for lists. 
\item When counting elements, in Elixir, \mintinline{elixir}{size} signifies $O(1)$, while \mintinline{elixir}{length} signifies $O(n)$ \item E.g. \mintinline{elixir}{byte_size/1}, \mintinline{elixir}{tuple_size/1} vs \mintinline{elixir}{length/1}, \mintinline{elixir}{String.length/1} \end{itemize} \end{frame} \begin{frame}{Operators} \begin{itemize} \item \textbf{Arithmetic}: \mintinline{elixir}{+}, \mintinline{elixir}{-}, \mintinline{elixir}{*}, \mintinline{elixir}{/}, \mintinline{elixir}{div}, \mintinline{elixir}{rem} \item \textbf{List}: \mintinline{elixir}{++}, \mintinline{elixir}{--} \item \textbf{Binary (String)}: concatenate \mintinline{elixir}{<>} \item \textbf{Boolean}: \mintinline{elixir}{and}, \mintinline{elixir}{or}, \mintinline{elixir}{not} \item \textbf{Truthy/Falsey}: \mintinline{elixir}{||} return the first truthy value or the last element, \mintinline{elixir}{&&} return the first falsey value or the last element, \mintinline{elixir}{!} returns \mintinline{elixir}{true} except for \mintinline{elixir}{false} and \mintinline{elixir}{nil} \item \textbf{Comparison}: \mintinline{elixir}{==}, \mintinline{elixir}{!=}, \mintinline{elixir}{<=}, \mintinline{elixir}{>=}, \mintinline{elixir}{>}, \mintinline{elixir}{<}, \mintinline{elixir}{===} strict compare integer and float, \mintinline{elixir}{!==} \item Different types can be compared with total order\footnote{number < atom < reference < function < port < pid < tuple < map < list < bitstring}. \end{itemize} \end{frame} \begin{frame}[fragile]{Maps} \begin{itemize} \item A key-value store, implemented using Hash Array Mapped Trie (HAMT). \item Examples: \begin{minted}{elixir} %{a: 1, b: 2} %{:a => 1, :b => 2} map = %{"a"=> %{"b" => [true, false, nil]}, 5 => "boo", :z => 100} map["a"] map["a"]["b"] map[:z] map.z %{map | 5 => "honhonhon"} \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{Keyword List} \begin{itemize} \item Older way to create a key-value store is having a list of 2-item tuple. 
\item If the first item of the tuple is an atom, then this is a keyword list. \item Example: \begin{minted}{elixir} [{:a, 1}, {:b, 2}] [a: 1, b: 2] \end{minted} \item Important properties: \begin{itemize} \item Keys must be atoms \item Keys are ordered \item Keys can be given more than once. \end{itemize} \item Beware of the $O(n)$ performance characteristics. \end{itemize} \end{frame} \begin{frame}[fragile]{Range} \begin{itemize} \item Represents a range of numbers (like Ruby) \item Example: \begin{minted}{elixir} range = 1..2 1 in range -1 in range Enum.each(1..3, &IO.puts/1) \end{minted} \end{itemize} \end{frame} \begin{frame}{Macros} \begin{itemize} \item Advanced feature -- we will not go into much detail. \item A very lisp-y feature. \item Elixir metaprogramming feature: code that receives AST and manipulates them. \item Note that many of the constructs in Elixir are actually implemented as macros on the standard library: \mintinline{elixir}{def}, \mintinline{elixir}{defp}, etc. \end{itemize} \end{frame} \section{Control Flow} \subsection{Pattern Matching} \begin{frame}[fragile]{Pattern Matching} \begin{itemize} \item One of the most powerful features of functional languages. \item Similar to destructuring in Ruby or JavaScript. \item Recall the match operator \mintinline{elixir}{=} \item So far we have just done simple bindings from the RHS to LHS: \begin{minted}{elixir} x = 1 x \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{Pattern Matching (cont.)} \begin{itemize} \item However, we can also do: \begin{minted}{elixir} 1 = x \end{minted} \item This is because LHS and RHS are both \mintinline{elixir}{1}. \item The value in the RHS, namely the value bound to \mintinline{elixir}{x} (i.e. 
\mintinline{elixir}{1}) is being pattern-matched against the value in the LHS, namely \mintinline{elixir}{1} \item However, doing \mintinline{elixir}{2 = x} will result in \mintinline{elixir}{MatchError} \end{itemize} \end{frame} \begin{frame}[fragile]{Pattern Matching on More Complex Data Types} \begin{minted}{elixir} {name, age} = {"Bob", 25} {_, {hour, _, _}} = :calendar.local_time() # Mimicking Prolog's unification. Suck it Haskell! {amount, amount, amount} = {127, 127, 127} {amount, amount, amount} = {127, 127, 1} [first, second, third] = [1, 2, 3] [head | tail] = [1, 2, 3] %{name: name, age: age} = %{name: "Bob", age: 25} %{age: age} = %{name: "Bob", age: 25} "Hello" <> rest = "Hello, world!" \end{minted} \end{frame} \begin{frame}[fragile]{Pin Operator \mintinline{elixir}{^}} \begin{itemize} \item Variables in Elixir can be rebound. \item To pattern match against an existing variable's value rather rebinding, use the pin operator \mintinline{elixir}{^}: \begin{minted}{elixir} x = 1 ^x = 2 \end{minted} \end{itemize} \end{frame} \subsection{Conditionals} \begin{frame}[fragile]{Pattern Matching on Function Arguments and Multi-clausal Functions} In functional languages, you can pattern match on function arguments too, and provide multiple clauses! \begin{minted}{elixir} defmodule Fact do def fac(0), do: 1 def fac(n), do: n * fac(n - 1) end \end{minted} For a function, each clause will be attempted based on the order of definition. \end{frame} \begin{frame}[fragile]{Function Guards} \begin{itemize} \item For a factorial function, only non-negative integers are valid input. 
\item Using predicate functions and simple comparison expressions, we can provide constraints more than just the pattern match on each function clause:
\begin{minted}{elixir}
defmodule Fact do
  def fac(0), do: 1
  def fac(n) when is_integer(n) and n > 0, do: n * fac(n - 1)
end
\end{minted}
\end{itemize}
\end{frame}
\begin{frame}{Function Guards (cont.)}
\begin{itemize}
\item The set of operators and functions that can be called is very limited, the full list is at \url{https://hexdocs.pm/elixir/guards.html}
\item Note that an error raised in the guard expression will simply result in match failure, and Elixir will move on to the next clause, e.g. applying \mintinline{elixir}{length/1} on a non-list.
\end{itemize}
\end{frame}
\begin{frame}[fragile]{Multi-clausal lambdas}
Anonymous functions (lambdas) may also consist of multiple clauses:
\begin{minted}{elixir}
test_num = fn
  0 -> :zero
  x when is_number(x) and x < 0 -> :negative
  x when is_number(x) and x > 0 -> :positive
end
Enum.map(-2..2, test_num)
\end{minted}
\end{frame}
\begin{frame}[fragile]{case}
\begin{itemize}
\item \mintinline{elixir}{case} allows us to compare a value against many patterns until we find a matching one.
\item Guards can be used also.
\item If none of the clauses match, \mintinline{elixir}{CaseClauseError} is raised.
\item Example:
\begin{minted}{elixir}
case {1, 2, 3} do
  {4, 5, 6} -> :no_match
  {1, x, 3} when x > 0 -> :match
  _ -> :match_anything
end
x
\end{minted}
\end{itemize}
\end{frame}
\begin{frame}[fragile]{case (cont.)}
In fact, we can reimplement our factorial function using \mintinline{elixir}{case}:
\begin{minted}{elixir}
defmodule Fact do
  def fac(n) do
    case n do
      0 -> 1
      x when is_integer(x) and x > 0 -> n * fac(n - 1)
    end
  end
end
\end{minted}
\end{frame}
\begin{frame}[fragile]{cond}
\begin{itemize}
\item Used to check for different conditions and find the first clause that is truthy.
\item Similar to if-else if-else clauses in imperative languages.
\item Note that idiomatic Elixir uses \mintinline{elixir}{cond} very sparingly. \item Example: \begin{minted}{elixir} cond do 2 + 2 == 5 -> :never_true 1 + 1 == 2 -> :should_match_this true -> :will_match_this_if_all_fail end \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{\mintinline{elixir}{if} and \mintinline{elixir}{unless}} \begin{itemize} \item To check for only 1 condition, Elixir provides \mintinline{elixir}{if} and \mintinline{elixir}{unless} (if not). They are implemented as macros. \item If no block is specified, an implicit \mintinline{elixir}{nil} is returned. \item Example: \begin{minted}{elixir} if nil do "This won't be seen" else "This will" end unless true do "This will never be seen" end \end{minted} \end{itemize} \end{frame} \subsection{Recursion} \begin{frame}[fragile]{Our Old Friend Recursion!} \begin{minted}{elixir} defmodule Fact do def fac(0), do: 1 def fac(n) when is_integer(n) and n > 0, do: n * fac(n - 1) end \end{minted} \end{frame} \begin{frame}[fragile]{And Tail Call Recursions!} \begin{minted}{elixir} defmodule Fact do def fac(n, acc \\ 1) when is_integer(n) and is_integer(acc) and acc > 0 do case n do 0 -> acc n when n > 0 -> fac(n - 1, acc * n) end end end \end{minted} \end{frame} \subsection{Enumerable} \begin{frame}[fragile]{Enumerable} \begin{itemize} \item Elixir provides the concept of enumerables and the \mintinline{elixir}{Enum} module (\url{https://hexdocs.pm/elixir/Enum.html}) to work with them using higher order functions. \item Example: \begin{minted}{elixir} # Allow us to use macros in Integer module require Integer Enum.reduce(Enum.filter(Enum.map(1..100_000, &(&1 * 3)), &Integer.is_even/1), 0, &+/2) Enum.sum(Enum.filter(Enum.map(1..100_000, &(&1 * 3)), &Integer.is_even/1)) \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{The Pipe Operator} \begin{itemize} \item In my opinion, the best feature that Elixir has! 
\item The pipe operator \mintinline{elixir}{|>} takes the output from the left side and passes it as the first argument to the function call on the right side. \item Similar to Unix shell \mintinline{bash}{|} operator. \item Clear pipeline of transformation of data. \item Example: \begin{minted}{elixir} require Integer 1..100_000 |> Enum.map(&(&1 * 3)) |> Enum.filter(&Integer.is_even/1) |> Enum.sum() \end{minted} \end{itemize} \end{frame} \subsection{Streams} \begin{frame}[fragile]{Streams} \begin{itemize} \item Enumerables are eager. Elixir provides a lazy alternative: the \mintinline{elixir}{Stream} module (\url{https://hexdocs.pm/elixir/Stream.html}) \item They are composable too. \item Examples: \begin{minted}{elixir} stream = Stream.cycle([1, 2, 3]) stream |> Stream.take(4) |> Enum.to_list() require Integer 1..100_000 |> Stream.map(&(&1 * 3)) |> Stream.filter(&Integer.is_odd/1) \end{minted} \end{itemize} \end{frame} \subsection{Comprehension} \begin{frame}[fragile]{Comprehension} \begin{itemize} \item Similar concept to Python's list comprehension, but on steroids (with pattern matching) \item Example: \begin{minted}{elixir} values = [good: 1, good: "a", bad: 3, good: 4] for {:good, n} when is_number(n) <- values, do: n * n \end{minted} \item Besides pattern matching and guards, filter expression can be used too: \begin{minted}{elixir} multiple_of_3? 
= fn n -> rem(n, 3) == 0 end for n <- 0..5, multiple_of_3?.(n), do: n * n \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{Comprehension (cont.)} \begin{itemize} \item Comprehensions also allow multiple generators and filters: \begin{minted}{elixir} dirs = ['/tmp/', '/usr/lib'] for dir <- dirs, file <- File.ls!(dir), path = Path.join(dir, file), File.regular?(path) do File.stat!(path).size end \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{Comprehension (cont.)} \begin{itemize} \item Using \mintinline{elixir}{:into}, result of a comprehension can be inserted into different data structures: \begin{minted}{elixir} for <<c <- " hello world ">>, c != ?\s, into: "", do: <<c>> for {key, val} <- %{"a" => 1, "b" => 2}, into: %{}, do: {key, val * val} \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{Comprehension (cont.)} Using comprehension, we can even implement quicksort! \begin{minted}{elixir} defmodule Sort do def qsort([]) do [] end def qsort([x | xs]) do qsort(for a when a < x <- xs, do: a) ++ [x] ++ qsort(for a when a >= x <- xs, do: a) end end \end{minted} \end{frame} \section{Other Useful Features} \subsection{Struct} \begin{frame}[fragile]{Struct} \begin{itemize} \item Extensions built on top of maps that provide compile-time checks and default values. \item Defined using \mintinline{elixir}{defstruct} construct: \begin{minted}{elixir} defmodule User do defstruct name: "John", age: 27 end %User{} %User{oops: :field} jane = %User{age: 10, name: "Jane"} %{jane | age: 11} %{name: name} = jane name jane.name is_map(jane) \end{minted} \end{itemize} \end{frame} \begin{frame}{Struct (cont.)} \begin{itemize} \item In Phoenix Framework, ecto data are abstracted as a struct. 
\item However, ecto provides us with special constructs (using macros) to specify the struct fields, so typically in using the Phoenix Framework, we rarely need to use \mintinline{elixir}{defstruct} \item However, other syntaxes such as initialising a struct, updating a field, etc. are still used extensively. \end{itemize} \end{frame} \subsection{Polymorphism with Protocols} \begin{frame}[fragile]{Protocol} \begin{itemize} \item A mechanism in Elixir to achieve polymorphism. \item Dispatching on a protocol is available to any data type as long as it implements the protocol. \item Defined using \mintinline{elixir}{defprotocol} \item Example: \begin{minted}{elixir} defprotocol Size do @doc "Calculates the size (not the length!) of a data structure" def size(data) end \end{minted} \item the \texttt{Size} protocol expects a 1-arity function called \texttt{size} to be implemented. \end{itemize} \end{frame} \begin{frame}[fragile]{Protocol (cont.)} \begin{itemize} \item Implementation is defined using \mintinline{elixir}{defimpl} \begin{minted}{elixir} defimpl Size, for: BitString do def size(string), do: byte_size(string) end defimpl Size, for: Map do def size(map), do: map_size(map) end defimpl Size, for: Tuple do def size(tuple), do: tuple_size(tuple) end \end{minted} \end{itemize} \end{frame} \begin{frame}[fragile]{Protocol (cont.)} \begin{itemize} \item With the protocol and implementation defined, we can start using it. \item Passing a data type that doesn't implement the protocol raises \mintinline{elixir}{Protocol.UndefinedError}. 
\begin{minted}{elixir} Size.size("foo") Size.size({:ok, "Hello"}) Size.size(%{label: "some label"}) Size.size([1, 2, 3]) Size.size(%User{}) \end{minted} \item Protocols can be implemented for all Elixir data types, as well as user-defined structs: \begin{minted}{elixir} defimpl Size, for: User do def size(_user), do: 2 end \end{minted} \end{itemize} \end{frame} \begin{frame}{Protocol (cont.)} \begin{itemize} \item Elixir ships with some built-in protocols. \item For example, \mintinline{elixir}{Enum} module provides functions that work with any data structure that implements the \mintinline{elixir}{Enumerable} protocol. \end{itemize} \end{frame} \subsection{Sigil} \begin{frame}{Sigils} \begin{itemize} \item Like Perl and Ruby, Elixir has sigils too. \item Strings are delimited by double-quotes. Double-quotes inside a string must be escaped. \item These kinds of representation problems is what sigils try to solve. \item Unlike Perl and Ruby, Elixir only allows a limited set of delimiters: \texttt{//, ||, "", '', (), [], {}, <>} \end{itemize} \end{frame} \begin{frame}[fragile]{String, Charlist, Word List Sigils} \begin{itemize} \item \texttt{~s} sigil is used to generate string \item \texttt{~c} sigil is used to generate char lists \item \texttt{~w} sigil is used to generate lists of words (separated by whitespace). \item The \texttt{~w} sigil also accepts the \texttt{c}, \texttt{s}, \texttt{a} modifiers (for char lists, strings and atoms respectively) to specify the data type of the elements of the resulting list. \end{itemize} \end{frame} \begin{frame}[fragile]{String, Charlist, Word List Sigils (cont.)} \begin{minted}{elixir} ~s(a string with "double quotes") ~c|a charlist with 'single quotes' and (parantheses)| ~w(foo bar bat) ~w(ok error)a \end{minted} \end{frame} \begin{frame}[fragile]{Regular Expressions} \begin{itemize} \item Elixir provides Perl-compatible regexes, as implemented by the PCRE library. 
\item \textbf{A word of caution}: ``Some people, when confronted with a problem, think I know, I'll use regular expressions. Now they have two problems'' (Zawinski, 1997)\footnote{There is also an interesting read at \url{https://blog.codinghorror.com/regular-expressions-now-you-have-two-problems/}} \item There exists a \mintinline{elixir}{Regex} module in Elixir, as well as the regex match operator \mintinline{elixir}{=~}, and the \texttt{~r} sigil to specify precompiled regex. \end{itemize} \end{frame} \begin{frame}[fragile]{Regular Expressions} \begin{minted}{elixir} regex = ~r/foo|bar/ "foo" =~ regex "bar" =~ regex "bat" =~ regex \end{minted} \end{frame} \subsection{Typespec} \begin{frame}{Typespec} \begin{itemize} \item One advantage of using Elixir is that we can use tooling for Erlang, a 30 years old language. \item This includes type specs, a system of notating types in Erlang/Elixir. \item The Elixir compiler doesn't do type check, but one can use the \texttt{dialyzer} tool to perform type check. \item Owing to its age, \texttt{dialyzer} is more mature than, say, \texttt{mypy}, especially in terms of type inference\footnote{More information on Erlang type inference in \url{https://it.uu.se/research/group/hipe/papers/succ_types.pdf}} \end{itemize} \end{frame} \begin{frame}{Typespec (cont.)} \begin{itemize} \item Not compulsory, but being able to read type spec helps a lot in reading documentation. \item Some code that I wrote in Source Academy have type specs. 
\item In source code, typically notated using module attribute \mintinline{elixir}{@spec} \item More information in \url{https://hexdocs.pm/elixir/typespecs.html} \end{itemize} \end{frame} \section{Source Academy Backend: Cadet} \subsection{Structure} \begin{frame}{Structure} \begin{itemize} \item Phoenix Framework: \begin{itemize} \item Router \item Plugs \end{itemize} \item MVC \begin{itemize} \item Model \begin{itemize} \item Contexts: inside \texttt{lib/cadet/} \item Migrations: inside \texttt{priv/repo/migrations/} \item Seeds: inside \texttt{priv/repo/seeds.exs} \end{itemize} \item View: \texttt{lib/cadet\_web/views/} -- renders json \item Controller: inside \texttt{lib/cadet\_web/controllers/} \end{itemize} \end{itemize} \end{frame} \begin{frame}{Structure (Cont.)} \begin{itemize} \item Jobs: inside \texttt{lib/cadet/jobs/} \begin{itemize} \item Updater, XML Parser \item Autograder \end{itemize} \item Mix tasks for convenience: inside \texttt{lib/mix/tasks/} \item Config files: inside \texttt{config/} \item Tests: 98\% test coverage. Every feature are tested. \end{itemize} \end{frame} \end{document}
{ "alphanum_fraction": 0.6968052452, "author": null, "avg_line_length": 34.0493311037, "converted": null, "ext": "tex", "file": null, "hexsha": "f3272da705a15b0b4d68da331c1e72ce0e3917c1", "include": null, "lang": "TeX", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8b6d79a4fa593f05933dfd01978d93b1c7862f3b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "indocomsoft/elixir-workshop", "max_forks_repo_path": "elixir-workshop.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "8b6d79a4fa593f05933dfd01978d93b1c7862f3b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "indocomsoft/elixir-workshop", "max_issues_repo_path": "elixir-workshop.tex", "max_line_length": 307, "max_stars_count": 3, "max_stars_repo_head_hexsha": "8b6d79a4fa593f05933dfd01978d93b1c7862f3b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "indocomsoft/elixir-workshop", "max_stars_repo_path": "elixir-workshop.tex", "max_stars_repo_stars_event_max_datetime": "2020-08-06T09:44:19.000Z", "max_stars_repo_stars_event_min_datetime": "2019-05-10T05:03:25.000Z", "num_tokens": 12305, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 40723 }
# Draws wireframe views of a sphere (latitude/longitude grids in orthographic
# projection), plus a spin axis and a circular spin arrow, and saves one PDF
# per inclination value in the list at the bottom of the file.
import matplotlib.pyplot as plt
import numpy as np
import os


def RAxisAngle(axis=[0, 1, 0], theta=0):
    """
    Return the 3x3 rotation matrix for a rotation by angle ``theta``
    (radians) about ``axis``, built from the axis-angle (Rodrigues) formula.

    NOTE(review): ``axis`` is assumed to be a unit vector -- the matrix is
    only a proper rotation when ``|axis| == 1``; this is not checked here.
    """
    cost = np.cos(theta)
    sint = np.sin(theta)

    # Row-major entries of the standard axis-angle rotation matrix.
    return np.reshape(
        [
            cost + axis[0] * axis[0] * (1 - cost),
            axis[0] * axis[1] * (1 - cost) - axis[2] * sint,
            axis[0] * axis[2] * (1 - cost) + axis[1] * sint,
            axis[1] * axis[0] * (1 - cost) + axis[2] * sint,
            cost + axis[1] * axis[1] * (1 - cost),
            axis[1] * axis[2] * (1 - cost) - axis[0] * sint,
            axis[2] * axis[0] * (1 - cost) - axis[1] * sint,
            axis[2] * axis[1] * (1 - cost) + axis[0] * sint,
            cost + axis[2] * axis[2] * (1 - cost),
        ],
        [3, 3],
    )


def get_ortho_latitude_lines(inc=np.pi / 2, obl=0, dlat=np.pi / 6, npts=1000):
    """
    Return the lines of constant latitude on an orthographic projection.

    Parameters: ``inc`` is the inclination of the rotation axis, ``obl`` the
    obliquity (both radians), ``dlat`` the latitude spacing, and ``npts`` the
    number of points per line.  Returns a list of ``(x, y)`` coordinate-array
    pairs; points on the far (hidden) side of the sphere are set to NaN so
    matplotlib leaves gaps there.
    """
    # Angular quantities
    ci = np.cos(inc)
    si = np.sin(inc)
    co = np.cos(obl)
    so = np.sin(obl)

    # Latitude lines.  The [1:] drops the lat = -pi/2 pole entry.
    res = []
    latlines = np.arange(-np.pi / 2, np.pi / 2, dlat)[1:]
    for lat in latlines:

        # Figure out the equation of the ellipse.  Each latitude circle
        # projects to an ellipse of semi-axes (a, b) centered at (0, y0);
        # y1/y2 are its lower and upper halves.
        y0 = np.sin(lat) * si
        a = np.cos(lat)
        b = a * ci
        x = np.linspace(-a, a, npts)
        y1 = y0 - b * np.sqrt(1 - (x / a) ** 2)
        y2 = y0 + b * np.sqrt(1 - (x / a) ** 2)

        # Mask lines on the backside.  The visible/hidden split flips
        # depending on which hemisphere of inclinations we are in.
        if si != 0:
            if inc > np.pi / 2:
                ymax = y1[np.argmax(x ** 2 + y1 ** 2)]
                y1[y1 < ymax] = np.nan
                ymax = y2[np.argmax(x ** 2 + y2 ** 2)]
                y2[y2 < ymax] = np.nan
            else:
                ymax = y1[np.argmax(x ** 2 + y1 ** 2)]
                y1[y1 > ymax] = np.nan
                ymax = y2[np.argmax(x ** 2 + y2 ** 2)]
                y2[y2 > ymax] = np.nan

        # Rotate them (in-plane rotation by the obliquity).
        for y in (y1, y2):
            xr = -x * co + y * so
            yr = x * so + y * co
            res.append((xr, yr))

    return res


def get_ortho_longitude_lines(
    inc=np.pi / 2, obl=0, theta=0, dlon=np.pi / 6, npts=1000
):
    """
    Return the lines of constant longitude on an orthographic projection.

    Parameters: ``inc`` is the inclination, ``obl`` the obliquity, ``theta``
    the rotational phase (all radians), ``dlon`` the longitude spacing, and
    ``npts`` the number of points per line.  Returns a list of ``(x, y)``
    coordinate-array pairs with hidden-side points masked as NaN.
    """

    # Angular quantities
    ci = np.cos(inc)
    si = np.sin(inc)
    co = np.cos(obl)
    so = np.sin(obl)

    # Are we (essentially) equator-on?  Within +/- 2 degrees of i = 90 the
    # general masking logic below degenerates, so that case is special-cased.
    equator_on = (inc > 88 * np.pi / 180) and (inc < 92 * np.pi / 180)

    # Longitude grid lines
    res = []
    if equator_on:
        offsets = np.arange(-np.pi / 2, np.pi / 2, dlon)
    else:
        offsets = np.arange(0, 2 * np.pi, dlon)

    for offset in offsets:

        # Super hacky, sorry. This can probably
        # be coded up more intelligently.
        # sgns selects which hemisphere half-circles get drawn and bsgn
        # flips the x-parametrization when the line faces away.
        if equator_on:
            sgns = [1]
            if np.cos(theta + offset) >= 0:
                bsgn = 1
            else:
                bsgn = -1
        else:
            bsgn = 1
            if np.cos(theta + offset) >= 0:
                sgns = np.array([1, -1])
            else:
                sgns = np.array([-1, 1])

        for lon, sgn in zip([0, np.pi], sgns):

            # Viewed at i = 90: parametrize the meridian half-circle in y,
            # then recover x and the out-of-plane coordinate z.
            y = np.linspace(-1, 1, npts)
            b = bsgn * np.sin(lon - theta - offset)
            x = b * np.sqrt(1 - y ** 2)
            z = sgn * np.sqrt(np.abs(1 - x ** 2 - y ** 2))

            if equator_on:
                pass
            else:
                # Rotate by the inclination (about the x axis, tilting the
                # pole toward/away from the viewer).
                R = RAxisAngle([1, 0, 0], np.pi / 2 - inc)
                v = np.vstack(
                    (x.reshape(1, -1), y.reshape(1, -1), z.reshape(1, -1))
                )
                x, y, _ = np.dot(R, v)

                # Mask lines on the backside
                if si != 0:
                    if inc < np.pi / 2:
                        imax = np.argmax(x ** 2 + y ** 2)
                        y[: imax + 1] = np.nan
                    else:
                        imax = np.argmax(x ** 2 + y ** 2)
                        y[imax:] = np.nan

            # Rotate by the obliquity
            xr = -x * co + y * so
            yr = x * so + y * co
            res.append((xr, yr))

    return res


# Rotational phase in degrees (converted to radians where used below).
theta = 15
for inc in [30, 45, 60, 75, 85]:

    # Draw the wireframe: the black unit circle is the limb of the sphere;
    # the slightly larger white circle drawn on top crops stray line ends.
    fig, ax = plt.subplots(1, figsize=(2.5, 2.5))
    x = np.linspace(-1, 1, 10000)
    y = np.sqrt(1 - x ** 2)
    ax.set_xlim(-1.5, 1.5)
    ax.set_ylim(-1.5, 1.5)
    ax.plot(x, y, "k-", lw=1, zorder=102)
    ax.plot(x, -y, "k-", lw=1, zorder=102)
    r = 1.035
    x = np.linspace(-r, r, 10000)
    y = np.sqrt(r ** 2 - x ** 2)
    ax.plot(x, y, "w-", lw=1, zorder=103)
    ax.plot(x, -y, "w-", lw=1, zorder=103)
    # With the default dlat, lat_lines has 10 entries (two half-ellipses per
    # latitude); n == 4 is one half of the equator, highlighted in black.
    lat_lines = get_ortho_latitude_lines(inc=inc * np.pi / 180)
    for n, l in enumerate(lat_lines):
        if n == 4:
            ax.plot(l[0], l[1], "k", lw=1, zorder=104)
        else:
            ax.plot(l[0], l[1], "#aaaaaa", lw=0.5, zorder=100)
    lon_lines = get_ortho_longitude_lines(
        inc=inc * np.pi / 180, theta=np.pi + theta * np.pi / 180
    )
    for n, l in enumerate(lon_lines):
        ax.plot(l[0], l[1], "#aaaaaa", lw=0.5, zorder=100)

    # Draw the axis: a vertical line through the pole, blanked between the
    # visible pole and the limb so it does not cross the disk.
    if inc < 50:
        ymax = 2.25 / (0.5 + inc / 50)
    else:
        ymax = 1.5
    y = np.linspace(-ymax, ymax, 1000) * np.sin(inc * np.pi / 180)
    ypole = np.sin(inc * np.pi / 180)
    y[(y < ypole) & (y > -1)] = np.nan
    ax.plot(np.zeros_like(y), y, "k-", lw=0.75, zorder=104)

    # Hack a circular arrow indicating the spin: reuse the two halves of the
    # highest latitude circle (entries 8 and 9 for the default dlat), shrunk
    # and shifted above the pole, then cap one end with an arrow head.
    shrink = 0.4
    offset = 0.45
    x, y = lat_lines[8]
    y = offset + shrink * (y - ypole) + ypole
    x = shrink * x
    ax.plot(x, y, "r", zorder=105)
    x, y = lat_lines[9]
    y = offset + shrink * (y - ypole) + ypole
    x = shrink * x
    ax.plot(x[:100], y[:100], "r", zorder=105)
    ax.plot(x[-100:], y[-100:], "r", zorder=105)
    xa = x[100]
    ya = y[100]
    dx = 100 * (x[100] - x[99])
    dy = 100 * (y[100] - y[99])
    plt.arrow(
        xa,
        ya,
        dx,
        dy,
        length_includes_head=True,
        head_length=np.sqrt(dx ** 2 + dy ** 2),
        head_width=0.1,
        color="r",
        zorder=105,
    )

    ax.axis("off")
    # Output file name: this script's path with ".py" -> "_<inc>.pdf".
    fig.savefig(
        os.path.abspath(__file__).replace(".py", "_{}.pdf".format(inc)),
        bbox_inches="tight",
    )
{ "alphanum_fraction": 0.4495844441, "author": null, "avg_line_length": 28.2168141593, "converted": null, "ext": "py", "file": null, "hexsha": "04438c3c7b892a322f0d2303370f2006520a71e9", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-01-25T16:26:15.000Z", "max_forks_repo_forks_event_min_datetime": "2021-01-21T15:41:58.000Z", "max_forks_repo_head_hexsha": "638e07e3264cfa0a1795e3eb34660e91117f8edd", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "rodluger/mapping_stellar_surfaces", "max_forks_repo_path": "paper1/figures/wireframe.py", "max_issues_count": 10, "max_issues_repo_head_hexsha": "638e07e3264cfa0a1795e3eb34660e91117f8edd", "max_issues_repo_issues_event_max_datetime": "2021-03-30T14:35:16.000Z", "max_issues_repo_issues_event_min_datetime": "2021-01-21T15:55:53.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "rodluger/mapping_stellar_surfaces", "max_issues_repo_path": "paper1/figures/wireframe.py", "max_line_length": 78, "max_stars_count": 10, "max_stars_repo_head_hexsha": "638e07e3264cfa0a1795e3eb34660e91117f8edd", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "rodluger/mapping_stellar_surfaces", "max_stars_repo_path": "paper1/figures/wireframe.py", "max_stars_repo_stars_event_max_datetime": "2021-12-19T17:49:28.000Z", "max_stars_repo_stars_event_min_datetime": "2021-01-21T17:03:26.000Z", "num_tokens": 2117, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 6377 }
import numpy as np


def KnuthSampling(total, m, left=-1):
    """
    Knuth's selection sampling (TAOCP Vol. 2, Algorithm S): draw a uniform
    random sample of ``m`` distinct indices from ``range(total)``, optionally
    excluding one index.

    :param total: size of the index population (indices ``0 .. total - 1``)
    :param m: number of indices to sample
    :param left: an index to exclude from the population
                 (default ``-1`` = exclude nothing)
    :return: sorted list of the ``m`` sampled indices
    """
    res = []
    # Size of the population actually eligible for sampling.  The original
    # code left n == total even when ``left`` was excluded, which biased the
    # selection probabilities and could return fewer than m items; for the
    # default left == -1 the behavior is unchanged.
    n = total - 1 if 0 <= left < total else total
    for i in range(total):
        if i == left:
            # Excluded index: it must not consume a population slot.
            continue
        # Select index i with probability (slots still needed) / (slots left).
        if n > 0 and np.random.random() < m / n:
            res.append(i)
            m -= 1
        n -= 1
    return res
{ "alphanum_fraction": 0.4417910448, "author": null, "avg_line_length": 19.7058823529, "converted": null, "ext": "py", "file": null, "hexsha": "762d264a66ce0a0ff7a79f41bb9d510df8615799", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "009bc8249d45d61a3d9f52146e447535005e79d2", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "shalei120/OxLegalReasoning", "max_forks_repo_path": "utils.py", "max_issues_count": 3, "max_issues_repo_head_hexsha": "009bc8249d45d61a3d9f52146e447535005e79d2", "max_issues_repo_issues_event_max_datetime": "2020-08-17T14:09:18.000Z", "max_issues_repo_issues_event_min_datetime": "2020-05-30T10:28:25.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "shalei120/OxLegalReasoning", "max_issues_repo_path": "utils.py", "max_line_length": 42, "max_stars_count": 4, "max_stars_repo_head_hexsha": "009bc8249d45d61a3d9f52146e447535005e79d2", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "shalei120/OxLegalReasoning", "max_stars_repo_path": "utils.py", "max_stars_repo_stars_event_max_datetime": "2021-08-04T11:17:32.000Z", "max_stars_repo_stars_event_min_datetime": "2020-07-09T04:11:05.000Z", "num_tokens": 94, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 335 }
Require Import Morphisms Coq.Floats.SpecFloat.
From FloatCohorts Require Import Option Arith FloatPair Cohorts Tactics.

Open Scope Z.

(* "Natural" normalization of a float pair: shift trailing zero bits from the
   mantissa into the exponent until the mantissa is odd. *)
Section natural_normalization.

  (* Strip trailing zero bits of [m], bumping [e] once per stripped bit.
     Structural recursion on [m] guarantees termination. *)
  Fixpoint maximize_e' (m : positive) (e : Z) {struct m} : (positive * Z) :=
    match m with
    | (m'~0)%positive => maximize_e' m' (e + 1)
    | _ => (m, e)
    end.

  (** maximize exponent [same as] minimize mantissa [same as] make mantissa odd **)
  Definition maximize_e (fp : float_pair) :=
    let '(m, e) := (FPnum fp, FPexp fp) in
    let '(m', e') := maximize_e' m e in
    FPair m' e'.

  (* [maximize_e] either leaves the pair untouched (odd mantissa already)
     or is an exponent increment by some positive amount. *)
  Lemma maximize_e_is_inc_e_by (fp : float_pair) :
    maximize_e fp = fp \/ exists de, inc_e_by fp de = Some (maximize_e fp).
  Proof.
    destruct fp as (m, e).
    generalize dependent e.
    induction m; auto.
    intros.
    right.
    specialize (IHm (e + 1)).
    destruct IHm.
    - exists 1%positive.
      cbn in *.
      congruence.
    - destruct H as [de H].
      exists (de + 1)%positive.
      cbn in *.
      rewrite <-H.
      clear.
      unfold inc_e_by.
      rewrite Pos.iter_add.
      reflexivity.
  Qed.

  (* Normalization preserves the represented value (cohort equivalence). *)
  Lemma maximize_e_equiv (fp : float_pair) :
    maximize_e fp === fp.
  Proof.
    pose proof maximize_e_is_inc_e_by fp.
    destruct H.
    rewrite H; reflexivity.
    destruct H.
    enough (T : Some (maximize_e fp) === Some fp)
      by (inversion T; congruence).
    rewrite <-H.
    apply inc_e_by_equiv.
    rewrite H.
    constructor.
  Qed.

  (* [maximize_e] is a morphism with respect to cohort equivalence. *)
  Instance maximize_e_proper :
    Proper (equiv ==> equiv) maximize_e.
  Proof.
    intros fp1 fp2 EQ.
    repeat rewrite maximize_e_equiv.
    assumption.
  Qed.

End natural_normalization.

(* Normalization to the canonical (bounded) IEEE 754 representation for a
   given precision [prec] and exponent bound [emax]. *)
Section IEEE754_normalization.

  Variable prec emax : Z.
  Let emin := emin emax prec.

  Hypothesis prec_gt_0 : 0 < prec.
  Hypothesis Hmax : prec < emax.

  (* A pair is "normal" iff SpecFloat's [bounded] accepts it. *)
  Definition normal_pair (fp : float_pair) :=
    let '(m, e) := (FPnum fp, FPexp fp) in
    bounded prec emax m e.

  (* Arithmetic characterization of [bounded]: either a subnormal
     (short mantissa at the minimum exponent) or a normal number
     (full-precision mantissa with the exponent in range). *)
  Lemma bounded_arithmetic (m : positive) (e : Z) :
    bounded prec emax m e = true
    <->
    (Z.pos (Pos.size m) < prec /\ e = emin)
    \/ (Z.pos (Pos.size m) = prec /\ emin <= e <= emax - prec).
  Proof.
    intros.
    unfold bounded, canonical_mantissa, fexp, emin, SpecFloat.emin.
    destruct (Z_lt_le_dec (Z.pos (digits2_pos m) + e - prec) (3 - emax - prec)).
    all: try rewrite Z.max_r in * by lia.
    all: try rewrite Z.max_l in * by lia.
    all: split; intros.
    all: rewrite Bool.andb_true_iff in *.
    all: rewrite <-Zeq_is_eq_bool in *.
    all: replace (digits2_pos m) with (Pos.size m) in * by reflexivity.
    all: rewrite Z.leb_le in *.
    all: lia.
  Qed.

  (** ** normalization *)
  (* first try to satisfy the left side of [bounded_arithmetic],
     then try the right side if that doesn't work *)
  Definition normalize_pair (fp : float_pair) : option float_pair :=
    match set_e fp emin with
    | None => None
    | Some f1 =>
      if Z.pos (Pos.size (FPnum f1)) <=? prec
      then Some f1
      else match set_digits_m fp (Z.to_pos prec) with
           | None => None
           | Some f2 =>
             if andb (emin <=? FPexp f2) (FPexp f2 <=? emax - prec)
             then Some f2
             else None
           end
    end.

  (* When normalization succeeds, the result is cohort-equivalent to the
     input. *)
  Lemma normalize_pair_equiv (fp : float_pair) :
    is_Some (normalize_pair fp) ->
    normalize_pair fp === Some fp.
  Proof.
    unfold normalize_pair.
    intros.
    repeat break_match; try (inversion H; fail).
    - rewrite <-Heqo.
      apply set_e_equiv.
      rewrite Heqo; constructor.
    - rewrite <-Heqo0.
      apply set_digits_m_equiv.
      rewrite Heqo0; constructor.
  Qed.

  (* At most one normal pair per cohort: equivalent normal pairs are equal. *)
  Lemma normal_pair_unique (fp1 fp2 : float_pair) :
    normal_pair fp1 = true ->
    normal_pair fp2 = true ->
    fp1 === fp2 ->
    fp1 = fp2.
  Proof.
    intros.
    destruct fp1 as (m1, e1), fp2 as (m2, e2).
    unfold normal_pair in *.
    rewrite bounded_arithmetic in *.
    cbn in H, H0.
    destruct H as [[M1 E1] | [M1 E1]], H0 as [[M2 E2] | [M2 E2]].
    - apply exponent_unique; try assumption.
      subst; reflexivity.
    - subst.
      apply equiv_neq_m_digits in H1; [| cbn; lia].
      cbn in *; lia.
    - subst.
      symmetry in H1.
      apply equiv_neq_m_digits in H1; [| cbn; lia].
      cbn in *; lia.
    - apply digits_m_unique; try assumption.
      cbn.
      congruence.
  Qed.

  (* Soundness: whatever [normalize_pair] returns is indeed normal. *)
  Lemma normalize_pair_normal (fp : float_pair) :
    forall fp', normalize_pair fp = Some fp' ->
    normal_pair fp' = true.
  Proof.
    intros.
    unfold normal_pair.
    apply bounded_arithmetic.
    unfold normalize_pair in H.
    repeat break_match; inversion H; subst; clear H.
    - apply Z.leb_le in Heqb.
      apply set_e_res in Heqo.
      rewrite Heqo; clear Heqo.
      unfold emin, SpecFloat.emin.
      lia.
    - clear Heqo Heqb.
      apply andb_prop in Heqb0.
      destruct Heqb0.
      rewrite Z.leb_le in *.
      apply set_digits_m_res in Heqo0.
      unfold digits_m in Heqo0.
      rewrite Heqo0; clear Heqo0.
      unfold emin, SpecFloat.emin in *.
      rewrite Z2Pos.id by assumption.
      lia.
  Qed.

  (* Completeness of failure: if normalization fails, nothing in the input's
     cohort is normal. *)
  Theorem normalize_pair_None_inv (fp : float_pair) :
    normalize_pair fp = None ->
    forall fp', fp' === fp -> normal_pair fp' = false.
  Proof.
    intros.
    destruct (normal_pair fp') eqn:NP'; [| reflexivity].
    exfalso.
    unfold normal_pair in NP'.
    apply bounded_arithmetic in NP'.
    unfold normalize_pair in H.
    repeat break_match; inversion_clear H.
    - rewrite Bool.andb_false_iff in *.
      apply set_e_spec in Heqo; destruct Heqo.
      apply set_digits_m_spec in Heqo0; destruct Heqo0.
      (* cleanup *)
      replace (Pos.size (FPnum f)) with (digits_m f) in * by reflexivity. (* poor man's fold 1 *)
      replace (Pos.size (FPnum fp')) with (digits_m fp') in * by reflexivity. (* poor man's fold 2 *)
      rename f into f1, f0 into f2, fp' into f.
      assert (EQ1 : f === f1) by (rewrite H, H0; reflexivity).
      assert (EQ2 : f === f2) by (rewrite H0, H2; reflexivity).
      clear H H2 H0 fp.
      move f before f1; move f2 before f1.
      rename H1 into E1, Heqb into M1, H3 into M2.
      (* Case split on which bound of the exponent-range check failed,
         then derive a contradiction with [NP'] in each branch. *)
      destruct Heqb0 as [E2 | E2]; rewrite Z.leb_gt in *.
      + destruct NP' as [[M E] | [M E]].
        * clear - EQ1 M1 E1 M E.
          enough (FPexp f > FPexp f1) by lia.
          eapply equiv_neq_m_digits.
          assumption.
          lia.
        * clear - EQ2 M2 E2 M E.
          enough (digits_m f2 > digits_m f)%positive
            by (rewrite <-M, Pos2Z.id in M2; lia).
          eapply equiv_neq_e_digits.
          symmetry; assumption.
          lia.
      + destruct NP' as [[M E] | [M E]].
        * clear - EQ1 M1 E1 M E.
          enough (FPexp f > FPexp f1) by lia.
          eapply equiv_neq_m_digits.
          assumption.
          lia.
        * clear - EQ2 M2 E2 M E.
          enough (digits_m f > digits_m f2)%positive
            by (rewrite <-M, Pos2Z.id in M2; lia).
          eapply equiv_neq_e_digits.
          assumption.
          lia.
    - rewrite Z.leb_gt in *.
      apply set_e_spec in Heqo; destruct Heqo.
      apply set_digits_m_None_inv with (fp':=fp') in Heqo0; [ |assumption].
      unfold digits_m in *.
      destruct NP' as [[M E] | [M E]].
      + replace fp' with f in *.
        lia.
        apply exponent_unique.
        lia.
        rewrite H, H0; reflexivity.
      + rewrite <-M in Heqo0.
        rewrite Pos2Z.id in Heqo0.
        lia.
    - apply set_e_None_inv with (fp':=fp') in Heqo; [lia | assumption].
  Qed.

  (* Full specification: [normalize_pair] returns exactly the (unique)
     normal member of the input's cohort, when one exists. *)
  Theorem normalize_pair_spec (fp nfp : float_pair) :
    normalize_pair fp = Some nfp <-> (fp === nfp /\ normal_pair nfp = true).
  Proof.
    split; intros.
    - split.
      + assert (SN : is_Some (normalize_pair fp)) by (rewrite H; constructor).
        apply normalize_pair_equiv in SN.
        rewrite H in SN.
        inversion SN.
        symmetry; assumption.
      + eapply normalize_pair_normal.
        eassumption.
    - destruct H as [EQ N].
      destruct normalize_pair eqn:NP.
      + assert (SN : is_Some (normalize_pair fp)) by (rewrite NP; constructor).
        apply normalize_pair_equiv in SN.
        rewrite NP in SN.
        apply normalize_pair_normal in NP.
        f_equal.
        apply normal_pair_unique.
        assumption.
        assumption.
        rewrite <-EQ.
        inversion SN.
        assumption.
      + exfalso.
        apply normalize_pair_None_inv with (fp':=nfp) in NP.
        congruence.
        symmetry; assumption.
  Qed.

End IEEE754_normalization.
{ "alphanum_fraction": null, "author": "zoickx", "avg_line_length": null, "converted": null, "ext": null, "file": null, "hexsha": null, "include": null, "lang": null, "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": "github-repos/coq/zoickx-float-cohorts/float-cohorts-524fe9ba2a992961c142eec6d2fd74e3869af4ce/Normalization.v", "reason": null, "repo": "float-cohorts", "save_path": "github-repos/coq/zoickx-float-cohorts", "sha": "524fe9ba2a992961c142eec6d2fd74e3869af4ce", "size": null }
from sentence_transformers import SentenceTransformer
import numpy as np
import re
from sklearn.metrics.pairwise import cosine_similarity

model = SentenceTransformer('roberta-large-nli-stsb-mean-tokens')


def get_similarity(question, target, evall):
    """
    Embed the three texts with the module-level sentence-transformer model
    and report the pairwise cosine similarities between them.

    Newlines/carriage returns are stripped before encoding.  Prints the
    three similarities and returns them as
    (question-vs-target, question-vs-eval, target-vs-eval).
    """
    # Strip line breaks, keeping the three texts in a fixed order.
    cleaned = [re.sub('[\n\r]+', '', text) for text in (question, target, evall)]

    # One batched encode call; reshape each embedding into a row vector
    # as required by sklearn's pairwise API.
    embeddings = model.encode(cleaned)
    rows = [vec.reshape(1, -1) for vec in embeddings]

    bert_q_t = cosine_similarity(rows[0], rows[1])
    bert_q_e = cosine_similarity(rows[0], rows[2])
    bert_t_e = cosine_similarity(rows[1], rows[2])

    print('====== ROBERTA_STS ======')
    print('Question x Target: {}'.format(bert_q_t))
    print('Question x Eval: {}'.format(bert_q_e))
    print('Target x Eval: {}'.format(bert_t_e))
    return bert_q_t, bert_q_e, bert_t_e
{ "alphanum_fraction": 0.6729362591, "author": null, "avg_line_length": 35.4444444444, "converted": null, "ext": "py", "file": null, "hexsha": "a3f327ef31c78dd5e396a91a1eb099e67db5b006", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2020-11-20T07:07:24.000Z", "max_forks_repo_forks_event_min_datetime": "2020-11-15T02:39:44.000Z", "max_forks_repo_head_hexsha": "e0241e1ded5ef868633f6b761581a1b45a6e1b9f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "renatoviolin/autograde-deeplearning", "max_forks_repo_path": "src/roberta_sts.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "e0241e1ded5ef868633f6b761581a1b45a6e1b9f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "renatoviolin/autograde-deeplearning", "max_issues_repo_path": "src/roberta_sts.py", "max_line_length": 86, "max_stars_count": 6, "max_stars_repo_head_hexsha": "e0241e1ded5ef868633f6b761581a1b45a6e1b9f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "renatoviolin/autograde-deeplearning", "max_stars_repo_path": "src/roberta_sts.py", "max_stars_repo_stars_event_max_datetime": "2022-03-23T08:20:11.000Z", "max_stars_repo_stars_event_min_datetime": "2020-11-14T15:50:34.000Z", "num_tokens": 269, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 957 }
module WaveformCommunications

using QuadGK, Statistics

export cosinepulse, halfsinepulse, rcpulse, srrcpulse, gaussianpulse,
       Constellation, pam, qam, psk,
       Pulse, pulseshaper, eyediag

# The `Pulse` and `Constellation` types and the exported helpers are defined
# in these included files.
include("pulses.jl")
include("constellations.jl")
include("utils.jl")

"""
    pulseshaper(c, pulse, nsyms = 20)

Return a function of time that generates a pulse-shaped waveform.

Symbols are drawn at random from constellation `c`. The pulse shape is
specified by `pulse`. The waveform starts at time 0 and ends after `nsyms`
symbols have been generated.
"""
function pulseshaper(c::Constellation, pulse::Pulse, nsyms = 20)
    # Draw the symbol sequence once, so the returned closure is a fixed
    # (deterministic) function of `t` for this realization.
    syms = rand(c, nsyms)
    return t -> begin
        # Superpose one pulse per symbol, each delayed by a multiple of
        # pulse.Tp (presumably the symbol period -- confirm in pulses.jl).
        # The broadcasts (.*, .-) allow `t` to be a scalar or an array of
        # sample times.
        s = zero(ComplexF64)
        for k = 1:nsyms
            s += syms[k].*pulse.(t.-(k-1)*pulse.Tp)
        end
        return s
    end
end

end
{ "alphanum_fraction": 0.6816524909, "author": null, "avg_line_length": 25.71875, "converted": null, "ext": "jl", "file": null, "hexsha": "a66c17055c4b03bbb80d0da53b05c00948891297", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5f5a66d679dfae1e9b97c08e22e6efaa57938d1e", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "mbaz/WaveformCommunications.jl", "max_forks_repo_path": "src/WaveformCommunications.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "5f5a66d679dfae1e9b97c08e22e6efaa57938d1e", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "mbaz/WaveformCommunications.jl", "max_issues_repo_path": "src/WaveformCommunications.jl", "max_line_length": 90, "max_stars_count": 1, "max_stars_repo_head_hexsha": "5f5a66d679dfae1e9b97c08e22e6efaa57938d1e", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "mbaz/WaveformCommunications.jl", "max_stars_repo_path": "src/WaveformCommunications.jl", "max_stars_repo_stars_event_max_datetime": "2021-12-25T13:03:43.000Z", "max_stars_repo_stars_event_min_datetime": "2021-12-25T13:03:43.000Z", "num_tokens": 234, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 823 }
import pickle

import cv2
import numpy as np

from src.utils.image import crop_person_img


def load_gei(datapath, dim=None, crop_person=False, flatten=True):
    """Load Gait Energy Image (GEI) samples and labels from a pickled data set.

    Parameters
    ----------
    datapath:
        Path to a pickle file holding an indexable collection of records,
        each with a ``'sample'`` image array and a 1-based integer ``'label'``.
    dim:
        Optional target size; when given, every sample is resized with
        bicubic interpolation.
    crop_person:
        When True, crop each sample to the person region first.
    flatten:
        When True (default), flatten each sample into a 1-D feature vector.

    Returns
    -------
    A tuple ``(X, y)`` where ``X`` is a float64 ``numpy.ndarray`` stacking
    the (optionally cropped/resized/flattened) samples and ``y`` holds the
    corresponding 0-based integer labels.
    """
    # NOTE(security): pickle.load can execute arbitrary code from the file;
    # only load data sets from trusted sources.
    with open(datapath, 'rb') as f:
        data = pickle.load(f)

    # Convert every sample to float64 once, up front.
    X = [data[idx]['sample'].astype('float64') for idx in range(len(data))]

    if crop_person:
        # crop_person_img is an external helper; keep the explicit astype in
        # case it does not preserve the input dtype.
        X = [crop_person_img(x).astype('float64') for x in X]

    if dim is not None:
        # cv2.resize returns the input dtype for float64 images, so the
        # astype('float64') previously applied here was redundant.
        X = [cv2.resize(x, dim, interpolation=cv2.INTER_CUBIC) for x in X]

    if flatten:
        # ndarray.flatten always preserves dtype; no conversion needed.
        X = [x.flatten() for x in X]

    X = np.array(X)
    # Stored labels are 1-based; shift to 0-based class indices.
    y = np.array([data[idx]['label'] for idx in range(len(data))]) - 1

    return X, y
{ "alphanum_fraction": 0.6291970803, "author": null, "avg_line_length": 23.6206896552, "converted": null, "ext": "py", "file": null, "hexsha": "d195c9d4b49a0663b3f6dba0492c5341b89ef587", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2022-01-17T20:18:36.000Z", "max_forks_repo_forks_event_min_datetime": "2021-12-29T16:33:58.000Z", "max_forks_repo_head_hexsha": "b293fc8f3d2a7c1a8d584ce7fc7a66d863b2ce6c", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "wesleylp/libras", "max_forks_repo_path": "src/utils/feats.py", "max_issues_count": 1, "max_issues_repo_head_hexsha": "b293fc8f3d2a7c1a8d584ce7fc7a66d863b2ce6c", "max_issues_repo_issues_event_max_datetime": "2021-12-22T13:02:25.000Z", "max_issues_repo_issues_event_min_datetime": "2021-12-12T08:10:23.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "wesleylp/libras", "max_issues_repo_path": "src/utils/feats.py", "max_line_length": 92, "max_stars_count": 8, "max_stars_repo_head_hexsha": "b293fc8f3d2a7c1a8d584ce7fc7a66d863b2ce6c", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "wesleylp/libras", "max_stars_repo_path": "src/utils/feats.py", "max_stars_repo_stars_event_max_datetime": "2022-01-03T00:47:01.000Z", "max_stars_repo_stars_event_min_datetime": "2021-04-16T12:45:43.000Z", "num_tokens": 198, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 685 }
import re
from typing import Dict, List, Optional, Tuple

import numpy
import pandas

from nonbonded.library.models.datasets import DataSet, DataSetEntry
from nonbonded.library.models.projects import Benchmark
from nonbonded.library.models.results import BenchmarkResult, TargetResultType
from nonbonded.library.models.targets import (
    EvaluatorTarget,
    OptimizationTarget,
    RechargeTarget,
)
from nonbonded.library.statistics.statistics import StatisticType


def property_type_to_title(property_type: str, n_components: int):
    """Build a LaTeX-style axis/plot title (e.g. ``"$\\rho$ (g/ml)"``) for a
    physical property type, appending the property's default unit when the
    optional ``openff.evaluator`` package is importable.
    """
    # Local import: openff.evaluator is an optional dependency; without it
    # the raw unit string is omitted from the title.
    try:
        from openff.evaluator import unit
    except ImportError:
        unit = None

    abbreviations = {
        "Density": r"\rho",
        "DielectricConstant": r"\epsilon",
        "EnthalpyOfMixing": r"H_{mix}",
        "EnthalpyOfVaporization": r"H_{vap}",
        "ExcessMolarVolume": r"V_{ex}",
        "SolvationFreeEnergy": r"G_{solv}",
    }

    unit_string = DataSetEntry.default_units()[property_type]

    if unit is not None:
        property_unit = unit.Unit(unit_string)
        # Dimensionless properties get no unit suffix; "~P" is pint's
        # compact "pretty" unit formatting.
        unit_string = (
            "" if property_unit == unit.dimensionless else f" ({property_unit:~P})"
        )

    # Fall back to the raw property type when no abbreviation is defined.
    abbreviation = abbreviations.get(property_type, property_type)

    # Mark composition-dependent (multi-component) properties with "(x)".
    if "FreeEnergy" not in property_type and n_components > 1:
        abbreviation = f"{abbreviation} (x)"

    return f"${abbreviation}$ {unit_string}"


def format_category(category: Optional[str]) -> str:
    """Formats a category ready for plotting: maps ``None`` to ``"Other"`` and
    shortens verbose chemical-category names."""

    if category is None:
        category = "Other"

    # Order matters: shorten the more specific name first so that
    # "Carboxylic Acid Ester" does not become "Acid Ester".
    category = re.sub("Carboxylic Acid Ester", "Ester", category)
    category = re.sub("Carboxylic Acid", "Acid", category)

    return category


def combine_data_set_results(
    data_sets: List[DataSet],
    benchmarks: List[Benchmark],
    benchmark_results: List[BenchmarkResult],
) -> pandas.DataFrame:
    """Combines a set of benchmarked results with their corresponding reference
    data set values into a pandas data frame which can be readily plotted.

    Parameters
    ----------
    data_sets
        The data sets which contain the reference data points.
    benchmarks
        The benchmarks associated with each result.
    benchmark_results
        The results to map.

    Returns
    -------
        A pandas data frame containing the estimated and reference values for
        each set of benchmark results. The data frame has columns:

            * "Benchmark Id": The benchmark name.
            * "Property Type": The type of physical property.
            * "Property Title": A friendly title for the property type.
            * "Estimated Value": The benchmarked value.
            * "Estimated Std": The uncertainty in the benchmarked value.
            * "Reference Value": The reference value.
            * "Reference Std": The uncertainty in the reference value.
            * "Category": The category assigned to the data point.
    """
    # Index the reference points by id for O(1) lookup from result entries.
    reference_data_points: Dict[int, DataSetEntry] = {
        entry.id: entry for data_set in data_sets for entry in data_set.entries
    }

    # Re-shape the data into a pandas data frame for easier plotting.
    data_rows = []

    # NOTE(review): benchmarks and benchmark_results are zipped pairwise and
    # so are assumed to be parallel lists -- confirm at the call sites.
    for benchmark, benchmark_result in zip(benchmarks, benchmark_results):

        for result_entry in benchmark_result.data_set_result.result_entries:

            reference_data_point = reference_data_points[result_entry.reference_id]

            reference_value = reference_data_point.value
            reference_std = reference_data_point.std_error

            estimated_value = result_entry.estimated_value
            estimated_std = result_entry.estimated_std_error

            # For now trim down the number of different categories and
            # shorten certain category names.  An uncategorized entry still
            # yields one row (with category "Other" via format_category).
            for category in (
                [None] if len(result_entry.categories) == 0 else result_entry.categories
            ):

                # Composition markers (<, ~, >) are collapsed to "+" here.
                category = re.sub("[<>~]", "+", format_category(category))

                property_type = (
                    f"{reference_data_point.property_type}-"
                    f"{len(reference_data_point.components)}"
                )

                # Generate a meaningful title for the plot.
                property_title = property_type_to_title(
                    reference_data_point.property_type,
                    len(reference_data_point.components),
                )

                data_row = {
                    "Benchmark Id": benchmark.id,
                    "Benchmark Name": benchmark.name,
                    "Property Type": property_type,
                    "Property Title": property_title,
                    "Estimated Value": estimated_value,
                    "Estimated Std": estimated_std,
                    "Reference Value": reference_value,
                    "Reference Std": reference_std,
                    "Category": category,
                }

                data_rows.append(data_row)

    return pandas.DataFrame(data_rows)


def combine_target_rmse(
    targets: List[OptimizationTarget],
    target_results: List[TargetResultType],
    target_labels: List[str],
):
    """Combines the RMSE information from multiple target results into a single,
    easily plottable, pandas data frame.

    Parameters
    ----------
    targets
        The targets which the results were collected for.
    target_results
        The target results to combine.
    target_labels
        The labels associated with each target result.

    Returns
    -------
        A pandas data frame containing the combined RMSE values. The data frame
        has columns:

            * "Label": The label associated with the parent result target.
            * "Data Type": The data type associated with a given RMSE. For evaluator
              target results this will be a combination of the property type and the
              number of components. For recharge targets this will be the targeted
              electronic property.
            * "Value": The value of the RMSE.
            * "Lower CI": The lower 95% confidence interval.
            * "Upper CI": The upper 95% confidence interval.
            * "Category": The category associated with the RMSE.
    """

    def statistic_to_key(target, statistic):
        # Map a statistic to its "Data Type" key; implicitly returns None for
        # any target type other than the two handled here.
        if isinstance(target, EvaluatorTarget):
            return f"{statistic.property_type}-{statistic.n_components}"
        elif isinstance(target, RechargeTarget):
            return target.property

    # Gather the statistics: only categorized RMSE entries are kept.
    statistics_per_label = {
        label: {
            (statistic_to_key(target, statistic), statistic.category): statistic
            for statistic in target_result.statistic_entries
            if statistic.category is not None
            and statistic.statistic_type == StatisticType.RMSE
        }
        for label, target, target_result in zip(target_labels, targets, target_results)
    }

    # Reshape the statistics into a uniform data frame.
    data_rows = []

    for label, statistics in statistics_per_label.items():

        for statistic_key, statistic in statistics.items():

            data_type, category = statistic_key

            data_row = {
                "Label": label,
                "Data Type": data_type,
                "Value": statistic.value,
                # CI columns store the (non-negative) distance from the value
                # to each bound, as expected by e.g. matplotlib error bars.
                "Lower CI": numpy.abs(statistic.lower_95_ci - statistic.value),
                "Upper CI": numpy.abs(statistic.upper_95_ci - statistic.value),
                "Category": format_category(category),
            }

            data_rows.append(data_row)

    return pandas.DataFrame(data_rows)


def sort_categories_key(category: str) -> Tuple[str, Optional[str], Optional[int]]:
    """A function which may be used as the key when sorting a list of categories.

    This function assumes categories are based on chemical environments and
    compositions (up to a maximum of two components).

    Parameters
    ----------
    category
        The category to map to a sortable key.

    Returns
    -------
        A tuple containing at least the primary category key. For categories
        encoding a binary mixture, the tuple also contains the category key of
        the second component and an integer describing the type of composition
        (i.e. less than, equal or greater than).
    """
    # Binary-mixture categories embed exactly one of these separators; note
    # the first match in the fixed order "<", "~", ">" wins.
    splitter = (
        "<"
        if "<" in category
        else "~"
        if "~" in category
        else ">"
        if ">" in category
        else None
    )

    # Single-component category: sort on the name alone.
    if splitter is None:
        return category, None, None

    # Composition ordering: "less than" < "roughly equal" < "greater than".
    splitter_ordering = {"<": 0, "~": 1, ">": 2}

    split_string = category.split(splitter)

    return (
        split_string[0].strip(),
        split_string[1].strip(),
        splitter_ordering[splitter],
    )
{ "alphanum_fraction": 0.6299645187, "author": null, "avg_line_length": 33.0946969697, "converted": null, "ext": "py", "file": null, "hexsha": "480a7dae887e35a7bee5b1ffd86752a8dc2c75da", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "3efbb7d943d936b47248975f9ad0d8a006ea8684", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "SimonBoothroyd/nonbonded", "max_forks_repo_path": "nonbonded/library/plotting/utilities.py", "max_issues_count": 88, "max_issues_repo_head_hexsha": "3efbb7d943d936b47248975f9ad0d8a006ea8684", "max_issues_repo_issues_event_max_datetime": "2022-03-02T09:20:39.000Z", "max_issues_repo_issues_event_min_datetime": "2020-06-02T14:40:05.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "SimonBoothroyd/nonbonded", "max_issues_repo_path": "nonbonded/library/plotting/utilities.py", "max_line_length": 88, "max_stars_count": 5, "max_stars_repo_head_hexsha": "3efbb7d943d936b47248975f9ad0d8a006ea8684", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "SimonBoothroyd/nonbonded", "max_stars_repo_path": "nonbonded/library/plotting/utilities.py", "max_stars_repo_stars_event_max_datetime": "2022-01-27T10:55:09.000Z", "max_stars_repo_stars_event_min_datetime": "2020-05-11T18:25:00.000Z", "num_tokens": 1734, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 8737 }
from matplotlib import pyplot
from ContrastBrightness import check
import unittest
import cv2
import sys
sys.path.insert(0, '../login_signup')
from detectors import age_model, gender_model, face_detect, crop
from mtcnn.mtcnn import MTCNN
import tensorflow as tf
import numpy as np


class Test(unittest.TestCase):
    """Regression tests for the brightness / contrast classifier.

    Each test loads one fixture image and pins the classification string
    returned by ``check`` for it.
    """

    def _classify(self, path):
        # Load a fixture image from disk and run it through the classifier.
        return check(cv2.imread(path))

    def test1(self):
        self.assertEqual(self._classify("../IMAGE/1.jpg"), "High brightness and Low contrast")

    def test2(self):
        self.assertEqual(self._classify("../IMAGE/2.jpg"), "Low brightness and High contrast")

    def test3(self):
        self.assertEqual(self._classify("../IMAGE/3.jpg"), "Low brightness and High contrast")

    def test4(self):
        self.assertEqual(self._classify("../IMAGE/4.jpg"), "High brightness and High contrast")

    def test5(self):
        self.assertEqual(self._classify("../IMAGE/5.jpg"), "High brightness and Low contrast")

    def test6(self):
        self.assertEqual(self._classify("../IMAGE/6.jpg"), "Low brightness and Low contrast")

    def test7(self):
        self.assertEqual(self._classify("../IMAGE/7.jpg"), "Low brightness and High contrast")

    def test8(self):
        self.assertEqual(self._classify("../IMAGE/8.jpg"), "Low brightness and Low contrast")

    def test9(self):
        self.assertEqual(self._classify("../IMAGE/9.jpg"), "High brightness and High contrast")

    def test10(self):
        self.assertEqual(self._classify("../IMAGE/10.jpg"), "High brightness and High contrast")


class MLTest(unittest.TestCase):
    """End-to-end tests for the age and gender prediction models."""

    def setUp(self):
        # The age model and the mapping from output index to age-range label.
        self.model, self.prediction_age = age_model()
        self.model_gender = gender_model()
        # Fixture images paired element-wise with their expected labels.
        self.images = ['../IMAGE/image11.jpeg', '../IMAGE/image12.jpeg', '../IMAGE/image13.jpeg']
        self.age = ['0 - 5', '21 - 30', '31 - 40']
        self.gender = ['Male', 'Female', 'Female']
        self.detector = MTCNN()

    def _prepare_face(self, image_path):
        """Detect, crop and reload the face in ``image_path`` as a batched tensor.

        Runs the same detect -> face_detect -> crop pipeline the application
        uses, then reads back the cropped face and adds a batch dimension.
        """
        boxes = []
        img = pyplot.imread(image_path)
        faces = self.detector.detect_faces(img)
        face_detect(image_path, faces, boxes)
        # NOTE(review): presumably ``crop`` writes the resized face to
        # "IM/resized_image.jpg", since the tensor is reloaded from there --
        # confirm against the detectors module.
        crop(boxes, image_path)
        return tf.expand_dims(
            tf.io.decode_image(tf.io.read_file("IM/resized_image.jpg"), dtype=tf.float32),
            axis=0,
        )

    def test_age(self):
        for i in range(len(self.images)):
            with self.subTest(i=i):
                test_image = self._prepare_face(self.images[i])
                # The age model's argmax indexes into the age-range labels.
                prediction = self.prediction_age[np.argmax(self.model.predict(test_image)[0])]
                self.assertEqual(prediction, self.age[i])

    def test_gender(self):
        for i in range(len(self.images)):
            with self.subTest(i=i):
                test_image = self._prepare_face(self.images[i])
                # BUG FIX: this test previously ran the *age* model
                # (self.model) and interpreted its argmax as a gender, while
                # self.model_gender was created but never used. Predict with
                # the dedicated gender model instead (index 0 == Male).
                index = np.argmax(self.model_gender.predict(test_image)[0])
                prediction = 'Male' if index == 0 else 'Female'
                self.assertEqual(prediction, self.gender[i])


# if __name__ == '__main__':
#     unittest.main()
{ "alphanum_fraction": 0.6054421769, "author": null, "avg_line_length": 42, "converted": null, "ext": "py", "file": null, "hexsha": "9b256c64b0828d2a605695ca9ce62ad8f6244846", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f2c7f57e8ae127d0deedfdca1a90e75d3b20fcc0", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "darshmgandhi/agender", "max_forks_repo_path": "ui/agender_ui/Pybuilder/src/unittest/python/UnitTest_tests.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "f2c7f57e8ae127d0deedfdca1a90e75d3b20fcc0", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "darshmgandhi/agender", "max_issues_repo_path": "ui/agender_ui/Pybuilder/src/unittest/python/UnitTest_tests.py", "max_line_length": 130, "max_stars_count": null, "max_stars_repo_head_hexsha": "f2c7f57e8ae127d0deedfdca1a90e75d3b20fcc0", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "darshmgandhi/agender", "max_stars_repo_path": "ui/agender_ui/Pybuilder/src/unittest/python/UnitTest_tests.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 768, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 3528 }
@testset "4.5.1.4 (d tan)^n (a+b sec)^m" begin (a, b, c, d, e, f, m, n, x, ) = @variables a b c d e f m n x #= ::Package:: =# #= ::Title:: =# #=Integrands*of*the*form*(d*tan(e+f*x))^n*(a+b*sec(e+f*x))^m=# #= ::Section::Closed:: =# #=Integrands*of*the*form*(d*tan(e+f*x))^n*(a+a*sec(e+f*x))^m=# #= ::Subsection::Closed:: =# #=Integrands*of*the*form*tan(e+f*x)^n*(a+a*sec(e+f*x))^m=# #= ::Subsubsection::Closed:: =# #=n>0=# @test_int [tan(c + d*x)^9*(a + a*sec(c + d*x)), x, 3, -((a*log(cos(c + d*x)))/d) + (a*sec(c + d*x))/d - (2*a*sec(c + d*x)^2)/d - (4*a*sec(c + d*x)^3)/(3*d) + (3*a*sec(c + d*x)^4)/(2*d) + (6*a*sec(c + d*x)^5)/(5*d) - (2*a*sec(c + d*x)^6)/(3*d) - (4*a*sec(c + d*x)^7)/(7*d) + (a*sec(c + d*x)^8)/(8*d) + (a*sec(c + d*x)^9)/(9*d)] @test_int [tan(c + d*x)^7*(a + a*sec(c + d*x)), x, 3, (a*log(cos(c + d*x)))/d - (a*sec(c + d*x))/d + (3*a*sec(c + d*x)^2)/(2*d) + (a*sec(c + d*x)^3)/d - (3*a*sec(c + d*x)^4)/(4*d) - (3*a*sec(c + d*x)^5)/(5*d) + (a*sec(c + d*x)^6)/(6*d) + (a*sec(c + d*x)^7)/(7*d)] @test_int [tan(c + d*x)^5*(a + a*sec(c + d*x)), x, 3, -((a*log(cos(c + d*x)))/d) + (a*sec(c + d*x))/d - (a*sec(c + d*x)^2)/d - (2*a*sec(c + d*x)^3)/(3*d) + (a*sec(c + d*x)^4)/(4*d) + (a*sec(c + d*x)^5)/(5*d)] @test_int [tan(c + d*x)^3*(a + a*sec(c + d*x)), x, 3, (a*log(cos(c + d*x)))/d - (a*sec(c + d*x))/d + (a*sec(c + d*x)^2)/(2*d) + (a*sec(c + d*x)^3)/(3*d)] @test_int [tan(c + d*x)^1*(a + a*sec(c + d*x)), x, 3, -((a*log(cos(c + d*x)))/d) + (a*sec(c + d*x))/d] @test_int [cot(c + d*x)^1*(a + a*sec(c + d*x)), x, 2, (a*log(1 - cos(c + d*x)))/d] @test_int [cot(c + d*x)^3*(a + a*sec(c + d*x)), x, 3, -(a/(2*d*(1 - cos(c + d*x)))) - (3*a*log(1 - cos(c + d*x)))/(4*d) - (a*log(1 + cos(c + d*x)))/(4*d)] @test_int [cot(c + d*x)^5*(a + a*sec(c + d*x)), x, 3, -(a/(8*d*(1 - cos(c + d*x))^2)) + (3*a)/(4*d*(1 - cos(c + d*x))) + a/(8*d*(1 + cos(c + d*x))) + (11*a*log(1 - cos(c + d*x)))/(16*d) + (5*a*log(1 + cos(c + d*x)))/(16*d)] @test_int [cot(c + d*x)^7*(a + a*sec(c + d*x)), x, 3, 
-(a/(24*d*(1 - cos(c + d*x))^3)) + (9*a)/(32*d*(1 - cos(c + d*x))^2) - (15*a)/(16*d*(1 - cos(c + d*x))) + a/(32*d*(1 + cos(c + d*x))^2) - a/(4*d*(1 + cos(c + d*x))) - (21*a*log(1 - cos(c + d*x)))/(32*d) - (11*a*log(1 + cos(c + d*x)))/(32*d)] @test_int [tan(c + d*x)^8*(a + a*sec(c + d*x)), x, 6, a*x + (35*a*atanh(sin(c + d*x)))/(128*d) - ((128*a + 35*a*sec(c + d*x))*tan(c + d*x))/(128*d) + ((64*a + 35*a*sec(c + d*x))*tan(c + d*x)^3)/(192*d) - ((48*a + 35*a*sec(c + d*x))*tan(c + d*x)^5)/(240*d) + ((8*a + 7*a*sec(c + d*x))*tan(c + d*x)^7)/(56*d)] @test_int [tan(c + d*x)^6*(a + a*sec(c + d*x)), x, 5, (-a)*x - (5*a*atanh(sin(c + d*x)))/(16*d) + ((16*a + 5*a*sec(c + d*x))*tan(c + d*x))/(16*d) - ((8*a + 5*a*sec(c + d*x))*tan(c + d*x)^3)/(24*d) + ((6*a + 5*a*sec(c + d*x))*tan(c + d*x)^5)/(30*d)] @test_int [tan(c + d*x)^4*(a + a*sec(c + d*x)), x, 4, a*x + (3*a*atanh(sin(c + d*x)))/(8*d) - ((8*a + 3*a*sec(c + d*x))*tan(c + d*x))/(8*d) + ((4*a + 3*a*sec(c + d*x))*tan(c + d*x)^3)/(12*d)] @test_int [tan(c + d*x)^2*(a + a*sec(c + d*x)), x, 3, (-a)*x - (a*atanh(sin(c + d*x)))/(2*d) + ((2*a + a*sec(c + d*x))*tan(c + d*x))/(2*d)] @test_int [cot(c + d*x)^2*(a + a*sec(c + d*x)), x, 2, (-a)*x - (cot(c + d*x)*(a + a*sec(c + d*x)))/d] @test_int [cot(c + d*x)^4*(a + a*sec(c + d*x)), x, 3, a*x - (cot(c + d*x)^3*(a + a*sec(c + d*x)))/(3*d) + (cot(c + d*x)*(3*a + 2*a*sec(c + d*x)))/(3*d)] @test_int [cot(c + d*x)^6*(a + a*sec(c + d*x)), x, 4, (-a)*x - (cot(c + d*x)^5*(a + a*sec(c + d*x)))/(5*d) + (cot(c + d*x)^3*(5*a + 4*a*sec(c + d*x)))/(15*d) - (cot(c + d*x)*(15*a + 8*a*sec(c + d*x)))/(15*d)] @test_int [cot(c + d*x)^8*(a + a*sec(c + d*x)), x, 5, a*x - (cot(c + d*x)^7*(a + a*sec(c + d*x)))/(7*d) + (cot(c + d*x)^5*(7*a + 6*a*sec(c + d*x)))/(35*d) + (cot(c + d*x)*(35*a + 16*a*sec(c + d*x)))/(35*d) - (cot(c + d*x)^3*(35*a + 24*a*sec(c + d*x)))/(105*d)] @test_int [cot(c + d*x)^10*(a + a*sec(c + d*x)), x, 6, (-a)*x - (cot(c + d*x)^9*(a + a*sec(c + d*x)))/(9*d) + (cot(c + d*x)^7*(9*a + 8*a*sec(c 
+ d*x)))/(63*d) - (cot(c + d*x)^5*(21*a + 16*a*sec(c + d*x)))/(105*d) + (cot(c + d*x)^3*(105*a + 64*a*sec(c + d*x)))/(315*d) - (cot(c + d*x)*(315*a + 128*a*sec(c + d*x)))/(315*d)] @test_int [tan(c + d*x)^9*(a + a*sec(c + d*x))^2, x, 3, -((a^2*log(cos(c + d*x)))/d) + (2*a^2*sec(c + d*x))/d - (3*a^2*sec(c + d*x)^2)/(2*d) - (8*a^2*sec(c + d*x)^3)/(3*d) + (a^2*sec(c + d*x)^4)/(2*d) + (12*a^2*sec(c + d*x)^5)/(5*d) + (a^2*sec(c + d*x)^6)/(3*d) - (8*a^2*sec(c + d*x)^7)/(7*d) - (3*a^2*sec(c + d*x)^8)/(8*d) + (2*a^2*sec(c + d*x)^9)/(9*d) + (a^2*sec(c + d*x)^10)/(10*d)] @test_int [tan(c + d*x)^7*(a + a*sec(c + d*x))^2, x, 3, (a^2*log(cos(c + d*x)))/d - (2*a^2*sec(c + d*x))/d + (a^2*sec(c + d*x)^2)/d + (2*a^2*sec(c + d*x)^3)/d - (6*a^2*sec(c + d*x)^5)/(5*d) - (a^2*sec(c + d*x)^6)/(3*d) + (2*a^2*sec(c + d*x)^7)/(7*d) + (a^2*sec(c + d*x)^8)/(8*d)] @test_int [tan(c + d*x)^5*(a + a*sec(c + d*x))^2, x, 3, -((a^2*log(cos(c + d*x)))/d) + (2*a^2*sec(c + d*x))/d - (a^2*sec(c + d*x)^2)/(2*d) - (4*a^2*sec(c + d*x)^3)/(3*d) - (a^2*sec(c + d*x)^4)/(4*d) + (2*a^2*sec(c + d*x)^5)/(5*d) + (a^2*sec(c + d*x)^6)/(6*d)] @test_int [tan(c + d*x)^3*(a + a*sec(c + d*x))^2, x, 3, (a^2*log(cos(c + d*x)))/d - (2*a^2*sec(c + d*x))/d + (2*a^2*sec(c + d*x)^3)/(3*d) + (a^2*sec(c + d*x)^4)/(4*d)] @test_int [tan(c + d*x)^1*(a + a*sec(c + d*x))^2, x, 3, -((a^2*log(cos(c + d*x)))/d) + (2*a^2*sec(c + d*x))/d + (a^2*sec(c + d*x)^2)/(2*d)] @test_int [cot(c + d*x)^1*(a + a*sec(c + d*x))^2, x, 3, (2*a^2*log(1 - cos(c + d*x)))/d - (a^2*log(cos(c + d*x)))/d] @test_int [cot(c + d*x)^3*(a + a*sec(c + d*x))^2, x, 3, -(a^2/(d*(1 - cos(c + d*x)))) - (a^2*log(1 - cos(c + d*x)))/d] @test_int [cot(c + d*x)^5*(a + a*sec(c + d*x))^2, x, 3, -(a^2/(4*d*(1 - cos(c + d*x))^2)) + (5*a^2)/(4*d*(1 - cos(c + d*x))) + (7*a^2*log(1 - cos(c + d*x)))/(8*d) + (a^2*log(1 + cos(c + d*x)))/(8*d)] @test_int [cot(c + d*x)^7*(a + a*sec(c + d*x))^2, x, 3, -(a^2/(12*d*(1 - cos(c + d*x))^3)) + a^2/(2*d*(1 - cos(c + d*x))^2) - (23*a^2)/(16*d*(1 - 
cos(c + d*x))) - a^2/(16*d*(1 + cos(c + d*x))) - (13*a^2*log(1 - cos(c + d*x)))/(16*d) - (3*a^2*log(1 + cos(c + d*x)))/(16*d)] @test_int [cot(c + d*x)^9*(a + a*sec(c + d*x))^2, x, 3, -(a^2/(32*d*(1 - cos(c + d*x))^4)) + (11*a^2)/(48*d*(1 - cos(c + d*x))^3) - (3*a^2)/(4*d*(1 - cos(c + d*x))^2) + (51*a^2)/(32*d*(1 - cos(c + d*x))) - a^2/(64*d*(1 + cos(c + d*x))^2) + (9*a^2)/(64*d*(1 + cos(c + d*x))) + (99*a^2*log(1 - cos(c + d*x)))/(128*d) + (29*a^2*log(1 + cos(c + d*x)))/(128*d)] @test_int [tan(c + d*x)^6*(a + a*sec(c + d*x))^2, x, 12, (-a^2)*x - (5*a^2*atanh(sin(c + d*x)))/(8*d) + (a^2*tan(c + d*x))/d + (5*a^2*sec(c + d*x)*tan(c + d*x))/(8*d) - (a^2*tan(c + d*x)^3)/(3*d) - (5*a^2*sec(c + d*x)*tan(c + d*x)^3)/(12*d) + (a^2*tan(c + d*x)^5)/(5*d) + (a^2*sec(c + d*x)*tan(c + d*x)^5)/(3*d) + (a^2*tan(c + d*x)^7)/(7*d)] @test_int [tan(c + d*x)^4*(a + a*sec(c + d*x))^2, x, 10, a^2*x + (3*a^2*atanh(sin(c + d*x)))/(4*d) - (a^2*tan(c + d*x))/d - (3*a^2*sec(c + d*x)*tan(c + d*x))/(4*d) + (a^2*tan(c + d*x)^3)/(3*d) + (a^2*sec(c + d*x)*tan(c + d*x)^3)/(2*d) + (a^2*tan(c + d*x)^5)/(5*d)] @test_int [tan(c + d*x)^2*(a + a*sec(c + d*x))^2, x, 8, (-a^2)*x - (a^2*atanh(sin(c + d*x)))/d + (a^2*tan(c + d*x))/d + (a^2*sec(c + d*x)*tan(c + d*x))/d + (a^2*tan(c + d*x)^3)/(3*d)] @test_int [cot(c + d*x)^2*(a + a*sec(c + d*x))^2, x, 8, (-a^2)*x - (2*a^2*cot(c + d*x))/d - (2*a^2*csc(c + d*x))/d] @test_int [cot(c + d*x)^4*(a + a*sec(c + d*x))^2, x, 9, a^2*x + (a^2*cot(c + d*x))/d - (2*a^2*cot(c + d*x)^3)/(3*d) + (2*a^2*csc(c + d*x))/d - (2*a^2*csc(c + d*x)^3)/(3*d)] @test_int [cot(c + d*x)^6*(a + a*sec(c + d*x))^2, x, 11, (-a^2)*x - (a^2*cot(c + d*x))/d + (a^2*cot(c + d*x)^3)/(3*d) - (2*a^2*cot(c + d*x)^5)/(5*d) - (2*a^2*csc(c + d*x))/d + (4*a^2*csc(c + d*x)^3)/(3*d) - (2*a^2*csc(c + d*x)^5)/(5*d)] @test_int [cot(c + d*x)^8*(a + a*sec(c + d*x))^2, x, 12, a^2*x + (a^2*cot(c + d*x))/d - (a^2*cot(c + d*x)^3)/(3*d) + (a^2*cot(c + d*x)^5)/(5*d) - (2*a^2*cot(c + d*x)^7)/(7*d) + (2*a^2*csc(c + 
d*x))/d - (2*a^2*csc(c + d*x)^3)/d + (6*a^2*csc(c + d*x)^5)/(5*d) - (2*a^2*csc(c + d*x)^7)/(7*d)] @test_int [cot(c + d*x)^10*(a + a*sec(c + d*x))^2, x, 13, (-a^2)*x - (a^2*cot(c + d*x))/d + (a^2*cot(c + d*x)^3)/(3*d) - (a^2*cot(c + d*x)^5)/(5*d) + (a^2*cot(c + d*x)^7)/(7*d) - (2*a^2*cot(c + d*x)^9)/(9*d) - (2*a^2*csc(c + d*x))/d + (8*a^2*csc(c + d*x)^3)/(3*d) - (12*a^2*csc(c + d*x)^5)/(5*d) + (8*a^2*csc(c + d*x)^7)/(7*d) - (2*a^2*csc(c + d*x)^9)/(9*d)] @test_int [tan(c + d*x)^9*(a + a*sec(c + d*x))^3, x, 3, -((a^3*log(cos(c + d*x)))/d) + (3*a^3*sec(c + d*x))/d - (a^3*sec(c + d*x)^2)/(2*d) - (11*a^3*sec(c + d*x)^3)/(3*d) - (3*a^3*sec(c + d*x)^4)/(2*d) + (14*a^3*sec(c + d*x)^5)/(5*d) + (7*a^3*sec(c + d*x)^6)/(3*d) - (6*a^3*sec(c + d*x)^7)/(7*d) - (11*a^3*sec(c + d*x)^8)/(8*d) - (a^3*sec(c + d*x)^9)/(9*d) + (3*a^3*sec(c + d*x)^10)/(10*d) + (a^3*sec(c + d*x)^11)/(11*d)] @test_int [tan(c + d*x)^7*(a + a*sec(c + d*x))^3, x, 3, (a^3*log(cos(c + d*x)))/d - (3*a^3*sec(c + d*x))/d + (8*a^3*sec(c + d*x)^3)/(3*d) + (3*a^3*sec(c + d*x)^4)/(2*d) - (6*a^3*sec(c + d*x)^5)/(5*d) - (4*a^3*sec(c + d*x)^6)/(3*d) + (3*a^3*sec(c + d*x)^8)/(8*d) + (a^3*sec(c + d*x)^9)/(9*d)] @test_int [tan(c + d*x)^5*(a + a*sec(c + d*x))^3, x, 3, -((a^3*log(cos(c + d*x)))/d) + (3*a^3*sec(c + d*x))/d + (a^3*sec(c + d*x)^2)/(2*d) - (5*a^3*sec(c + d*x)^3)/(3*d) - (5*a^3*sec(c + d*x)^4)/(4*d) + (a^3*sec(c + d*x)^5)/(5*d) + (a^3*sec(c + d*x)^6)/(2*d) + (a^3*sec(c + d*x)^7)/(7*d)] @test_int [tan(c + d*x)^3*(a + a*sec(c + d*x))^3, x, 3, (a^3*log(cos(c + d*x)))/d - (3*a^3*sec(c + d*x))/d - (a^3*sec(c + d*x)^2)/d + (2*a^3*sec(c + d*x)^3)/(3*d) + (3*a^3*sec(c + d*x)^4)/(4*d) + (a^3*sec(c + d*x)^5)/(5*d)] @test_int [tan(c + d*x)^1*(a + a*sec(c + d*x))^3, x, 3, -((a^3*log(cos(c + d*x)))/d) + (3*a^3*sec(c + d*x))/d + (3*a^3*sec(c + d*x)^2)/(2*d) + (a^3*sec(c + d*x)^3)/(3*d)] @test_int [cot(c + d*x)^1*(a + a*sec(c + d*x))^3, x, 3, (4*a^3*log(1 - cos(c + d*x)))/d - (3*a^3*log(cos(c + d*x)))/d + (a^3*sec(c + d*x))/d] 
@test_int [cot(c + d*x)^3*(a + a*sec(c + d*x))^3, x, 3, -((2*a^3)/(d*(1 - cos(c + d*x)))) - (a^3*log(1 - cos(c + d*x)))/d] @test_int [cot(c + d*x)^5*(a + a*sec(c + d*x))^3, x, 3, -(a^3/(2*d*(1 - cos(c + d*x))^2)) + (2*a^3)/(d*(1 - cos(c + d*x))) + (a^3*log(1 - cos(c + d*x)))/d] @test_int [cot(c + d*x)^7*(a + a*sec(c + d*x))^3, x, 3, -(a^3/(6*d*(1 - cos(c + d*x))^3)) + (7*a^3)/(8*d*(1 - cos(c + d*x))^2) - (17*a^3)/(8*d*(1 - cos(c + d*x))) - (15*a^3*log(1 - cos(c + d*x)))/(16*d) - (a^3*log(1 + cos(c + d*x)))/(16*d)] @test_int [cot(c + d*x)^9*(a + a*sec(c + d*x))^3, x, 3, -(a^3/(16*d*(1 - cos(c + d*x))^4)) + (5*a^3)/(12*d*(1 - cos(c + d*x))^3) - (39*a^3)/(32*d*(1 - cos(c + d*x))^2) + (9*a^3)/(4*d*(1 - cos(c + d*x))) + a^3/(32*d*(1 + cos(c + d*x))) + (57*a^3*log(1 - cos(c + d*x)))/(64*d) + (7*a^3*log(1 + cos(c + d*x)))/(64*d)] @test_int [tan(c + d*x)^6*(a + a*sec(c + d*x))^3, x, 17, (-a^3)*x - (125*a^3*atanh(sin(c + d*x)))/(128*d) + (a^3*tan(c + d*x))/d + (115*a^3*sec(c + d*x)*tan(c + d*x))/(128*d) + (5*a^3*sec(c + d*x)^3*tan(c + d*x))/(64*d) - (a^3*tan(c + d*x)^3)/(3*d) - (5*a^3*sec(c + d*x)*tan(c + d*x)^3)/(8*d) - (5*a^3*sec(c + d*x)^3*tan(c + d*x)^3)/(48*d) + (a^3*tan(c + d*x)^5)/(5*d) + (a^3*sec(c + d*x)*tan(c + d*x)^5)/(2*d) + (a^3*sec(c + d*x)^3*tan(c + d*x)^5)/(8*d) + (3*a^3*tan(c + d*x)^7)/(7*d)] @test_int [tan(c + d*x)^4*(a + a*sec(c + d*x))^3, x, 14, a^3*x + (19*a^3*atanh(sin(c + d*x)))/(16*d) - (a^3*tan(c + d*x))/d - (17*a^3*sec(c + d*x)*tan(c + d*x))/(16*d) - (a^3*sec(c + d*x)^3*tan(c + d*x))/(8*d) + (a^3*tan(c + d*x)^3)/(3*d) + (3*a^3*sec(c + d*x)*tan(c + d*x)^3)/(4*d) + (a^3*sec(c + d*x)^3*tan(c + d*x)^3)/(6*d) + (3*a^3*tan(c + d*x)^5)/(5*d)] @test_int [tan(c + d*x)^2*(a + a*sec(c + d*x))^3, x, 11, (-a^3)*x - (13*a^3*atanh(sin(c + d*x)))/(8*d) + (a^3*tan(c + d*x))/d + (11*a^3*sec(c + d*x)*tan(c + d*x))/(8*d) + (a^3*sec(c + d*x)^3*tan(c + d*x))/(4*d) + (a^3*tan(c + d*x)^3)/d] @test_int [cot(c + d*x)^2*(a + a*sec(c + d*x))^3, x, 11, (-a^3)*x + 
(a^3*atanh(sin(c + d*x)))/d - (4*a^3*cot(c + d*x))/d - (4*a^3*csc(c + d*x))/d] @test_int [cot(c + d*x)^4*(a + a*sec(c + d*x))^3, x, 11, a^3*x + (a^3*cot(c + d*x))/d - (4*a^3*cot(c + d*x)^3)/(3*d) + (3*a^3*csc(c + d*x))/d - (4*a^3*csc(c + d*x)^3)/(3*d)] @test_int [cot(c + d*x)^6*(a + a*sec(c + d*x))^3, x, 14, (-a^3)*x - (a^3*cot(c + d*x))/d + (a^3*cot(c + d*x)^3)/(3*d) - (4*a^3*cot(c + d*x)^5)/(5*d) - (3*a^3*csc(c + d*x))/d + (7*a^3*csc(c + d*x)^3)/(3*d) - (4*a^3*csc(c + d*x)^5)/(5*d)] @test_int [cot(c + d*x)^8*(a + a*sec(c + d*x))^3, x, 15, a^3*x + (a^3*cot(c + d*x))/d - (a^3*cot(c + d*x)^3)/(3*d) + (a^3*cot(c + d*x)^5)/(5*d) - (4*a^3*cot(c + d*x)^7)/(7*d) + (3*a^3*csc(c + d*x))/d - (10*a^3*csc(c + d*x)^3)/(3*d) + (11*a^3*csc(c + d*x)^5)/(5*d) - (4*a^3*csc(c + d*x)^7)/(7*d)] @test_int [cot(c + d*x)^10*(a + a*sec(c + d*x))^3, x, 16, (-a^3)*x - (a^3*cot(c + d*x))/d + (a^3*cot(c + d*x)^3)/(3*d) - (a^3*cot(c + d*x)^5)/(5*d) + (a^3*cot(c + d*x)^7)/(7*d) - (4*a^3*cot(c + d*x)^9)/(9*d) - (3*a^3*csc(c + d*x))/d + (13*a^3*csc(c + d*x)^3)/(3*d) - (21*a^3*csc(c + d*x)^5)/(5*d) + (15*a^3*csc(c + d*x)^7)/(7*d) - (4*a^3*csc(c + d*x)^9)/(9*d)] @test_int [cot(c + d*x)^12*(a + a*sec(c + d*x))^3, x, 17, a^3*x + (a^3*cot(c + d*x))/d - (a^3*cot(c + d*x)^3)/(3*d) + (a^3*cot(c + d*x)^5)/(5*d) - (a^3*cot(c + d*x)^7)/(7*d) + (a^3*cot(c + d*x)^9)/(9*d) - (4*a^3*cot(c + d*x)^11)/(11*d) + (3*a^3*csc(c + d*x))/d - (16*a^3*csc(c + d*x)^3)/(3*d) + (34*a^3*csc(c + d*x)^5)/(5*d) - (36*a^3*csc(c + d*x)^7)/(7*d) + (19*a^3*csc(c + d*x)^9)/(9*d) - (4*a^3*csc(c + d*x)^11)/(11*d)] #= ::Subsubsection::Closed:: =# #=n<0=# @test_int [tan(c + d*x)^9/(a + a*sec(c + d*x)), x, 3, -(log(cos(c + d*x))/(a*d)) - sec(c + d*x)/(a*d) - (3*sec(c + d*x)^2)/(2*a*d) + sec(c + d*x)^3/(a*d) + (3*sec(c + d*x)^4)/(4*a*d) - (3*sec(c + d*x)^5)/(5*a*d) - sec(c + d*x)^6/(6*a*d) + sec(c + d*x)^7/(7*a*d)] @test_int [tan(c + d*x)^7/(a + a*sec(c + d*x)), x, 3, log(cos(c + d*x))/(a*d) + sec(c + d*x)/(a*d) + sec(c + d*x)^2/(a*d) - 
(2*sec(c + d*x)^3)/(3*a*d) - sec(c + d*x)^4/(4*a*d) + sec(c + d*x)^5/(5*a*d)] @test_int [tan(c + d*x)^5/(a + a*sec(c + d*x)), x, 3, -(log(cos(c + d*x))/(a*d)) - sec(c + d*x)/(a*d) - sec(c + d*x)^2/(2*a*d) + sec(c + d*x)^3/(3*a*d)] @test_int [tan(c + d*x)^3/(a + a*sec(c + d*x)), x, 3, log(cos(c + d*x))/(a*d) + sec(c + d*x)/(a*d)] @test_int [tan(c + d*x)^1/(a + a*sec(c + d*x)), x, 2, -(log(1 + cos(c + d*x))/(a*d))] @test_int [cot(c + d*x)^1/(a + a*sec(c + d*x)), x, 3, 1/(2*a*d*(1 + cos(c + d*x))) + log(1 - cos(c + d*x))/(4*a*d) + (3*log(1 + cos(c + d*x)))/(4*a*d)] @test_int [cot(c + d*x)^3/(a + a*sec(c + d*x)), x, 3, -(1/(8*a*d*(1 - cos(c + d*x)))) + 1/(8*a*d*(1 + cos(c + d*x))^2) - 3/(4*a*d*(1 + cos(c + d*x))) - (5*log(1 - cos(c + d*x)))/(16*a*d) - (11*log(1 + cos(c + d*x)))/(16*a*d)] @test_int [cot(c + d*x)^5/(a + a*sec(c + d*x)), x, 3, -(1/(32*a*d*(1 - cos(c + d*x))^2)) + 1/(4*a*d*(1 - cos(c + d*x))) + 1/(24*a*d*(1 + cos(c + d*x))^3) - 9/(32*a*d*(1 + cos(c + d*x))^2) + 15/(16*a*d*(1 + cos(c + d*x))) + (11*log(1 - cos(c + d*x)))/(32*a*d) + (21*log(1 + cos(c + d*x)))/(32*a*d)] @test_int [tan(c + d*x)^8/(a + a*sec(c + d*x)), x, 6, x/a - (5*atanh(sin(c + d*x)))/(16*a*d) - ((16 - 5*sec(c + d*x))*tan(c + d*x))/(16*a*d) + ((8 - 5*sec(c + d*x))*tan(c + d*x)^3)/(24*a*d) - ((6 - 5*sec(c + d*x))*tan(c + d*x)^5)/(30*a*d)] @test_int [tan(c + d*x)^6/(a + a*sec(c + d*x)), x, 5, -(x/a) + (3*atanh(sin(c + d*x)))/(8*a*d) + ((8 - 3*sec(c + d*x))*tan(c + d*x))/(8*a*d) - ((4 - 3*sec(c + d*x))*tan(c + d*x)^3)/(12*a*d)] @test_int [tan(c + d*x)^4/(a + a*sec(c + d*x)), x, 4, x/a - atanh(sin(c + d*x))/(2*a*d) - ((2 - sec(c + d*x))*tan(c + d*x))/(2*a*d)] @test_int [tan(c + d*x)^2/(a + a*sec(c + d*x)), x, 3, -(x/a) + atanh(sin(c + d*x))/(a*d)] @test_int [cot(c + d*x)^2/(a + a*sec(c + d*x)), x, 4, -(x/a) - (cot(c + d*x)*(3 - 2*sec(c + d*x)))/(3*a*d) + (cot(c + d*x)^3*(1 - sec(c + d*x)))/(3*a*d)] @test_int [cot(c + d*x)^4/(a + a*sec(c + d*x)), x, 5, x/a + (cot(c + d*x)*(15 - 8*sec(c + 
d*x)))/(15*a*d) - (cot(c + d*x)^3*(5 - 4*sec(c + d*x)))/(15*a*d) + (cot(c + d*x)^5*(1 - sec(c + d*x)))/(5*a*d)] @test_int [cot(c + d*x)^6/(a + a*sec(c + d*x)), x, 6, -(x/a) + (cot(c + d*x)^3*(35 - 24*sec(c + d*x)))/(105*a*d) - (cot(c + d*x)*(35 - 16*sec(c + d*x)))/(35*a*d) - (cot(c + d*x)^5*(7 - 6*sec(c + d*x)))/(35*a*d) + (cot(c + d*x)^7*(1 - sec(c + d*x)))/(7*a*d)] @test_int [tan(c + d*x)^9/(a + a*sec(c + d*x))^2, x, 3, -(log(cos(c + d*x))/(a^2*d)) - (2*sec(c + d*x))/(a^2*d) - sec(c + d*x)^2/(2*a^2*d) + (4*sec(c + d*x)^3)/(3*a^2*d) - sec(c + d*x)^4/(4*a^2*d) - (2*sec(c + d*x)^5)/(5*a^2*d) + sec(c + d*x)^6/(6*a^2*d)] @test_int [tan(c + d*x)^7/(a + a*sec(c + d*x))^2, x, 3, log(cos(c + d*x))/(a^2*d) + (2*sec(c + d*x))/(a^2*d) - (2*sec(c + d*x)^3)/(3*a^2*d) + sec(c + d*x)^4/(4*a^2*d)] @test_int [tan(c + d*x)^5/(a + a*sec(c + d*x))^2, x, 3, -(log(cos(c + d*x))/(a^2*d)) - (2*sec(c + d*x))/(a^2*d) + sec(c + d*x)^2/(2*a^2*d)] @test_int [tan(c + d*x)^3/(a + a*sec(c + d*x))^2, x, 3, -(log(cos(c + d*x))/(a^2*d)) + (2*log(1 + cos(c + d*x)))/(a^2*d)] @test_int [tan(c + d*x)^1/(a + a*sec(c + d*x))^2, x, 3, -(1/(a^2*d*(1 + cos(c + d*x)))) - log(1 + cos(c + d*x))/(a^2*d)] @test_int [cot(c + d*x)^1/(a + a*sec(c + d*x))^2, x, 3, -(1/(4*a^2*d*(1 + cos(c + d*x))^2)) + 5/(4*a^2*d*(1 + cos(c + d*x))) + log(1 - cos(c + d*x))/(8*a^2*d) + (7*log(1 + cos(c + d*x)))/(8*a^2*d)] @test_int [cot(c + d*x)^3/(a + a*sec(c + d*x))^2, x, 3, -(1/(16*a^2*d*(1 - cos(c + d*x)))) - 1/(12*a^2*d*(1 + cos(c + d*x))^3) + 1/(2*a^2*d*(1 + cos(c + d*x))^2) - 23/(16*a^2*d*(1 + cos(c + d*x))) - (3*log(1 - cos(c + d*x)))/(16*a^2*d) - (13*log(1 + cos(c + d*x)))/(16*a^2*d)] @test_int [cot(c + d*x)^5/(a + a*sec(c + d*x))^2, x, 3, -(1/(64*a^2*d*(1 - cos(c + d*x))^2)) + 9/(64*a^2*d*(1 - cos(c + d*x))) - 1/(32*a^2*d*(1 + cos(c + d*x))^4) + 11/(48*a^2*d*(1 + cos(c + d*x))^3) - 3/(4*a^2*d*(1 + cos(c + d*x))^2) + 51/(32*a^2*d*(1 + cos(c + d*x))) + (29*log(1 - cos(c + d*x)))/(128*a^2*d) + (99*log(1 + cos(c + 
d*x)))/(128*a^2*d)] @test_int [tan(c + d*x)^8/(a + a*sec(c + d*x))^2, x, 11, x/a^2 - (3*atanh(sin(c + d*x)))/(4*a^2*d) - tan(c + d*x)/(a^2*d) + (3*sec(c + d*x)*tan(c + d*x))/(4*a^2*d) + tan(c + d*x)^3/(3*a^2*d) - (sec(c + d*x)*tan(c + d*x)^3)/(2*a^2*d) + tan(c + d*x)^5/(5*a^2*d)] @test_int [tan(c + d*x)^6/(a + a*sec(c + d*x))^2, x, 9, -(x/a^2) + atanh(sin(c + d*x))/(a^2*d) + tan(c + d*x)/(a^2*d) - (sec(c + d*x)*tan(c + d*x))/(a^2*d) + tan(c + d*x)^3/(3*a^2*d)] @test_int [tan(c + d*x)^4/(a + a*sec(c + d*x))^2, x, 5, x/a^2 - (2*atanh(sin(c + d*x)))/(a^2*d) + tan(c + d*x)/(a^2*d)] @test_int [tan(c + d*x)^2/(a + a*sec(c + d*x))^2, x, 9, -(x/a^2) + (2*tan(c + d*x))/(a*d*(a + a*sec(c + d*x))), -(x/a^2) - (2*cot(c + d*x))/(a^2*d) + (2*csc(c + d*x))/(a^2*d)] @test_int [cot(c + d*x)^2/(a + a*sec(c + d*x))^2, x, 12, -(x/a^2) - cot(c + d*x)/(a^2*d) + cot(c + d*x)^3/(3*a^2*d) - (2*cot(c + d*x)^5)/(5*a^2*d) + (2*csc(c + d*x))/(a^2*d) - (4*csc(c + d*x)^3)/(3*a^2*d) + (2*csc(c + d*x)^5)/(5*a^2*d)] @test_int [cot(c + d*x)^4/(a + a*sec(c + d*x))^2, x, 13, x/a^2 + cot(c + d*x)/(a^2*d) - cot(c + d*x)^3/(3*a^2*d) + cot(c + d*x)^5/(5*a^2*d) - (2*cot(c + d*x)^7)/(7*a^2*d) - (2*csc(c + d*x))/(a^2*d) + (2*csc(c + d*x)^3)/(a^2*d) - (6*csc(c + d*x)^5)/(5*a^2*d) + (2*csc(c + d*x)^7)/(7*a^2*d)] @test_int [cot(c + d*x)^6/(a + a*sec(c + d*x))^2, x, 14, -(x/a^2) - cot(c + d*x)/(a^2*d) + cot(c + d*x)^3/(3*a^2*d) - cot(c + d*x)^5/(5*a^2*d) + cot(c + d*x)^7/(7*a^2*d) - (2*cot(c + d*x)^9)/(9*a^2*d) + (2*csc(c + d*x))/(a^2*d) - (8*csc(c + d*x)^3)/(3*a^2*d) + (12*csc(c + d*x)^5)/(5*a^2*d) - (8*csc(c + d*x)^7)/(7*a^2*d) + (2*csc(c + d*x)^9)/(9*a^2*d)] @test_int [tan(c + d*x)^11/(a + a*sec(c + d*x))^3, x, 3, log(cos(c + d*x))/(a^3*d) + (3*sec(c + d*x))/(a^3*d) - sec(c + d*x)^2/(2*a^3*d) - (5*sec(c + d*x)^3)/(3*a^3*d) + (5*sec(c + d*x)^4)/(4*a^3*d) + sec(c + d*x)^5/(5*a^3*d) - sec(c + d*x)^6/(2*a^3*d) + sec(c + d*x)^7/(7*a^3*d)] @test_int [tan(c + d*x)^9/(a + a*sec(c + d*x))^3, x, 3, -(log(cos(c + 
d*x))/(a^3*d)) - (3*sec(c + d*x))/(a^3*d) + sec(c + d*x)^2/(a^3*d) + (2*sec(c + d*x)^3)/(3*a^3*d) - (3*sec(c + d*x)^4)/(4*a^3*d) + sec(c + d*x)^5/(5*a^3*d)] @test_int [tan(c + d*x)^7/(a + a*sec(c + d*x))^3, x, 3, log(cos(c + d*x))/(a^3*d) + (3*sec(c + d*x))/(a^3*d) - (3*sec(c + d*x)^2)/(2*a^3*d) + sec(c + d*x)^3/(3*a^3*d)] @test_int [tan(c + d*x)^5/(a + a*sec(c + d*x))^3, x, 3, (3*log(cos(c + d*x)))/(a^3*d) - (4*log(1 + cos(c + d*x)))/(a^3*d) + sec(c + d*x)/(a^3*d)] @test_int [tan(c + d*x)^3/(a + a*sec(c + d*x))^3, x, 3, 2/(a^3*d*(1 + cos(c + d*x))) + log(1 + cos(c + d*x))/(a^3*d)] @test_int [tan(c + d*x)^1/(a + a*sec(c + d*x))^3, x, 3, 1/(2*a^3*d*(1 + cos(c + d*x))^2) - 2/(a^3*d*(1 + cos(c + d*x))) - log(1 + cos(c + d*x))/(a^3*d)] @test_int [cot(c + d*x)^1/(a + a*sec(c + d*x))^3, x, 3, 1/(6*a^3*d*(1 + cos(c + d*x))^3) - 7/(8*a^3*d*(1 + cos(c + d*x))^2) + 17/(8*a^3*d*(1 + cos(c + d*x))) + log(1 - cos(c + d*x))/(16*a^3*d) + (15*log(1 + cos(c + d*x)))/(16*a^3*d)] @test_int [cot(c + d*x)^3/(a + a*sec(c + d*x))^3, x, 3, -(1/(32*a^3*d*(1 - cos(c + d*x)))) + 1/(16*a^3*d*(1 + cos(c + d*x))^4) - 5/(12*a^3*d*(1 + cos(c + d*x))^3) + 39/(32*a^3*d*(1 + cos(c + d*x))^2) - 9/(4*a^3*d*(1 + cos(c + d*x))) - (7*log(1 - cos(c + d*x)))/(64*a^3*d) - (57*log(1 + cos(c + d*x)))/(64*a^3*d)] @test_int [cot(c + d*x)^5/(a + a*sec(c + d*x))^3, x, 3, -(1/(128*a^3*d*(1 - cos(c + d*x))^2)) + 5/(64*a^3*d*(1 - cos(c + d*x))) + 1/(40*a^3*d*(1 + cos(c + d*x))^5) - 13/(64*a^3*d*(1 + cos(c + d*x))^4) + 35/(48*a^3*d*(1 + cos(c + d*x))^3) - 99/(64*a^3*d*(1 + cos(c + d*x))^2) + 303/(128*a^3*d*(1 + cos(c + d*x))) + (37*log(1 - cos(c + d*x)))/(256*a^3*d) + (219*log(1 + cos(c + d*x)))/(256*a^3*d)] @test_int [tan(c + d*x)^12/(a + a*sec(c + d*x))^3, x, 18, x/a^3 - (125*atanh(sin(c + d*x)))/(128*a^3*d) - tan(c + d*x)/(a^3*d) + (115*sec(c + d*x)*tan(c + d*x))/(128*a^3*d) + (5*sec(c + d*x)^3*tan(c + d*x))/(64*a^3*d) + tan(c + d*x)^3/(3*a^3*d) - (5*sec(c + d*x)*tan(c + d*x)^3)/(8*a^3*d) - (5*sec(c + d*x)^3*tan(c 
+ d*x)^3)/(48*a^3*d) - tan(c + d*x)^5/(5*a^3*d) + (sec(c + d*x)*tan(c + d*x)^5)/(2*a^3*d) + (sec(c + d*x)^3*tan(c + d*x)^5)/(8*a^3*d) - (3*tan(c + d*x)^7)/(7*a^3*d)] @test_int [tan(c + d*x)^10/(a + a*sec(c + d*x))^3, x, 15, -(x/a^3) + (19*atanh(sin(c + d*x)))/(16*a^3*d) + tan(c + d*x)/(a^3*d) - (17*sec(c + d*x)*tan(c + d*x))/(16*a^3*d) - (sec(c + d*x)^3*tan(c + d*x))/(8*a^3*d) - tan(c + d*x)^3/(3*a^3*d) + (3*sec(c + d*x)*tan(c + d*x)^3)/(4*a^3*d) + (sec(c + d*x)^3*tan(c + d*x)^3)/(6*a^3*d) - (3*tan(c + d*x)^5)/(5*a^3*d)] @test_int [tan(c + d*x)^8/(a + a*sec(c + d*x))^3, x, 12, x/a^3 - (13*atanh(sin(c + d*x)))/(8*a^3*d) - tan(c + d*x)/(a^3*d) + (11*sec(c + d*x)*tan(c + d*x))/(8*a^3*d) + (sec(c + d*x)^3*tan(c + d*x))/(4*a^3*d) - tan(c + d*x)^3/(a^3*d)] @test_int [tan(c + d*x)^6/(a + a*sec(c + d*x))^3, x, 6, -(x/a^3) + (7*atanh(sin(c + d*x)))/(2*a^3*d) - (5*tan(c + d*x))/(2*a^3*d) - ((1 - sec(c + d*x))*tan(c + d*x))/(2*a^3*d)] @test_int [tan(c + d*x)^4/(a + a*sec(c + d*x))^3, x, 12, x/a^3 + atanh(sin(c + d*x))/(a^3*d) - (4*tan(c + d*x))/(a^2*d*(a + a*sec(c + d*x))), x/a^3 + atanh(sin(c + d*x))/(a^3*d) + (4*cot(c + d*x))/(a^3*d) - (4*csc(c + d*x))/(a^3*d)] @test_int [tan(c + d*x)^2/(a + a*sec(c + d*x))^3, x, 12, -(x/a^3) + (2*tan(c + d*x))/(a^2*d*(a + a*sec(c + d*x))) - tan(c + d*x)^3/(3*d*(a + a*sec(c + d*x))^3), -(x/a^3) - cot(c + d*x)/(a^3*d) + (4*cot(c + d*x)^3)/(3*a^3*d) + (3*csc(c + d*x))/(a^3*d) - (4*csc(c + d*x)^3)/(3*a^3*d)] @test_int [cot(c + d*x)^2/(a + a*sec(c + d*x))^3, x, 16, -(x/a^3) - cot(c + d*x)/(a^3*d) + cot(c + d*x)^3/(3*a^3*d) - cot(c + d*x)^5/(5*a^3*d) + (4*cot(c + d*x)^7)/(7*a^3*d) + (3*csc(c + d*x))/(a^3*d) - (10*csc(c + d*x)^3)/(3*a^3*d) + (11*csc(c + d*x)^5)/(5*a^3*d) - (4*csc(c + d*x)^7)/(7*a^3*d)] @test_int [cot(c + d*x)^4/(a + a*sec(c + d*x))^3, x, 17, x/a^3 + cot(c + d*x)/(a^3*d) - cot(c + d*x)^3/(3*a^3*d) + cot(c + d*x)^5/(5*a^3*d) - cot(c + d*x)^7/(7*a^3*d) + (4*cot(c + d*x)^9)/(9*a^3*d) - (3*csc(c + d*x))/(a^3*d) + (13*csc(c + 
d*x)^3)/(3*a^3*d) - (21*csc(c + d*x)^5)/(5*a^3*d) + (15*csc(c + d*x)^7)/(7*a^3*d) - (4*csc(c + d*x)^9)/(9*a^3*d)] @test_int [cot(c + d*x)^6/(a + a*sec(c + d*x))^3, x, 18, -(x/a^3) - cot(c + d*x)/(a^3*d) + cot(c + d*x)^3/(3*a^3*d) - cot(c + d*x)^5/(5*a^3*d) + cot(c + d*x)^7/(7*a^3*d) - cot(c + d*x)^9/(9*a^3*d) + (4*cot(c + d*x)^11)/(11*a^3*d) + (3*csc(c + d*x))/(a^3*d) - (16*csc(c + d*x)^3)/(3*a^3*d) + (34*csc(c + d*x)^5)/(5*a^3*d) - (36*csc(c + d*x)^7)/(7*a^3*d) + (19*csc(c + d*x)^9)/(9*a^3*d) - (4*csc(c + d*x)^11)/(11*a^3*d)] #= ::Subsection::Closed:: =# #=Integrands*of*the*form*(d*tan(e+f*x))^(n/2)*(a+a*sec(e+f*x))^m=# #= ::Subsubsection::Closed:: =# #=n>0=# @test_int [(a + a*sec(c + d*x))*(e*tan(c + d*x))^(5/2), x, 17, (a*e^(5/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d) - (a*e^(5/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d) - (a*e^(5/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d) + (a*e^(5/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d) + (6*a*e^2*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(5*d*sqrt(sin(2*c + 2*d*x))) - (6*a*e*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/(5*d) + (2*e*(5*a + 3*a*sec(c + d*x))*(e*tan(c + d*x))^(3/2))/(15*d)] @test_int [(a + a*sec(c + d*x))*(e*tan(c + d*x))^(3/2), x, 16, (a*e^(3/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d) - (a*e^(3/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d) + (a*e^(3/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d) - (a*e^(3/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d) - (a*e^2*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(3*d*sqrt(e*tan(c + d*x))) + (2*e*(3*a + a*sec(c + d*x))*sqrt(e*tan(c + d*x)))/(3*d)] @test_int [(a + a*sec(c + d*x))*sqrt(e*tan(c + d*x)), x, 16, -((a*sqrt(e)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d)) + 
(a*sqrt(e)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d) + (a*sqrt(e)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d) - (a*sqrt(e)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d) - (2*a*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(d*sqrt(sin(2*c + 2*d*x))) + (2*a*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/(d*e)] @test_int [(a + a*sec(c + d*x))/sqrt(e*tan(c + d*x)), x, 15, -((a*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*sqrt(e))) + (a*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*sqrt(e)) - (a*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*sqrt(e)) + (a*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*sqrt(e)) + (a*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(d*sqrt(e*tan(c + d*x)))] @test_int [(a + a*sec(c + d*x))/(e*tan(c + d*x))^(3/2), x, 17, (a*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*e^(3/2)) - (a*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*e^(3/2)) - (a*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*e^(3/2)) + (a*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*e^(3/2)) - (2*(a + a*sec(c + d*x)))/(d*e*sqrt(e*tan(c + d*x))) - (2*a*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(d*e^2*sqrt(sin(2*c + 2*d*x))) + (2*a*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/(d*e^3)] @test_int [(a + a*sec(c + d*x))/(e*tan(c + d*x))^(5/2), x, 16, (a*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*e^(5/2)) - (a*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*e^(5/2)) + (a*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*e^(5/2)) - (a*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*e^(5/2)) - (2*(a + a*sec(c + d*x)))/(3*d*e*(e*tan(c + d*x))^(3/2)) - 
(a*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(3*d*e^2*sqrt(e*tan(c + d*x)))] @test_int [(a + a*sec(c + d*x))/(e*tan(c + d*x))^(7/2), x, 18, -((a*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*e^(7/2))) + (a*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*e^(7/2)) + (a*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*e^(7/2)) - (a*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*e^(7/2)) - (2*(a + a*sec(c + d*x)))/(5*d*e*(e*tan(c + d*x))^(5/2)) + (2*(5*a + 3*a*sec(c + d*x)))/(5*d*e^3*sqrt(e*tan(c + d*x))) + (6*a*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(5*d*e^4*sqrt(sin(2*c + 2*d*x))) - (6*a*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/(5*d*e^5)] @test_int [(a + a*sec(c + d*x))^2*(e*tan(c + d*x))^(5/2), x, 21, (a^2*e^(5/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d) - (a^2*e^(5/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d) - (a^2*e^(5/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d) + (a^2*e^(5/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d) + (12*a^2*e^2*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(5*d*sqrt(sin(2*c + 2*d*x))) + (2*a^2*e*(e*tan(c + d*x))^(3/2))/(3*d) - (12*a^2*e*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/(5*d) + (4*a^2*e*sec(c + d*x)*(e*tan(c + d*x))^(3/2))/(5*d) + (2*a^2*(e*tan(c + d*x))^(7/2))/(7*d*e)] @test_int [(a + a*sec(c + d*x))^2*(e*tan(c + d*x))^(3/2), x, 20, (a^2*e^(3/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d) - (a^2*e^(3/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d) + (a^2*e^(3/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d) - (a^2*e^(3/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d) - (2*a^2*e^2*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 
2*d*x)))/(3*d*sqrt(e*tan(c + d*x))) + (2*a^2*e*sqrt(e*tan(c + d*x)))/d + (4*a^2*e*sec(c + d*x)*sqrt(e*tan(c + d*x)))/(3*d) + (2*a^2*(e*tan(c + d*x))^(5/2))/(5*d*e)] @test_int [(a + a*sec(c + d*x))^2*sqrt(e*tan(c + d*x)), x, 19, -((a^2*sqrt(e)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d)) + (a^2*sqrt(e)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d) + (a^2*sqrt(e)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d) - (a^2*sqrt(e)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d) - (4*a^2*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(d*sqrt(sin(2*c + 2*d*x))) + (2*a^2*(e*tan(c + d*x))^(3/2))/(3*d*e) + (4*a^2*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/(d*e)] @test_int [(a + a*sec(c + d*x))^2/sqrt(e*tan(c + d*x)), x, 18, -((a^2*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*sqrt(e))) + (a^2*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*sqrt(e)) - (a^2*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*sqrt(e)) + (a^2*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*sqrt(e)) + (2*a^2*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(d*sqrt(e*tan(c + d*x))) + (2*a^2*sqrt(e*tan(c + d*x)))/(d*e)] @test_int [(a + a*sec(c + d*x))^2/(e*tan(c + d*x))^(3/2), x, 20, (a^2*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*e^(3/2)) - (a^2*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*e^(3/2)) - (a^2*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*e^(3/2)) + (a^2*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*e^(3/2)) - (4*a^2)/(d*e*sqrt(e*tan(c + d*x))) - (4*a^2*cos(c + d*x))/(d*e*sqrt(e*tan(c + d*x))) - (4*a^2*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(d*e^2*sqrt(sin(2*c + 2*d*x)))] @test_int [(a + a*sec(c + d*x))^2/(e*tan(c + d*x))^(5/2), x, 20, 
(a^2*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*e^(5/2)) - (a^2*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*e^(5/2)) + (a^2*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*e^(5/2)) - (a^2*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*e^(5/2)) - (4*a^2)/(3*d*e*(e*tan(c + d*x))^(3/2)) - (4*a^2*sec(c + d*x))/(3*d*e*(e*tan(c + d*x))^(3/2)) - (2*a^2*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(3*d*e^2*sqrt(e*tan(c + d*x)))] @test_int [(a + a*sec(c + d*x))^2/(e*tan(c + d*x))^(7/2), x, 22, -((a^2*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*e^(7/2))) + (a^2*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*d*e^(7/2)) + (a^2*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*e^(7/2)) - (a^2*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*d*e^(7/2)) - (4*a^2)/(5*d*e*(e*tan(c + d*x))^(5/2)) - (4*a^2*sec(c + d*x))/(5*d*e*(e*tan(c + d*x))^(5/2)) + (2*a^2)/(d*e^3*sqrt(e*tan(c + d*x))) + (12*a^2*cos(c + d*x))/(5*d*e^3*sqrt(e*tan(c + d*x))) + (12*a^2*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(5*d*e^4*sqrt(sin(2*c + 2*d*x)))] #= ::Subsubsection::Closed:: =# #=n<0=# @test_int [(e*tan(c + d*x))^(11/2)/(a + a*sec(c + d*x)), x, 18, (e^(11/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d) - (e^(11/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d) + (e^(11/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) - (e^(11/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) + (5*e^6*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(21*a*d*sqrt(e*tan(c + d*x))) + (2*e^5*(21 - 5*sec(c + d*x))*sqrt(e*tan(c + d*x)))/(21*a*d) - (2*e^3*(7 - 5*sec(c + d*x))*(e*tan(c + d*x))^(5/2))/(35*a*d)] @test_int [(e*tan(c + d*x))^(9/2)/(a + a*sec(c + d*x)), x, 
18, -((e^(9/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d)) + (e^(9/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d) + (e^(9/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) - (e^(9/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) + (6*e^4*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(5*a*d*sqrt(sin(2*c + 2*d*x))) - (6*e^3*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/(5*a*d) - (2*e^3*(5 - 3*sec(c + d*x))*(e*tan(c + d*x))^(3/2))/(15*a*d)] @test_int [(e*tan(c + d*x))^(7/2)/(a + a*sec(c + d*x)), x, 17, -((e^(7/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d)) + (e^(7/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d) - (e^(7/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) + (e^(7/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) - (e^4*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(3*a*d*sqrt(e*tan(c + d*x))) - (2*e^3*(3 - sec(c + d*x))*sqrt(e*tan(c + d*x)))/(3*a*d)] @test_int [(e*tan(c + d*x))^(5/2)/(a + a*sec(c + d*x)), x, 17, (e^(5/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d) - (e^(5/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d) - (e^(5/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) + (e^(5/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) - (2*e^2*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(a*d*sqrt(sin(2*c + 2*d*x))) + (2*e*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/(a*d)] @test_int [(e*tan(c + d*x))^(3/2)/(a + a*sec(c + d*x)), x, 16, (e^(3/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d) - (e^(3/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d) + (e^(3/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) - 
(e^(3/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) + (e^2*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(a*d*sqrt(e*tan(c + d*x)))] @test_int [sqrt(e*tan(c + d*x))/(a + a*sec(c + d*x)), x, 18, -((sqrt(e)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d)) + (sqrt(e)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d) + (sqrt(e)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) - (sqrt(e)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) + (2*e*(1 - sec(c + d*x)))/(a*d*sqrt(e*tan(c + d*x))) - (2*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(a*d*sqrt(sin(2*c + 2*d*x))) + (2*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/(a*d*e)] @test_int [1/((a + a*sec(c + d*x))*sqrt(e*tan(c + d*x))), x, 17, -(atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e))/(sqrt(2)*a*d*sqrt(e))) + atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e))/(sqrt(2)*a*d*sqrt(e)) - log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x)))/(2*sqrt(2)*a*d*sqrt(e)) + log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x)))/(2*sqrt(2)*a*d*sqrt(e)) + (2*e*(1 - sec(c + d*x)))/(3*a*d*(e*tan(c + d*x))^(3/2)) - (Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(3*a*d*sqrt(e*tan(c + d*x)))] @test_int [1/((a + a*sec(c + d*x))*(e*tan(c + d*x))^(3/2)), x, 19, atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e))/(sqrt(2)*a*d*e^(3/2)) - atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e))/(sqrt(2)*a*d*e^(3/2)) - log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x)))/(2*sqrt(2)*a*d*e^(3/2)) + log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x)))/(2*sqrt(2)*a*d*e^(3/2)) + (2*e*(1 - sec(c + d*x)))/(5*a*d*(e*tan(c + d*x))^(5/2)) - (2*(5 - 3*sec(c + d*x)))/(5*a*d*e*sqrt(e*tan(c + d*x))) + (6*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(5*a*d*e^2*sqrt(sin(2*c + 2*d*x))) - (6*cos(c + 
d*x)*(e*tan(c + d*x))^(3/2))/(5*a*d*e^3)] @test_int [1/((a + a*sec(c + d*x))*(e*tan(c + d*x))^(5/2)), x, 18, atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e))/(sqrt(2)*a*d*e^(5/2)) - atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e))/(sqrt(2)*a*d*e^(5/2)) + log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x)))/(2*sqrt(2)*a*d*e^(5/2)) - log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x)))/(2*sqrt(2)*a*d*e^(5/2)) + (2*e*(1 - sec(c + d*x)))/(7*a*d*(e*tan(c + d*x))^(7/2)) - (2*(7 - 5*sec(c + d*x)))/(21*a*d*e*(e*tan(c + d*x))^(3/2)) + (5*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(21*a*d*e^2*sqrt(e*tan(c + d*x)))] @test_int [(e*tan(c + d*x))^(13/2)/(a + a*sec(c + d*x))^2, x, 22, (e^(13/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d) - (e^(13/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d) - (e^(13/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) + (e^(13/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) - (12*e^6*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(5*a^2*d*sqrt(sin(2*c + 2*d*x))) + (2*e^5*(e*tan(c + d*x))^(3/2))/(3*a^2*d) + (12*e^5*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/(5*a^2*d) - (4*e^5*sec(c + d*x)*(e*tan(c + d*x))^(3/2))/(5*a^2*d) + (2*e^3*(e*tan(c + d*x))^(7/2))/(7*a^2*d)] @test_int [(e*tan(c + d*x))^(11/2)/(a + a*sec(c + d*x))^2, x, 21, (e^(11/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d) - (e^(11/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d) + (e^(11/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) - (e^(11/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) + (2*e^6*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(3*a^2*d*sqrt(e*tan(c + d*x))) + (2*e^5*sqrt(e*tan(c + d*x)))/(a^2*d) - (4*e^5*sec(c + d*x)*sqrt(e*tan(c + 
d*x)))/(3*a^2*d) + (2*e^3*(e*tan(c + d*x))^(5/2))/(5*a^2*d)] @test_int [(e*tan(c + d*x))^(9/2)/(a + a*sec(c + d*x))^2, x, 20, -((e^(9/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d)) + (e^(9/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d) + (e^(9/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) - (e^(9/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) + (4*e^4*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(a^2*d*sqrt(sin(2*c + 2*d*x))) + (2*e^3*(e*tan(c + d*x))^(3/2))/(3*a^2*d) - (4*e^3*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/(a^2*d)] @test_int [(e*tan(c + d*x))^(7/2)/(a + a*sec(c + d*x))^2, x, 19, -((e^(7/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d)) + (e^(7/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d) - (e^(7/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) + (e^(7/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) - (2*e^4*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(a^2*d*sqrt(e*tan(c + d*x))) + (2*e^3*sqrt(e*tan(c + d*x)))/(a^2*d)] @test_int [(e*tan(c + d*x))^(5/2)/(a + a*sec(c + d*x))^2, x, 21, (e^(5/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d) - (e^(5/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d) - (e^(5/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) + (e^(5/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) - (4*e^3)/(a^2*d*sqrt(e*tan(c + d*x))) + (4*e^3*cos(c + d*x))/(a^2*d*sqrt(e*tan(c + d*x))) + (4*e^2*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(a^2*d*sqrt(sin(2*c + 2*d*x)))] @test_int [(e*tan(c + d*x))^(3/2)/(a + a*sec(c + d*x))^2, x, 21, (e^(3/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d) - 
(e^(3/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d) + (e^(3/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) - (e^(3/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) - (4*e^3)/(3*a^2*d*(e*tan(c + d*x))^(3/2)) + (4*e^3*sec(c + d*x))/(3*a^2*d*(e*tan(c + d*x))^(3/2)) + (2*e^2*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(3*a^2*d*sqrt(e*tan(c + d*x)))] @test_int [sqrt(e*tan(c + d*x))/(a + a*sec(c + d*x))^2, x, 23, -((sqrt(e)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d)) + (sqrt(e)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a^2*d) + (sqrt(e)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) - (sqrt(e)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a^2*d) - (4*e^3)/(5*a^2*d*(e*tan(c + d*x))^(5/2)) + (4*e^3*sec(c + d*x))/(5*a^2*d*(e*tan(c + d*x))^(5/2)) + (2*e)/(a^2*d*sqrt(e*tan(c + d*x))) - (12*e*cos(c + d*x))/(5*a^2*d*sqrt(e*tan(c + d*x))) - (12*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(5*a^2*d*sqrt(sin(2*c + 2*d*x)))] @test_int [1/((a + a*sec(c + d*x))^2*sqrt(e*tan(c + d*x))), x, 23, -(atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e))/(sqrt(2)*a^2*d*sqrt(e))) + atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e))/(sqrt(2)*a^2*d*sqrt(e)) - log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x)))/(2*sqrt(2)*a^2*d*sqrt(e)) + log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x)))/(2*sqrt(2)*a^2*d*sqrt(e)) - (4*e^3)/(7*a^2*d*(e*tan(c + d*x))^(7/2)) + (4*e^3*sec(c + d*x))/(7*a^2*d*(e*tan(c + d*x))^(7/2)) + (2*e)/(3*a^2*d*(e*tan(c + d*x))^(3/2)) - (20*e*sec(c + d*x))/(21*a^2*d*(e*tan(c + d*x))^(3/2)) - (10*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(21*a^2*d*sqrt(e*tan(c + d*x)))] #= ::Subsection::Closed:: =# #=Integrands*of*the*form*tan(e+f*x)^n*(a+a*sec(e+f*x))^(m/2)=# #= 
::Subsubsection::Closed:: =# #=n>0=# @test_int [sqrt(a + a*sec(c + d*x))*tan(c + d*x)^5, x, 8, (-2*sqrt(a)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d + (2*sqrt(a + a*sec(c + d*x)))/d + (2*(a + a*sec(c + d*x))^(3/2))/(3*a*d) + (2*(a + a*sec(c + d*x))^(5/2))/(5*a^2*d) - (6*(a + a*sec(c + d*x))^(7/2))/(7*a^3*d) + (2*(a + a*sec(c + d*x))^(9/2))/(9*a^4*d)] @test_int [sqrt(a + a*sec(c + d*x))*tan(c + d*x)^3, x, 6, (2*sqrt(a)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d - (2*sqrt(a + a*sec(c + d*x)))/d - (2*(a + a*sec(c + d*x))^(3/2))/(3*a*d) + (2*(a + a*sec(c + d*x))^(5/2))/(5*a^2*d)] @test_int [sqrt(a + a*sec(c + d*x))*tan(c + d*x), x, 4, (-2*sqrt(a)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d + (2*sqrt(a + a*sec(c + d*x)))/d] @test_int [cot(c + d*x)*sqrt(a + a*sec(c + d*x)), x, 6, (2*sqrt(a)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d - (sqrt(2)*sqrt(a)*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/d] @test_int [cot(c + d*x)^3*sqrt(a + a*sec(c + d*x)), x, 8, -((2*sqrt(a)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d) + (7*sqrt(a)*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/(4*sqrt(2)*d) + a/(4*d*sqrt(a + a*sec(c + d*x))) + a/(2*d*(1 - sec(c + d*x))*sqrt(a + a*sec(c + d*x)))] @test_int [cot(c + d*x)^5*sqrt(a + a*sec(c + d*x)), x, 10, (2*sqrt(a)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d - (107*sqrt(a)*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/(64*sqrt(2)*d) + (43*a^2)/(96*d*(a + a*sec(c + d*x))^(3/2)) - a^2/(4*d*(1 - sec(c + d*x))^2*(a + a*sec(c + d*x))^(3/2)) - (15*a^2)/(16*d*(1 - sec(c + d*x))*(a + a*sec(c + d*x))^(3/2)) - (21*a)/(64*d*sqrt(a + a*sec(c + d*x)))] @test_int [sqrt(a + a*sec(c + d*x))*tan(c + d*x)^6, x, 4, -((2*sqrt(a)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d) + (2*a*tan(c + d*x))/(d*sqrt(a + a*sec(c + d*x))) - (2*a^2*tan(c + d*x)^3)/(3*d*(a + a*sec(c + d*x))^(3/2)) + (2*a^3*tan(c + d*x)^5)/(5*d*(a + a*sec(c + d*x))^(5/2)) + (2*a^4*tan(c + d*x)^7)/(d*(a + a*sec(c + d*x))^(7/2)) + (10*a^5*tan(c + d*x)^9)/(9*d*(a + a*sec(c + 
d*x))^(9/2)) + (2*a^6*tan(c + d*x)^11)/(11*d*(a + a*sec(c + d*x))^(11/2))] @test_int [sqrt(a + a*sec(c + d*x))*tan(c + d*x)^4, x, 4, (2*sqrt(a)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d - (2*a*tan(c + d*x))/(d*sqrt(a + a*sec(c + d*x))) + (2*a^2*tan(c + d*x)^3)/(3*d*(a + a*sec(c + d*x))^(3/2)) + (6*a^3*tan(c + d*x)^5)/(5*d*(a + a*sec(c + d*x))^(5/2)) + (2*a^4*tan(c + d*x)^7)/(7*d*(a + a*sec(c + d*x))^(7/2))] @test_int [sqrt(a + a*sec(c + d*x))*tan(c + d*x)^2, x, 4, -((2*sqrt(a)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d) + (2*a*tan(c + d*x))/(d*sqrt(a + a*sec(c + d*x))) + (2*a^2*tan(c + d*x)^3)/(3*d*(a + a*sec(c + d*x))^(3/2))] @test_int [cot(c + d*x)^2*sqrt(a + a*sec(c + d*x)), x, 5, -((2*sqrt(a)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d) + (sqrt(a)*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(sqrt(2)*d) - (cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/d] @test_int [cot(c + d*x)^4*sqrt(a + a*sec(c + d*x)), x, 7, (2*sqrt(a)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d - (9*sqrt(a)*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(8*sqrt(2)*d) + (7*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(8*d) + (cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2))/(12*a*d) - (cos(c + d*x)*cot(c + d*x)^3*sec((1/2)*(c + d*x))^2*(a + a*sec(c + d*x))^(3/2))/(4*a*d)] @test_int [cot(c + d*x)^6*sqrt(a + a*sec(c + d*x)), x, 9, -((2*sqrt(a)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d) + (151*sqrt(a)*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(128*sqrt(2)*d) - (105*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(128*d) - (23*cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2))/(192*a*d) + (87*cot(c + d*x)^5*(a + a*sec(c + d*x))^(5/2))/(160*a^2*d) - (17*cos(c + d*x)*cot(c + d*x)^5*sec((1/2)*(c + d*x))^2*(a + a*sec(c + d*x))^(5/2))/(32*a^2*d) - (cos(c + d*x)^2*cot(c + d*x)^5*sec((1/2)*(c + d*x))^4*(a + a*sec(c + d*x))^(5/2))/(16*a^2*d)] @test_int [(a + a*sec(c + d*x))^(3/2)*tan(c + d*x)^5, x, 9, 
(-2*a^(3/2)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d + (2*a*sqrt(a + a*sec(c + d*x)))/d + (2*(a + a*sec(c + d*x))^(3/2))/(3*d) + (2*(a + a*sec(c + d*x))^(5/2))/(5*a*d) + (2*(a + a*sec(c + d*x))^(7/2))/(7*a^2*d) - (2*(a + a*sec(c + d*x))^(9/2))/(3*a^3*d) + (2*(a + a*sec(c + d*x))^(11/2))/(11*a^4*d)] @test_int [(a + a*sec(c + d*x))^(3/2)*tan(c + d*x)^3, x, 7, (2*a^(3/2)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d - (2*a*sqrt(a + a*sec(c + d*x)))/d - (2*(a + a*sec(c + d*x))^(3/2))/(3*d) - (2*(a + a*sec(c + d*x))^(5/2))/(5*a*d) + (2*(a + a*sec(c + d*x))^(7/2))/(7*a^2*d)] @test_int [(a + a*sec(c + d*x))^(3/2)*tan(c + d*x), x, 5, (-2*a^(3/2)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d + (2*a*sqrt(a + a*sec(c + d*x)))/d + (2*(a + a*sec(c + d*x))^(3/2))/(3*d)] @test_int [cot(c + d*x)*(a + a*sec(c + d*x))^(3/2), x, 6, (2*a^(3/2)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d - (2*sqrt(2)*a^(3/2)*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/d] @test_int [cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2), x, 7, (-2*a^(3/2)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d + (5*a^(3/2)*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/(2*sqrt(2)*d) + (a*sqrt(a + a*sec(c + d*x)))/(2*d*(1 - sec(c + d*x)))] @test_int [cot(c + d*x)^5*(a + a*sec(c + d*x))^(3/2), x, 9, (2*a^(3/2)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d - (71*a^(3/2)*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/(32*sqrt(2)*d) + (7*a^2)/(32*d*sqrt(a + a*sec(c + d*x))) - a^2/(4*d*(1 - sec(c + d*x))^2*sqrt(a + a*sec(c + d*x))) - (13*a^2)/(16*d*(1 - sec(c + d*x))*sqrt(a + a*sec(c + d*x)))] @test_int [(a + a*sec(c + d*x))^(3/2)*tan(c + d*x)^6, x, 4, -((2*a^(3/2)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d) + (2*a^2*tan(c + d*x))/(d*sqrt(a + a*sec(c + d*x))) - (2*a^3*tan(c + d*x)^3)/(3*d*(a + a*sec(c + d*x))^(3/2)) + (2*a^4*tan(c + d*x)^5)/(5*d*(a + a*sec(c + d*x))^(5/2)) + (30*a^5*tan(c + d*x)^7)/(7*d*(a + a*sec(c + d*x))^(7/2)) + (34*a^6*tan(c + d*x)^9)/(9*d*(a + a*sec(c + d*x))^(9/2)) + (14*a^7*tan(c + 
d*x)^11)/(11*d*(a + a*sec(c + d*x))^(11/2)) + (2*a^8*tan(c + d*x)^13)/(13*d*(a + a*sec(c + d*x))^(13/2))] @test_int [(a + a*sec(c + d*x))^(3/2)*tan(c + d*x)^4, x, 4, (2*a^(3/2)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d - (2*a^2*tan(c + d*x))/(d*sqrt(a + a*sec(c + d*x))) + (2*a^3*tan(c + d*x)^3)/(3*d*(a + a*sec(c + d*x))^(3/2)) + (14*a^4*tan(c + d*x)^5)/(5*d*(a + a*sec(c + d*x))^(5/2)) + (10*a^5*tan(c + d*x)^7)/(7*d*(a + a*sec(c + d*x))^(7/2)) + (2*a^6*tan(c + d*x)^9)/(9*d*(a + a*sec(c + d*x))^(9/2))] @test_int [(a + a*sec(c + d*x))^(3/2)*tan(c + d*x)^2, x, 4, -((2*a^(3/2)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d) + (2*a^2*tan(c + d*x))/(d*sqrt(a + a*sec(c + d*x))) + (2*a^3*tan(c + d*x)^3)/(d*(a + a*sec(c + d*x))^(3/2)) + (2*a^4*tan(c + d*x)^5)/(5*d*(a + a*sec(c + d*x))^(5/2))] @test_int [cot(c + d*x)^2*(a + a*sec(c + d*x))^(3/2), x, 3, -((2*a^(3/2)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d) - (2*a*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/d] @test_int [cot(c + d*x)^4*(a + a*sec(c + d*x))^(3/2), x, 6, (2*a^(3/2)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d - (a^(3/2)*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(2*sqrt(2)*d) + (3*a*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(2*d) - (cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2))/(3*d)] @test_int [cot(c + d*x)^6*(a + a*sec(c + d*x))^(3/2), x, 8, -((2*a^(3/2)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d) + (11*a^(3/2)*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(16*sqrt(2)*d) - (21*a*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(16*d) + (5*cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2))/(24*d) + (3*cot(c + d*x)^5*(a + a*sec(c + d*x))^(5/2))/(20*a*d) - (cos(c + d*x)*cot(c + d*x)^5*sec((1/2)*(c + d*x))^2*(a + a*sec(c + d*x))^(5/2))/(4*a*d)] @test_int [(a + a*sec(c + d*x))^(5/2)*tan(c + d*x)^5, x, 10, (-2*a^(5/2)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d + (2*a^2*sqrt(a + a*sec(c + d*x)))/d + (2*a*(a + a*sec(c + d*x))^(3/2))/(3*d) + 
(2*(a + a*sec(c + d*x))^(5/2))/(5*d) + (2*(a + a*sec(c + d*x))^(7/2))/(7*a*d) + (2*(a + a*sec(c + d*x))^(9/2))/(9*a^2*d) - (6*(a + a*sec(c + d*x))^(11/2))/(11*a^3*d) + (2*(a + a*sec(c + d*x))^(13/2))/(13*a^4*d)] @test_int [(a + a*sec(c + d*x))^(5/2)*tan(c + d*x)^3, x, 8, (2*a^(5/2)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d - (2*a^2*sqrt(a + a*sec(c + d*x)))/d - (2*a*(a + a*sec(c + d*x))^(3/2))/(3*d) - (2*(a + a*sec(c + d*x))^(5/2))/(5*d) - (2*(a + a*sec(c + d*x))^(7/2))/(7*a*d) + (2*(a + a*sec(c + d*x))^(9/2))/(9*a^2*d)] @test_int [(a + a*sec(c + d*x))^(5/2)*tan(c + d*x), x, 6, (-2*a^(5/2)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d + (2*a^2*sqrt(a + a*sec(c + d*x)))/d + (2*a*(a + a*sec(c + d*x))^(3/2))/(3*d) + (2*(a + a*sec(c + d*x))^(5/2))/(5*d)] @test_int [cot(c + d*x)*(a + a*sec(c + d*x))^(5/2), x, 7, (2*a^(5/2)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d - (4*sqrt(2)*a^(5/2)*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/d + (2*a^2*sqrt(a + a*sec(c + d*x)))/d] @test_int [cot(c + d*x)^3*(a + a*sec(c + d*x))^(5/2), x, 7, (-2*a^(5/2)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d + (3*a^(5/2)*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/(sqrt(2)*d) + (a^2*sqrt(a + a*sec(c + d*x)))/(d*(1 - sec(c + d*x)))] @test_int [cot(c + d*x)^5*(a + a*sec(c + d*x))^(5/2), x, 8, (2*a^(5/2)*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/d - (43*a^(5/2)*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/(16*sqrt(2)*d) - (a^2*sqrt(a + a*sec(c + d*x)))/(4*d*(1 - sec(c + d*x))^2) - (11*a^2*sqrt(a + a*sec(c + d*x)))/(16*d*(1 - sec(c + d*x)))] @test_int [(a + a*sec(c + d*x))^(5/2)*tan(c + d*x)^6, x, 4, -((2*a^(5/2)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d) + (2*a^3*tan(c + d*x))/(d*sqrt(a + a*sec(c + d*x))) - (2*a^4*tan(c + d*x)^3)/(3*d*(a + a*sec(c + d*x))^(3/2)) + (2*a^5*tan(c + d*x)^5)/(5*d*(a + a*sec(c + d*x))^(5/2)) + (62*a^6*tan(c + d*x)^7)/(7*d*(a + a*sec(c + d*x))^(7/2)) + (98*a^7*tan(c + d*x)^9)/(9*d*(a + a*sec(c + d*x))^(9/2)) + (62*a^8*tan(c + 
d*x)^11)/(11*d*(a + a*sec(c + d*x))^(11/2)) + (18*a^9*tan(c + d*x)^13)/(13*d*(a + a*sec(c + d*x))^(13/2)) + (2*a^10*tan(c + d*x)^15)/(15*d*(a + a*sec(c + d*x))^(15/2))] @test_int [(a + a*sec(c + d*x))^(5/2)*tan(c + d*x)^4, x, 4, (2*a^(5/2)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d - (2*a^3*tan(c + d*x))/(d*sqrt(a + a*sec(c + d*x))) + (2*a^4*tan(c + d*x)^3)/(3*d*(a + a*sec(c + d*x))^(3/2)) + (6*a^5*tan(c + d*x)^5)/(d*(a + a*sec(c + d*x))^(5/2)) + (34*a^6*tan(c + d*x)^7)/(7*d*(a + a*sec(c + d*x))^(7/2)) + (14*a^7*tan(c + d*x)^9)/(9*d*(a + a*sec(c + d*x))^(9/2)) + (2*a^8*tan(c + d*x)^11)/(11*d*(a + a*sec(c + d*x))^(11/2))] @test_int [(a + a*sec(c + d*x))^(5/2)*tan(c + d*x)^2, x, 4, -((2*a^(5/2)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d) + (2*a^3*tan(c + d*x))/(d*sqrt(a + a*sec(c + d*x))) + (14*a^4*tan(c + d*x)^3)/(3*d*(a + a*sec(c + d*x))^(3/2)) + (2*a^5*tan(c + d*x)^5)/(d*(a + a*sec(c + d*x))^(5/2)) + (2*a^6*tan(c + d*x)^7)/(7*d*(a + a*sec(c + d*x))^(7/2))] @test_int [cot(c + d*x)^2*(a + a*sec(c + d*x))^(5/2), x, 3, -((2*a^(5/2)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d) - (4*a^2*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/d] @test_int [cot(c + d*x)^4*(a + a*sec(c + d*x))^(5/2), x, 4, (2*a^(5/2)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d + (2*a^2*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/d - (2*a*cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2))/(3*d)] @test_int [cot(c + d*x)^6*(a + a*sec(c + d*x))^(5/2), x, 7, -((2*a^(5/2)*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/d) + (a^(5/2)*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(4*sqrt(2)*d) - (7*a^2*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(4*d) + (a*cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2))/(2*d) - (cot(c + d*x)^5*(a + a*sec(c + d*x))^(5/2))/(5*d)] #= ::Subsubsection::Closed:: =# #=n<0=# @test_int [tan(c + d*x)^5/sqrt(a + a*sec(c + d*x)), x, 7, (-2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(sqrt(a)*d) + (2*sqrt(a + a*sec(c + d*x)))/(a*d) + (2*(a 
+ a*sec(c + d*x))^(3/2))/(3*a^2*d) - (6*(a + a*sec(c + d*x))^(5/2))/(5*a^3*d) + (2*(a + a*sec(c + d*x))^(7/2))/(7*a^4*d)] @test_int [tan(c + d*x)^3/sqrt(a + a*sec(c + d*x)), x, 5, (2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(sqrt(a)*d) - (2*sqrt(a + a*sec(c + d*x)))/(a*d) + (2*(a + a*sec(c + d*x))^(3/2))/(3*a^2*d)] @test_int [tan(c + d*x)/sqrt(a + a*sec(c + d*x)), x, 3, (-2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(sqrt(a)*d)] @test_int [cot(c + d*x)/sqrt(a + a*sec(c + d*x)), x, 7, (2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(sqrt(a)*d) - atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a)))/(sqrt(2)*sqrt(a)*d) - 1/(d*sqrt(a + a*sec(c + d*x)))] @test_int [cot(c + d*x)^3/sqrt(a + a*sec(c + d*x)), x, 9, -((2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(sqrt(a)*d)) + (9*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/(8*sqrt(2)*sqrt(a)*d) - a/(12*d*(a + a*sec(c + d*x))^(3/2)) + a/(2*d*(1 - sec(c + d*x))*(a + a*sec(c + d*x))^(3/2)) + 7/(8*d*sqrt(a + a*sec(c + d*x)))] @test_int [cot(c + d*x)^5/sqrt(a + a*sec(c + d*x)), x, 11, (2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(sqrt(a)*d) - (151*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/(128*sqrt(2)*sqrt(a)*d) + (87*a^2)/(160*d*(a + a*sec(c + d*x))^(5/2)) - a^2/(4*d*(1 - sec(c + d*x))^2*(a + a*sec(c + d*x))^(5/2)) - (17*a^2)/(16*d*(1 - sec(c + d*x))*(a + a*sec(c + d*x))^(5/2)) + (23*a)/(192*d*(a + a*sec(c + d*x))^(3/2)) - 105/(128*d*sqrt(a + a*sec(c + d*x)))] @test_int [tan(c + d*x)^6/sqrt(a + a*sec(c + d*x)), x, 4, -((2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(sqrt(a)*d)) + (2*tan(c + d*x))/(d*sqrt(a + a*sec(c + d*x))) - (2*a*tan(c + d*x)^3)/(3*d*(a + a*sec(c + d*x))^(3/2)) + (2*a^2*tan(c + d*x)^5)/(5*d*(a + a*sec(c + d*x))^(5/2)) + (6*a^3*tan(c + d*x)^7)/(7*d*(a + a*sec(c + d*x))^(7/2)) + (2*a^4*tan(c + d*x)^9)/(9*d*(a + a*sec(c + d*x))^(9/2))] @test_int [tan(c + d*x)^4/sqrt(a + a*sec(c + d*x)), x, 5, (2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(sqrt(a)*d) - (2*tan(c + d*x))/(d*sqrt(a + 
a*sec(c + d*x))) + (2*a*tan(c + d*x)^3)/(3*d*(a + a*sec(c + d*x))^(3/2)) + (2*a^2*tan(c + d*x)^5)/(5*d*(a + a*sec(c + d*x))^(5/2))] @test_int [tan(c + d*x)^2/sqrt(a + a*sec(c + d*x)), x, 3, (-2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(sqrt(a)*d) + (2*tan(c + d*x))/(d*sqrt(a + a*sec(c + d*x)))] @test_int [cot(c + d*x)^2/sqrt(a + a*sec(c + d*x)), x, 6, -((2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(sqrt(a)*d)) + (7*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(4*sqrt(2)*sqrt(a)*d) - (cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(4*a*d) - (cos(c + d*x)*cot(c + d*x)*sec((1/2)*(c + d*x))^2*sqrt(a + a*sec(c + d*x)))/(4*a*d)] @test_int [cot(c + d*x)^4/sqrt(a + a*sec(c + d*x)), x, 8, (2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(sqrt(a)*d) - (107*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(64*sqrt(2)*sqrt(a)*d) + (21*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(64*a*d) + (43*cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2))/(96*a^2*d) - (15*cos(c + d*x)*cot(c + d*x)^3*sec((1/2)*(c + d*x))^2*(a + a*sec(c + d*x))^(3/2))/(32*a^2*d) - (cos(c + d*x)^2*cot(c + d*x)^3*sec((1/2)*(c + d*x))^4*(a + a*sec(c + d*x))^(3/2))/(16*a^2*d)] @test_int [cot(c + d*x)^6/sqrt(a + a*sec(c + d*x)), x, 10, -((2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(sqrt(a)*d)) + (835*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(512*sqrt(2)*sqrt(a)*d) - (189*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(512*a*d) - (323*cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2))/(768*a^2*d) + (579*cot(c + d*x)^5*(a + a*sec(c + d*x))^(5/2))/(640*a^3*d) - (101*cos(c + d*x)*cot(c + d*x)^5*sec((1/2)*(c + d*x))^2*(a + a*sec(c + d*x))^(5/2))/(128*a^3*d) - (23*cos(c + d*x)^2*cot(c + d*x)^5*sec((1/2)*(c + d*x))^4*(a + a*sec(c + d*x))^(5/2))/(192*a^3*d) - (cos(c + d*x)^3*cot(c + d*x)^5*sec((1/2)*(c + d*x))^6*(a + a*sec(c + d*x))^(5/2))/(48*a^3*d)] @test_int [tan(c + d*x)^5/(a + a*sec(c + d*x))^(3/2), x, 6, (-2*atanh(sqrt(a + a*sec(c + 
d*x))/sqrt(a)))/(a^(3/2)*d) + (2*sqrt(a + a*sec(c + d*x)))/(a^2*d) - (2*(a + a*sec(c + d*x))^(3/2))/(a^3*d) + (2*(a + a*sec(c + d*x))^(5/2))/(5*a^4*d)] @test_int [tan(c + d*x)^3/(a + a*sec(c + d*x))^(3/2), x, 4, (2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(a^(3/2)*d) + (2*sqrt(a + a*sec(c + d*x)))/(a^2*d)] @test_int [tan(c + d*x)/(a + a*sec(c + d*x))^(3/2), x, 4, (-2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(a^(3/2)*d) + 2/(a*d*sqrt(a + a*sec(c + d*x)))] @test_int [cot(c + d*x)/(a + a*sec(c + d*x))^(3/2), x, 8, (2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(a^(3/2)*d) - atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a)))/(2*sqrt(2)*a^(3/2)*d) - 1/(3*d*(a + a*sec(c + d*x))^(3/2)) - 3/(2*a*d*sqrt(a + a*sec(c + d*x)))] @test_int [cot(c + d*x)^3/(a + a*sec(c + d*x))^(3/2), x, 10, -((2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(a^(3/2)*d)) + (11*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/(16*sqrt(2)*a^(3/2)*d) - (3*a)/(20*d*(a + a*sec(c + d*x))^(5/2)) + a/(2*d*(1 - sec(c + d*x))*(a + a*sec(c + d*x))^(5/2)) + 5/(24*d*(a + a*sec(c + d*x))^(3/2)) + 21/(16*a*d*sqrt(a + a*sec(c + d*x)))] @test_int [cot(c + d*x)^5/(a + a*sec(c + d*x))^(3/2), x, 12, (2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(a^(3/2)*d) - (203*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/(256*sqrt(2)*a^(3/2)*d) + (139*a^2)/(224*d*(a + a*sec(c + d*x))^(7/2)) - a^2/(4*d*(1 - sec(c + d*x))^2*(a + a*sec(c + d*x))^(7/2)) - (19*a^2)/(16*d*(1 - sec(c + d*x))*(a + a*sec(c + d*x))^(7/2)) + (15*a)/(64*d*(a + a*sec(c + d*x))^(5/2)) - 53/(384*d*(a + a*sec(c + d*x))^(3/2)) - 309/(256*a*d*sqrt(a + a*sec(c + d*x)))] @test_int [tan(c + d*x)^6/(a + a*sec(c + d*x))^(3/2), x, 5, -((2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(a^(3/2)*d)) + (2*tan(c + d*x))/(a*d*sqrt(a + a*sec(c + d*x))) - (2*tan(c + d*x)^3)/(3*d*(a + a*sec(c + d*x))^(3/2)) + (2*a*tan(c + d*x)^5)/(5*d*(a + a*sec(c + d*x))^(5/2)) + (2*a^2*tan(c + d*x)^7)/(7*d*(a + a*sec(c + d*x))^(7/2))] @test_int [tan(c + d*x)^4/(a + a*sec(c + d*x))^(3/2), 
x, 4, (2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(a^(3/2)*d) - (2*tan(c + d*x))/(a*d*sqrt(a + a*sec(c + d*x))) + (2*tan(c + d*x)^3)/(3*d*(a + a*sec(c + d*x))^(3/2))] @test_int [tan(c + d*x)^2/(a + a*sec(c + d*x))^(3/2), x, 4, (-2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(a^(3/2)*d) + (2*sqrt(2)*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(a^(3/2)*d)] @test_int [cot(c + d*x)^2/(a + a*sec(c + d*x))^(3/2), x, 7, -((2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(a^(3/2)*d)) + (71*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(32*sqrt(2)*a^(3/2)*d) + (7*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(32*a^2*d) - (13*cos(c + d*x)*cot(c + d*x)*sec((1/2)*(c + d*x))^2*sqrt(a + a*sec(c + d*x)))/(32*a^2*d) - (cos(c + d*x)^2*cot(c + d*x)*sec((1/2)*(c + d*x))^4*sqrt(a + a*sec(c + d*x)))/(16*a^2*d)] @test_int [cot(c + d*x)^4/(a + a*sec(c + d*x))^(3/2), x, 9, (2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(a^(3/2)*d) - (533*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(256*sqrt(2)*a^(3/2)*d) - (21*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(256*a^2*d) + (277*cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2))/(384*a^3*d) - (81*cos(c + d*x)*cot(c + d*x)^3*sec((1/2)*(c + d*x))^2*(a + a*sec(c + d*x))^(3/2))/(128*a^3*d) - (7*cos(c + d*x)^2*cot(c + d*x)^3*sec((1/2)*(c + d*x))^4*(a + a*sec(c + d*x))^(3/2))/(64*a^3*d) - (cos(c + d*x)^3*cot(c + d*x)^3*sec((1/2)*(c + d*x))^6*(a + a*sec(c + d*x))^(3/2))/(48*a^3*d)] @test_int [cot(c + d*x)^6/(a + a*sec(c + d*x))^(3/2), x, 11, -((2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(a^(3/2)*d)) + (16363*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(8192*sqrt(2)*a^(3/2)*d) - (21*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(8192*a^2*d) - (8171*cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2))/(12288*a^3*d) + (12267*cot(c + d*x)^5*(a + a*sec(c + d*x))^(5/2))/(10240*a^4*d) - (2045*cos(c + d*x)*cot(c + d*x)^5*sec((1/2)*(c + d*x))^2*(a + a*sec(c + 
d*x))^(5/2))/(2048*a^4*d) - (511*cos(c + d*x)^2*cot(c + d*x)^5*sec((1/2)*(c + d*x))^4*(a + a*sec(c + d*x))^(5/2))/(3072*a^4*d) - (29*cos(c + d*x)^3*cot(c + d*x)^5*sec((1/2)*(c + d*x))^6*(a + a*sec(c + d*x))^(5/2))/(768*a^4*d) - (cos(c + d*x)^4*cot(c + d*x)^5*sec((1/2)*(c + d*x))^8*(a + a*sec(c + d*x))^(5/2))/(128*a^4*d)] @test_int [tan(c + d*x)^5/(a + a*sec(c + d*x))^(5/2), x, 5, (-2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(a^(5/2)*d) - (6*sqrt(a + a*sec(c + d*x)))/(a^3*d) + (2*(a + a*sec(c + d*x))^(3/2))/(3*a^4*d)] @test_int [tan(c + d*x)^3/(a + a*sec(c + d*x))^(5/2), x, 4, (2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(a^(5/2)*d) - 4/(a^2*d*sqrt(a + a*sec(c + d*x)))] @test_int [tan(c + d*x)/(a + a*sec(c + d*x))^(5/2), x, 5, (-2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(a^(5/2)*d) + 2/(3*a*d*(a + a*sec(c + d*x))^(3/2)) + 2/(a^2*d*sqrt(a + a*sec(c + d*x)))] @test_int [cot(c + d*x)/(a + a*sec(c + d*x))^(5/2), x, 9, (2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(a^(5/2)*d) - atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a)))/(4*sqrt(2)*a^(5/2)*d) - 1/(5*d*(a + a*sec(c + d*x))^(5/2)) - 1/(2*a*d*(a + a*sec(c + d*x))^(3/2)) - 7/(4*a^2*d*sqrt(a + a*sec(c + d*x)))] @test_int [cot(c + d*x)^3/(a + a*sec(c + d*x))^(5/2), x, 11, -((2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(a^(5/2)*d)) + (13*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/(32*sqrt(2)*a^(5/2)*d) - (5*a)/(28*d*(a + a*sec(c + d*x))^(7/2)) + a/(2*d*(1 - sec(c + d*x))*(a + a*sec(c + d*x))^(7/2)) + 3/(40*d*(a + a*sec(c + d*x))^(5/2)) + 19/(48*a*d*(a + a*sec(c + d*x))^(3/2)) + 51/(32*a^2*d*sqrt(a + a*sec(c + d*x)))] @test_int [cot(c + d*x)^5/(a + a*sec(c + d*x))^(5/2), x, 13, (2*atanh(sqrt(a + a*sec(c + d*x))/sqrt(a)))/(a^(5/2)*d) - (263*atanh(sqrt(a + a*sec(c + d*x))/(sqrt(2)*sqrt(a))))/(512*sqrt(2)*a^(5/2)*d) + (199*a^2)/(288*d*(a + a*sec(c + d*x))^(9/2)) - a^2/(4*d*(1 - sec(c + d*x))^2*(a + a*sec(c + d*x))^(9/2)) - (21*a^2)/(16*d*(1 - sec(c + d*x))*(a + a*sec(c + d*x))^(9/2)) + (135*a)/(448*d*(a + a*sec(c + 
d*x))^(7/2)) + 7/(640*d*(a + a*sec(c + d*x))^(5/2)) - 83/(256*a*d*(a + a*sec(c + d*x))^(3/2)) - 761/(512*a^2*d*sqrt(a + a*sec(c + d*x)))] @test_int [tan(c + d*x)^6/(a + a*sec(c + d*x))^(5/2), x, 4, -((2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(a^(5/2)*d)) + (2*tan(c + d*x))/(a^2*d*sqrt(a + a*sec(c + d*x))) - (2*tan(c + d*x)^3)/(3*a*d*(a + a*sec(c + d*x))^(3/2)) + (2*tan(c + d*x)^5)/(5*d*(a + a*sec(c + d*x))^(5/2))] @test_int [tan(c + d*x)^4/(a + a*sec(c + d*x))^(5/2), x, 5, (2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(a^(5/2)*d) - (4*sqrt(2)*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(a^(5/2)*d) + (2*tan(c + d*x))/(a^2*d*sqrt(a + a*sec(c + d*x)))] @test_int [tan(c + d*x)^2/(a + a*sec(c + d*x))^(5/2), x, 5, -((2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(a^(5/2)*d)) + (3*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(sqrt(2)*a^(5/2)*d) + (sec((1/2)*(c + d*x))^2*sin(c + d*x))/(2*a^2*d*sqrt(a + a*sec(c + d*x)))] @test_int [cot(c + d*x)^2/(a + a*sec(c + d*x))^(5/2), x, 8, -((2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(a^(5/2)*d)) + (319*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(128*sqrt(2)*a^(5/2)*d) + (63*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(128*a^3*d) - (191*cos(c + d*x)*cot(c + d*x)*sec((1/2)*(c + d*x))^2*sqrt(a + a*sec(c + d*x)))/(384*a^3*d) - (19*cos(c + d*x)^2*cot(c + d*x)*sec((1/2)*(c + d*x))^4*sqrt(a + a*sec(c + d*x)))/(192*a^3*d) - (cos(c + d*x)^3*cot(c + d*x)*sec((1/2)*(c + d*x))^6*sqrt(a + a*sec(c + d*x)))/(48*a^3*d)] @test_int [cot(c + d*x)^4/(a + a*sec(c + d*x))^(5/2), x, 10, (2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(a^(5/2)*d) - (9683*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(4096*sqrt(2)*a^(5/2)*d) - (1491*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(4096*a^3*d) + (5587*cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2))/(6144*a^4*d) - (1527*cos(c + d*x)*cot(c + d*x)^3*sec((1/2)*(c + d*x))^2*(a + a*sec(c + 
d*x))^(3/2))/(2048*a^4*d) - (145*cos(c + d*x)^2*cot(c + d*x)^3*sec((1/2)*(c + d*x))^4*(a + a*sec(c + d*x))^(3/2))/(1024*a^4*d) - (9*cos(c + d*x)^3*cot(c + d*x)^3*sec((1/2)*(c + d*x))^6*(a + a*sec(c + d*x))^(3/2))/(256*a^4*d) - (cos(c + d*x)^4*cot(c + d*x)^3*sec((1/2)*(c + d*x))^8*(a + a*sec(c + d*x))^(3/2))/(128*a^4*d)] @test_int [cot(c + d*x)^6/(a + a*sec(c + d*x))^(5/2), x, 12, -((2*atan((sqrt(a)*tan(c + d*x))/sqrt(a + a*sec(c + d*x))))/(a^(5/2)*d)) + (74461*atan((sqrt(a)*tan(c + d*x))/(sqrt(2)*sqrt(a + a*sec(c + d*x)))))/(32768*sqrt(2)*a^(5/2)*d) + (8925*cot(c + d*x)*sqrt(a + a*sec(c + d*x)))/(32768*a^3*d) - (41693*cot(c + d*x)^3*(a + a*sec(c + d*x))^(3/2))/(49152*a^4*d) + (58077*cot(c + d*x)^5*(a + a*sec(c + d*x))^(5/2))/(40960*a^5*d) - (9467*cos(c + d*x)*cot(c + d*x)^5*sec((1/2)*(c + d*x))^2*(a + a*sec(c + d*x))^(5/2))/(8192*a^5*d) - (2473*cos(c + d*x)^2*cot(c + d*x)^5*sec((1/2)*(c + d*x))^4*(a + a*sec(c + d*x))^(5/2))/(12288*a^5*d) - (155*cos(c + d*x)^3*cot(c + d*x)^5*sec((1/2)*(c + d*x))^6*(a + a*sec(c + d*x))^(5/2))/(3072*a^5*d) - (7*cos(c + d*x)^4*cot(c + d*x)^5*sec((1/2)*(c + d*x))^8*(a + a*sec(c + d*x))^(5/2))/(512*a^5*d) - (cos(c + d*x)^5*cot(c + d*x)^5*sec((1/2)*(c + d*x))^10*(a + a*sec(c + d*x))^(5/2))/(320*a^5*d)] @test_int [tan(e + f*x)^2/(a + a*sec(e + f*x))^(9/2), x, -7, -((2*atan((sqrt(a)*tan(e + f*x))/sqrt(a + a*sec(e + f*x))))/(a^(9/2)*f)) + (91*atan((sqrt(a)*tan(e + f*x))/(sqrt(2)*sqrt(a + a*sec(e + f*x)))))/(32*sqrt(2)*a^(9/2)*f) + tan(e + f*x)/(3*a*f*(a + a*sec(e + f*x))^(7/2)) + (11*tan(e + f*x))/(24*a^2*f*(a + a*sec(e + f*x))^(5/2)) + (27*tan(e + f*x))/(32*a^3*f*(a + a*sec(e + f*x))^(3/2))] #= ::Subsection::Closed:: =# #=Integrands*of*the*form*(d*tan(e+f*x))^n*(a+a*sec(e+f*x))^m*with*m*and/or*n*symbolic=# @test_int [(e*tan(c + d*x))^m*(a + a*sec(c + d*x))^n, x, 1, (2^(1 + m + n)*AppellF1((1 + m)/2, m + n, 1, (3 + m)/2, -((a - a*sec(c + d*x))/(a + a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*(1/(1 + sec(c + d*x)))^(1 + m + 
n)*(a + a*sec(c + d*x))^n*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m))] @test_int [(e*tan(c + d*x))^m*(a + a*sec(c + d*x))^3, x, 8, (3*a^3*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m)) + (a^3*HypergeometricFunctions._₂F₁(1, (1 + m)/2, (3 + m)/2, -tan(c + d*x)^2)*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m)) + (3*a^3*(cos(c + d*x)^2)^((2 + m)/2)*HypergeometricFunctions._₂F₁((1 + m)/2, (2 + m)/2, (3 + m)/2, sin(c + d*x)^2)*sec(c + d*x)*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m)) + (a^3*(cos(c + d*x)^2)^((4 + m)/2)*HypergeometricFunctions._₂F₁((1 + m)/2, (4 + m)/2, (3 + m)/2, sin(c + d*x)^2)*sec(c + d*x)^3*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m))] @test_int [(e*tan(c + d*x))^m*(a + a*sec(c + d*x))^2, x, 7, (a^2*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m)) + (a^2*HypergeometricFunctions._₂F₁(1, (1 + m)/2, (3 + m)/2, -tan(c + d*x)^2)*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m)) + (2*a^2*(cos(c + d*x)^2)^((2 + m)/2)*HypergeometricFunctions._₂F₁((1 + m)/2, (2 + m)/2, (3 + m)/2, sin(c + d*x)^2)*sec(c + d*x)*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m))] @test_int [(e*tan(c + d*x))^m*(a + a*sec(c + d*x))^1, x, 4, (a*HypergeometricFunctions._₂F₁(1, (1 + m)/2, (3 + m)/2, -tan(c + d*x)^2)*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m)) + (a*(cos(c + d*x)^2)^((2 + m)/2)*HypergeometricFunctions._₂F₁((1 + m)/2, (2 + m)/2, (3 + m)/2, sin(c + d*x)^2)*sec(c + d*x)*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m))] @test_int [(e*tan(c + d*x))^m/(a + a*sec(c + d*x))^1, x, 5, (e*HypergeometricFunctions._₂F₁(1, (1/2)*(-1 + m), (1 + m)/2, -tan(c + d*x)^2)*(e*tan(c + d*x))^(-1 + m))/(a*d*(1 - m)) - (e*(cos(c + d*x)^2)^(m/2)*HypergeometricFunctions._₂F₁((1/2)*(-1 + m), m/2, (1 + m)/2, sin(c + d*x)^2)*sec(c + d*x)*(e*tan(c + d*x))^(-1 + m))/(a*d*(1 - m))] @test_int [(e*tan(c + d*x))^m/(a + a*sec(c + d*x))^2, x, 8, -((e^3*(e*tan(c + d*x))^(-3 + m))/(a^2*d*(3 - m))) - (e^3*HypergeometricFunctions._₂F₁(1, (1/2)*(-3 + m), (1/2)*(-1 + m), -tan(c + d*x)^2)*(e*tan(c + d*x))^(-3 + m))/(a^2*d*(3 - m)) + (2*e^3*(cos(c + d*x)^2)^((1/2)*(-2 + 
m))*HypergeometricFunctions._₂F₁((1/2)*(-3 + m), (1/2)*(-2 + m), (1/2)*(-1 + m), sin(c + d*x)^2)*sec(c + d*x)*(e*tan(c + d*x))^(-3 + m))/(a^2*d*(3 - m))] @test_int [(e*tan(c + d*x))^m/(a + a*sec(c + d*x))^3, x, 9, (3*e^5*(e*tan(c + d*x))^(-5 + m))/(a^3*d*(5 - m)) + (e^5*HypergeometricFunctions._₂F₁(1, (1/2)*(-5 + m), (1/2)*(-3 + m), -tan(c + d*x)^2)*(e*tan(c + d*x))^(-5 + m))/(a^3*d*(5 - m)) - (3*e^5*(cos(c + d*x)^2)^((1/2)*(-4 + m))*HypergeometricFunctions._₂F₁((1/2)*(-5 + m), (1/2)*(-4 + m), (1/2)*(-3 + m), sin(c + d*x)^2)*sec(c + d*x)*(e*tan(c + d*x))^(-5 + m))/(a^3*d*(5 - m)) - (e^5*(cos(c + d*x)^2)^((1/2)*(-2 + m))*HypergeometricFunctions._₂F₁((1/2)*(-5 + m), (1/2)*(-2 + m), (1/2)*(-3 + m), sin(c + d*x)^2)*sec(c + d*x)^3*(e*tan(c + d*x))^(-5 + m))/(a^3*d*(5 - m))] @test_int [(e*tan(c + d*x))^m*(a + a*sec(c + d*x))^(3/2), x, 1, (2^(5/2 + m)*AppellF1((1 + m)/2, 3/2 + m, 1, (3 + m)/2, -((a - a*sec(c + d*x))/(a + a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*(1/(1 + sec(c + d*x)))^(5/2 + m)*(a + a*sec(c + d*x))^(3/2)*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m))] @test_int [(e*tan(c + d*x))^m*(a + a*sec(c + d*x))^(1/2), x, 1, (2^(3/2 + m)*AppellF1((1 + m)/2, 1/2 + m, 1, (3 + m)/2, -((a - a*sec(c + d*x))/(a + a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*(1/(1 + sec(c + d*x)))^(3/2 + m)*sqrt(a + a*sec(c + d*x))*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m))] @test_int [(e*tan(c + d*x))^m/(a + a*sec(c + d*x))^(1/2), x, 1, (2^(1/2 + m)*AppellF1((1 + m)/2, -(1/2) + m, 1, (3 + m)/2, -((a - a*sec(c + d*x))/(a + a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*(1/(1 + sec(c + d*x)))^(1/2 + m)*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + m)*sqrt(a + a*sec(c + d*x)))] @test_int [(e*tan(c + d*x))^m/(a + a*sec(c + d*x))^(3/2), x, 1, (2^(-(1/2) + m)*AppellF1((1 + m)/2, -(3/2) + m, 1, (3 + m)/2, -((a - a*sec(c + d*x))/(a + a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*(1/(1 + sec(c + d*x)))^(-(1/2) + m)*(e*tan(c + d*x))^(1 + m))/(d*e*(1 + 
m)*(a + a*sec(c + d*x))^(3/2))] @test_int [tan(c + d*x)^7*(a + a*sec(c + d*x))^n, x, 4, (7*(a + a*sec(c + d*x))^(4 + n))/(a^4*d*(4 + n)) + (HypergeometricFunctions._₂F₁(1, 4 + n, 5 + n, 1 + sec(c + d*x))*(a + a*sec(c + d*x))^(4 + n))/(a^4*d*(4 + n)) - (5*(a + a*sec(c + d*x))^(5 + n))/(a^5*d*(5 + n)) + (a + a*sec(c + d*x))^(6 + n)/(a^6*d*(6 + n))] @test_int [tan(c + d*x)^5*(a + a*sec(c + d*x))^n, x, 4, -((3*(a + a*sec(c + d*x))^(3 + n))/(a^3*d*(3 + n))) - (HypergeometricFunctions._₂F₁(1, 3 + n, 4 + n, 1 + sec(c + d*x))*(a + a*sec(c + d*x))^(3 + n))/(a^3*d*(3 + n)) + (a + a*sec(c + d*x))^(4 + n)/(a^4*d*(4 + n))] @test_int [tan(c + d*x)^3*(a + a*sec(c + d*x))^n, x, 3, (a + a*sec(c + d*x))^(2 + n)/(a^2*d*(2 + n)) + (HypergeometricFunctions._₂F₁(1, 2 + n, 3 + n, 1 + sec(c + d*x))*(a + a*sec(c + d*x))^(2 + n))/(a^2*d*(2 + n))] @test_int [tan(c + d*x)^1*(a + a*sec(c + d*x))^n, x, 2, -((HypergeometricFunctions._₂F₁(1, 1 + n, 2 + n, 1 + sec(c + d*x))*(a + a*sec(c + d*x))^(1 + n))/(a*d*(1 + n)))] @test_int [cot(c + d*x)^1*(a + a*sec(c + d*x))^n, x, 4, -((HypergeometricFunctions._₂F₁(1, n, 1 + n, (1/2)*(1 + sec(c + d*x)))*(a + a*sec(c + d*x))^n)/(2*d*n)) + (HypergeometricFunctions._₂F₁(1, n, 1 + n, 1 + sec(c + d*x))*(a + a*sec(c + d*x))^n)/(d*n)] @test_int [cot(c + d*x)^3*(a + a*sec(c + d*x))^n, x, 5, -((a*(4 - n)*HypergeometricFunctions._₂F₁(1, -1 + n, n, (1/2)*(1 + sec(c + d*x)))*(a + a*sec(c + d*x))^(-1 + n))/(4*d*(1 - n))) + (a*HypergeometricFunctions._₂F₁(1, -1 + n, n, 1 + sec(c + d*x))*(a + a*sec(c + d*x))^(-1 + n))/(d*(1 - n)) + (a*(a + a*sec(c + d*x))^(-1 + n))/(2*d*(1 - sec(c + d*x)))] @test_int [tan(c + d*x)^4*(a + a*sec(c + d*x))^n, x, 1, (2^(5 + n)*AppellF1(5/2, 4 + n, 1, 7/2, -((a - a*sec(c + d*x))/(a + a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*(1/(1 + sec(c + d*x)))^(5 + n)*(a + a*sec(c + d*x))^n*tan(c + d*x)^5)/(5*d)] @test_int [tan(c + d*x)^2*(a + a*sec(c + d*x))^n, x, 1, (2^(3 + n)*AppellF1(3/2, 2 + n, 1, 5/2, -((a - a*sec(c + d*x))/(a + 
a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*(1/(1 + sec(c + d*x)))^(3 + n)*(a + a*sec(c + d*x))^n*tan(c + d*x)^3)/(3*d)] @test_int [cot(c + d*x)^2*(a + a*sec(c + d*x))^n, x, 1, -((2^(-1 + n)*AppellF1(-(1/2), -2 + n, 1, 1/2, -((a - a*sec(c + d*x))/(a + a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*cot(c + d*x)*(1/(1 + sec(c + d*x)))^(-1 + n)*(a + a*sec(c + d*x))^n)/d)] @test_int [cot(c + d*x)^4*(a + a*sec(c + d*x))^n, x, 1, -((2^(-3 + n)*AppellF1(-(3/2), -4 + n, 1, -(1/2), -((a - a*sec(c + d*x))/(a + a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*cot(c + d*x)^3*(1/(1 + sec(c + d*x)))^(-3 + n)*(a + a*sec(c + d*x))^n)/(3*d))] @test_int [tan(c + d*x)^(3/2)*(a + a*sec(c + d*x))^n, x, 1, (1/(5*d))*(2^(7/2 + n)*AppellF1(5/4, 3/2 + n, 1, 9/4, -((a - a*sec(c + d*x))/(a + a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*(1/(1 + sec(c + d*x)))^(5/2 + n)*(a + a*sec(c + d*x))^n*tan(c + d*x)^(5/2))] @test_int [tan(c + d*x)^(1/2)*(a + a*sec(c + d*x))^n, x, 1, (1/(3*d))*(2^(5/2 + n)*AppellF1(3/4, 1/2 + n, 1, 7/4, -((a - a*sec(c + d*x))/(a + a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*(1/(1 + sec(c + d*x)))^(3/2 + n)*(a + a*sec(c + d*x))^n*tan(c + d*x)^(3/2))] @test_int [1/tan(c + d*x)^(1/2)*(a + a*sec(c + d*x))^n, x, 1, (1/d)*(2^(3/2 + n)*AppellF1(1/4, -(1/2) + n, 1, 5/4, -((a - a*sec(c + d*x))/(a + a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*(1/(1 + sec(c + d*x)))^(1/2 + n)*(a + a*sec(c + d*x))^n*sqrt(tan(c + d*x)))] @test_int [1/tan(c + d*x)^(3/2)*(a + a*sec(c + d*x))^n, x, 1, -((2^(1/2 + n)*AppellF1(-(1/4), -(3/2) + n, 1, 3/4, -((a - a*sec(c + d*x))/(a + a*sec(c + d*x))), (a - a*sec(c + d*x))/(a + a*sec(c + d*x)))*(1/(1 + sec(c + d*x)))^(-(1/2) + n)*(a + a*sec(c + d*x))^n)/(d*sqrt(tan(c + d*x))))] #= ::Section::Closed:: =# #=Integrands*of*the*form*(d*cot(e+f*x))^n*(a+a*sec(e+f*x))^m=# #= ::Subsection::Closed:: =# #=Integrands*of*the*form*(d*cot(e+f*x))^(n/2)*(a+a*sec(e+f*x))^m=# #= 
::Subsubsection::Closed:: =# #=n>0=# @test_int [(e*cot(c + d*x))^(5/2)*(a + a*sec(c + d*x)), x, 17, -((2*(e*cot(c + d*x))^(5/2)*(a + a*sec(c + d*x))*tan(c + d*x))/(3*d)) - (a*(e*cot(c + d*x))^(5/2)*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x))*tan(c + d*x)^2)/(3*d) + (a*atan(1 - sqrt(2)*sqrt(tan(c + d*x)))*(e*cot(c + d*x))^(5/2)*tan(c + d*x)^(5/2))/(sqrt(2)*d) - (a*atan(1 + sqrt(2)*sqrt(tan(c + d*x)))*(e*cot(c + d*x))^(5/2)*tan(c + d*x)^(5/2))/(sqrt(2)*d) + (a*(e*cot(c + d*x))^(5/2)*log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*tan(c + d*x)^(5/2))/(2*sqrt(2)*d) - (a*(e*cot(c + d*x))^(5/2)*log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*tan(c + d*x)^(5/2))/(2*sqrt(2)*d)] @test_int [(e*cot(c + d*x))^(3/2)*(a + a*sec(c + d*x)), x, 18, -((2*(e*cot(c + d*x))^(3/2)*(a + a*sec(c + d*x))*tan(c + d*x))/d) - (2*a*(e*cot(c + d*x))^(3/2)*Elliptic.E(c - pi/4 + d*x, 2)*sin(c + d*x)*tan(c + d*x))/(d*sqrt(sin(2*c + 2*d*x))) + (a*atan(1 - sqrt(2)*sqrt(tan(c + d*x)))*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2))/(sqrt(2)*d) - (a*atan(1 + sqrt(2)*sqrt(tan(c + d*x)))*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2))/(sqrt(2)*d) - (a*(e*cot(c + d*x))^(3/2)*log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*tan(c + d*x)^(3/2))/(2*sqrt(2)*d) + (a*(e*cot(c + d*x))^(3/2)*log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*tan(c + d*x)^(3/2))/(2*sqrt(2)*d) + (2*a*(e*cot(c + d*x))^(3/2)*sin(c + d*x)*tan(c + d*x)^2)/d] @test_int [sqrt(e*cot(c + d*x))*(a + a*sec(c + d*x)), x, 16, (a*sqrt(e*cot(c + d*x))*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/d - (a*atan(1 - sqrt(2)*sqrt(tan(c + d*x)))*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x)))/(sqrt(2)*d) + (a*atan(1 + sqrt(2)*sqrt(tan(c + d*x)))*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x)))/(sqrt(2)*d) - (a*sqrt(e*cot(c + d*x))*log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*sqrt(tan(c + d*x)))/(2*sqrt(2)*d) + (a*sqrt(e*cot(c + d*x))*log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*sqrt(tan(c + d*x)))/(2*sqrt(2)*d)] 
@test_int [(a + a*sec(c + d*x))/sqrt(e*cot(c + d*x)), x, 17, (2*a*sin(c + d*x))/(d*sqrt(e*cot(c + d*x))) - (2*a*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2))/(d*sqrt(e*cot(c + d*x))*sqrt(sin(2*c + 2*d*x))) - (a*atan(1 - sqrt(2)*sqrt(tan(c + d*x))))/(sqrt(2)*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) + (a*atan(1 + sqrt(2)*sqrt(tan(c + d*x))))/(sqrt(2)*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) + (a*log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x)))/(2*sqrt(2)*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) - (a*log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x)))/(2*sqrt(2)*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x)))] @test_int [(a + a*sec(c + d*x))/(e*cot(c + d*x))^(3/2), x, 17, (2*cot(c + d*x)*(3*a + a*sec(c + d*x)))/(3*d*(e*cot(c + d*x))^(3/2)) - (a*cot(c + d*x)*csc(c + d*x)*Elliptic.F(c - pi/4 + d*x, 2)*sqrt(sin(2*c + 2*d*x)))/(3*d*(e*cot(c + d*x))^(3/2)) + (a*atan(1 - sqrt(2)*sqrt(tan(c + d*x))))/(sqrt(2)*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) - (a*atan(1 + sqrt(2)*sqrt(tan(c + d*x))))/(sqrt(2)*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) + (a*log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x)))/(2*sqrt(2)*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) - (a*log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x)))/(2*sqrt(2)*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2))] @test_int [(e*cot(c + d*x))^(5/2)*(a + a*sec(c + d*x))^2, x, 21, -((4*a^2*(e*cot(c + d*x))^(5/2)*tan(c + d*x))/(3*d)) - (4*a^2*(e*cot(c + d*x))^(5/2)*sec(c + d*x)*tan(c + d*x))/(3*d) - (2*a^2*(e*cot(c + d*x))^(5/2)*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x))*tan(c + d*x)^2)/(3*d) + (a^2*atan(1 - sqrt(2)*sqrt(tan(c + d*x)))*(e*cot(c + d*x))^(5/2)*tan(c + d*x)^(5/2))/(sqrt(2)*d) - (a^2*atan(1 + sqrt(2)*sqrt(tan(c + d*x)))*(e*cot(c + d*x))^(5/2)*tan(c + d*x)^(5/2))/(sqrt(2)*d) + (a^2*(e*cot(c + d*x))^(5/2)*log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*tan(c + d*x)^(5/2))/(2*sqrt(2)*d) - (a^2*(e*cot(c + d*x))^(5/2)*log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*tan(c + 
d*x)^(5/2))/(2*sqrt(2)*d)] @test_int [(e*cot(c + d*x))^(3/2)*(a + a*sec(c + d*x))^2, x, 21, -((4*a^2*(e*cot(c + d*x))^(3/2)*sin(c + d*x))/d) - (4*a^2*(e*cot(c + d*x))^(3/2)*tan(c + d*x))/d - (4*a^2*(e*cot(c + d*x))^(3/2)*Elliptic.E(c - pi/4 + d*x, 2)*sin(c + d*x)*tan(c + d*x))/(d*sqrt(sin(2*c + 2*d*x))) + (a^2*atan(1 - sqrt(2)*sqrt(tan(c + d*x)))*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2))/(sqrt(2)*d) - (a^2*atan(1 + sqrt(2)*sqrt(tan(c + d*x)))*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2))/(sqrt(2)*d) - (a^2*(e*cot(c + d*x))^(3/2)*log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*tan(c + d*x)^(3/2))/(2*sqrt(2)*d) + (a^2*(e*cot(c + d*x))^(3/2)*log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*tan(c + d*x)^(3/2))/(2*sqrt(2)*d)] @test_int [sqrt(e*cot(c + d*x))*(a + a*sec(c + d*x))^2, x, 19, (2*a^2*sqrt(e*cot(c + d*x))*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/d - (a^2*atan(1 - sqrt(2)*sqrt(tan(c + d*x)))*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x)))/(sqrt(2)*d) + (a^2*atan(1 + sqrt(2)*sqrt(tan(c + d*x)))*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x)))/(sqrt(2)*d) - (a^2*sqrt(e*cot(c + d*x))*log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*sqrt(tan(c + d*x)))/(2*sqrt(2)*d) + (a^2*sqrt(e*cot(c + d*x))*log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*sqrt(tan(c + d*x)))/(2*sqrt(2)*d) + (2*a^2*sqrt(e*cot(c + d*x))*tan(c + d*x))/d] @test_int [(a + a*sec(c + d*x))^2/sqrt(e*cot(c + d*x)), x, 20, (4*a^2*sin(c + d*x))/(d*sqrt(e*cot(c + d*x))) - (4*a^2*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2))/(d*sqrt(e*cot(c + d*x))*sqrt(sin(2*c + 2*d*x))) - (a^2*atan(1 - sqrt(2)*sqrt(tan(c + d*x))))/(sqrt(2)*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) + (a^2*atan(1 + sqrt(2)*sqrt(tan(c + d*x))))/(sqrt(2)*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) + (a^2*log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x)))/(2*sqrt(2)*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) - (a^2*log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x)))/(2*sqrt(2)*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) + 
(2*a^2*tan(c + d*x))/(3*d*sqrt(e*cot(c + d*x)))] @test_int [(a + a*sec(c + d*x))^2/(e*cot(c + d*x))^(3/2), x, 21, (2*a^2*cot(c + d*x))/(d*(e*cot(c + d*x))^(3/2)) + (4*a^2*csc(c + d*x))/(3*d*(e*cot(c + d*x))^(3/2)) - (2*a^2*cot(c + d*x)*csc(c + d*x)*Elliptic.F(c - pi/4 + d*x, 2)*sqrt(sin(2*c + 2*d*x)))/(3*d*(e*cot(c + d*x))^(3/2)) + (a^2*atan(1 - sqrt(2)*sqrt(tan(c + d*x))))/(sqrt(2)*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) - (a^2*atan(1 + sqrt(2)*sqrt(tan(c + d*x))))/(sqrt(2)*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) + (a^2*log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x)))/(2*sqrt(2)*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) - (a^2*log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x)))/(2*sqrt(2)*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) + (2*a^2*tan(c + d*x))/(5*d*(e*cot(c + d*x))^(3/2))] #= ::Subsubsection::Closed:: =# #=n<0=# @test_int [(e*cot(c + d*x))^(3/2)/(a + a*sec(c + d*x)), x, 20, (2*cot(c + d*x)*(e*cot(c + d*x))^(3/2)*(1 - sec(c + d*x)))/(5*a*d) - (2*(e*cot(c + d*x))^(3/2)*(5 - 3*sec(c + d*x))*tan(c + d*x))/(5*a*d) + (6*(e*cot(c + d*x))^(3/2)*Elliptic.E(c - pi/4 + d*x, 2)*sin(c + d*x)*tan(c + d*x))/(5*a*d*sqrt(sin(2*c + 2*d*x))) + (atan(1 - sqrt(2)*sqrt(tan(c + d*x)))*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2))/(sqrt(2)*a*d) - (atan(1 + sqrt(2)*sqrt(tan(c + d*x)))*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2))/(sqrt(2)*a*d) - ((e*cot(c + d*x))^(3/2)*log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*tan(c + d*x)^(3/2))/(2*sqrt(2)*a*d) + ((e*cot(c + d*x))^(3/2)*log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*tan(c + d*x)^(3/2))/(2*sqrt(2)*a*d) - (6*(e*cot(c + d*x))^(3/2)*sin(c + d*x)*tan(c + d*x)^2)/(5*a*d)] @test_int [sqrt(e*cot(c + d*x))/(a + a*sec(c + d*x)), x, 18, (2*cot(c + d*x)*sqrt(e*cot(c + d*x))*(1 - sec(c + d*x)))/(3*a*d) - (sqrt(e*cot(c + d*x))*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(3*a*d) - (atan(1 - sqrt(2)*sqrt(tan(c + d*x)))*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x)))/(sqrt(2)*a*d) + (atan(1 + 
sqrt(2)*sqrt(tan(c + d*x)))*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x)))/(sqrt(2)*a*d) - (sqrt(e*cot(c + d*x))*log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*sqrt(tan(c + d*x)))/(2*sqrt(2)*a*d) + (sqrt(e*cot(c + d*x))*log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))*sqrt(tan(c + d*x)))/(2*sqrt(2)*a*d)] @test_int [1/(sqrt(e*cot(c + d*x))*(a + a*sec(c + d*x))), x, 19, (2*cot(c + d*x)*(1 - sec(c + d*x)))/(a*d*sqrt(e*cot(c + d*x))) + (2*sin(c + d*x))/(a*d*sqrt(e*cot(c + d*x))) - (2*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2))/(a*d*sqrt(e*cot(c + d*x))*sqrt(sin(2*c + 2*d*x))) - atan(1 - sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) + atan(1 + sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) + log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) - log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x)))] @test_int [1/((e*cot(c + d*x))^(3/2)*(a + a*sec(c + d*x))), x, 17, (cot(c + d*x)*csc(c + d*x)*Elliptic.F(c - pi/4 + d*x, 2)*sqrt(sin(2*c + 2*d*x)))/(a*d*(e*cot(c + d*x))^(3/2)) + atan(1 - sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) - atan(1 + sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) + log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) - log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2))] @test_int [1/((e*cot(c + d*x))^(5/2)*(a + a*sec(c + d*x))), x, 18, (2*cos(c + d*x)*cot(c + d*x))/(a*d*(e*cot(c + d*x))^(5/2)) - (2*cos(c + d*x)*cot(c + d*x)^2*Elliptic.E(c - pi/4 + d*x, 2))/(a*d*(e*cot(c + d*x))^(5/2)*sqrt(sin(2*c + 2*d*x))) + atan(1 - sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a*d*(e*cot(c + d*x))^(5/2)*tan(c + d*x)^(5/2)) - atan(1 + sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a*d*(e*cot(c + d*x))^(5/2)*tan(c + 
d*x)^(5/2)) - log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a*d*(e*cot(c + d*x))^(5/2)*tan(c + d*x)^(5/2)) + log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a*d*(e*cot(c + d*x))^(5/2)*tan(c + d*x)^(5/2))] @test_int [1/((e*cot(c + d*x))^(7/2)*(a + a*sec(c + d*x))), x, 18, -((2*cot(c + d*x)^3*(3 - sec(c + d*x)))/(3*a*d*(e*cot(c + d*x))^(7/2))) - (cot(c + d*x)^3*csc(c + d*x)*Elliptic.F(c - pi/4 + d*x, 2)*sqrt(sin(2*c + 2*d*x)))/(3*a*d*(e*cot(c + d*x))^(7/2)) - atan(1 - sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a*d*(e*cot(c + d*x))^(7/2)*tan(c + d*x)^(7/2)) + atan(1 + sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a*d*(e*cot(c + d*x))^(7/2)*tan(c + d*x)^(7/2)) - log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a*d*(e*cot(c + d*x))^(7/2)*tan(c + d*x)^(7/2)) + log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a*d*(e*cot(c + d*x))^(7/2)*tan(c + d*x)^(7/2))] @test_int [1/((e*cot(c + d*x))^(9/2)*(a + a*sec(c + d*x))), x, 19, -((6*cos(c + d*x)*cot(c + d*x)^3)/(5*a*d*(e*cot(c + d*x))^(9/2))) - (2*cot(c + d*x)^3*(5 - 3*sec(c + d*x)))/(15*a*d*(e*cot(c + d*x))^(9/2)) + (6*cos(c + d*x)*cot(c + d*x)^4*Elliptic.E(c - pi/4 + d*x, 2))/(5*a*d*(e*cot(c + d*x))^(9/2)*sqrt(sin(2*c + 2*d*x))) - atan(1 - sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a*d*(e*cot(c + d*x))^(9/2)*tan(c + d*x)^(9/2)) + atan(1 + sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a*d*(e*cot(c + d*x))^(9/2)*tan(c + d*x)^(9/2)) + log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a*d*(e*cot(c + d*x))^(9/2)*tan(c + d*x)^(9/2)) - log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a*d*(e*cot(c + d*x))^(9/2)*tan(c + d*x)^(9/2))] @test_int [1/(sqrt(e*cot(c + d*x))*(a + a*sec(c + d*x))^2), x, 24, (2*cot(c + d*x))/(a^2*d*sqrt(e*cot(c + d*x))) - (12*cos(c + d*x)*cot(c + d*x))/(5*a^2*d*sqrt(e*cot(c + d*x))) - (4*cot(c + d*x)^3)/(5*a^2*d*sqrt(e*cot(c + d*x))) + (4*cot(c + d*x)^2*csc(c + d*x))/(5*a^2*d*sqrt(e*cot(c + d*x))) - (12*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 
2))/(5*a^2*d*sqrt(e*cot(c + d*x))*sqrt(sin(2*c + 2*d*x))) - atan(1 - sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a^2*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) + atan(1 + sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a^2*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) + log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a^2*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x))) - log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a^2*d*sqrt(e*cot(c + d*x))*sqrt(tan(c + d*x)))] @test_int [1/((e*cot(c + d*x))^(3/2)*(a + a*sec(c + d*x))^2), x, 22, -((4*cot(c + d*x)^3)/(3*a^2*d*(e*cot(c + d*x))^(3/2))) + (4*cot(c + d*x)^2*csc(c + d*x))/(3*a^2*d*(e*cot(c + d*x))^(3/2)) + (2*cot(c + d*x)*csc(c + d*x)*Elliptic.F(c - pi/4 + d*x, 2)*sqrt(sin(2*c + 2*d*x)))/(3*a^2*d*(e*cot(c + d*x))^(3/2)) + atan(1 - sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a^2*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) - atan(1 + sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a^2*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) + log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a^2*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2)) - log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a^2*d*(e*cot(c + d*x))^(3/2)*tan(c + d*x)^(3/2))] @test_int [1/((e*cot(c + d*x))^(5/2)*(a + a*sec(c + d*x))^2), x, 22, -((4*cot(c + d*x)^3)/(a^2*d*(e*cot(c + d*x))^(5/2))) + (4*cos(c + d*x)*cot(c + d*x)^3)/(a^2*d*(e*cot(c + d*x))^(5/2)) + (4*cos(c + d*x)*cot(c + d*x)^2*Elliptic.E(c - pi/4 + d*x, 2))/(a^2*d*(e*cot(c + d*x))^(5/2)*sqrt(sin(2*c + 2*d*x))) + atan(1 - sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a^2*d*(e*cot(c + d*x))^(5/2)*tan(c + d*x)^(5/2)) - atan(1 + sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a^2*d*(e*cot(c + d*x))^(5/2)*tan(c + d*x)^(5/2)) - log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a^2*d*(e*cot(c + d*x))^(5/2)*tan(c + d*x)^(5/2)) + log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a^2*d*(e*cot(c + d*x))^(5/2)*tan(c + d*x)^(5/2))] @test_int [1/((e*cot(c + d*x))^(7/2)*(a + a*sec(c + d*x))^2), x, 20, 
(2*cot(c + d*x)^3)/(a^2*d*(e*cot(c + d*x))^(7/2)) - (2*cot(c + d*x)^3*csc(c + d*x)*Elliptic.F(c - pi/4 + d*x, 2)*sqrt(sin(2*c + 2*d*x)))/(a^2*d*(e*cot(c + d*x))^(7/2)) - atan(1 - sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a^2*d*(e*cot(c + d*x))^(7/2)*tan(c + d*x)^(7/2)) + atan(1 + sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a^2*d*(e*cot(c + d*x))^(7/2)*tan(c + d*x)^(7/2)) - log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a^2*d*(e*cot(c + d*x))^(7/2)*tan(c + d*x)^(7/2)) + log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a^2*d*(e*cot(c + d*x))^(7/2)*tan(c + d*x)^(7/2))] @test_int [1/((e*cot(c + d*x))^(9/2)*(a + a*sec(c + d*x))^2), x, 21, (2*cot(c + d*x)^3)/(3*a^2*d*(e*cot(c + d*x))^(9/2)) - (4*cos(c + d*x)*cot(c + d*x)^3)/(a^2*d*(e*cot(c + d*x))^(9/2)) + (4*cos(c + d*x)*cot(c + d*x)^4*Elliptic.E(c - pi/4 + d*x, 2))/(a^2*d*(e*cot(c + d*x))^(9/2)*sqrt(sin(2*c + 2*d*x))) - atan(1 - sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a^2*d*(e*cot(c + d*x))^(9/2)*tan(c + d*x)^(9/2)) + atan(1 + sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a^2*d*(e*cot(c + d*x))^(9/2)*tan(c + d*x)^(9/2)) + log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a^2*d*(e*cot(c + d*x))^(9/2)*tan(c + d*x)^(9/2)) - log(1 + sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a^2*d*(e*cot(c + d*x))^(9/2)*tan(c + d*x)^(9/2))] @test_int [1/((e*cot(c + d*x))^(11/2)*(a + a*sec(c + d*x))^2), x, 22, (2*cot(c + d*x)^3)/(5*a^2*d*(e*cot(c + d*x))^(11/2)) + (2*cot(c + d*x)^5)/(a^2*d*(e*cot(c + d*x))^(11/2)) - (4*cot(c + d*x)^4*csc(c + d*x))/(3*a^2*d*(e*cot(c + d*x))^(11/2)) + (2*cot(c + d*x)^5*csc(c + d*x)*Elliptic.F(c - pi/4 + d*x, 2)*sqrt(sin(2*c + 2*d*x)))/(3*a^2*d*(e*cot(c + d*x))^(11/2)) + atan(1 - sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a^2*d*(e*cot(c + d*x))^(11/2)*tan(c + d*x)^(11/2)) - atan(1 + sqrt(2)*sqrt(tan(c + d*x)))/(sqrt(2)*a^2*d*(e*cot(c + d*x))^(11/2)*tan(c + d*x)^(11/2)) + log(1 - sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a^2*d*(e*cot(c + d*x))^(11/2)*tan(c + d*x)^(11/2)) - log(1 
+ sqrt(2)*sqrt(tan(c + d*x)) + tan(c + d*x))/(2*sqrt(2)*a^2*d*(e*cot(c + d*x))^(11/2)*tan(c + d*x)^(11/2))] #= ::Section::Closed:: =# #=Integrands*of*the*form*(d*tan(e+f*x))^m*(a+b*sec(e+f*x))^m=# #= ::Subsection::Closed:: =# #=Integrands*of*the*form*tan(e+f*x)^n*(a+b*sec(e+f*x))^m=# #= ::Subsubsection::Closed:: =# #=n>0=# @test_int [(a + b*sec(c + d*x))*tan(c + d*x)^7, x, 7, (a*log(cos(c + d*x)))/d - (16*b*sec(c + d*x))/(35*d) + ((35*a + 16*b*sec(c + d*x))*tan(c + d*x)^2)/(70*d) - ((35*a + 24*b*sec(c + d*x))*tan(c + d*x)^4)/(140*d) + ((7*a + 6*b*sec(c + d*x))*tan(c + d*x)^6)/(42*d)] @test_int [(a + b*sec(c + d*x))*tan(c + d*x)^5, x, 6, -((a*log(cos(c + d*x)))/d) + (8*b*sec(c + d*x))/(15*d) - ((15*a + 8*b*sec(c + d*x))*tan(c + d*x)^2)/(30*d) + ((5*a + 4*b*sec(c + d*x))*tan(c + d*x)^4)/(20*d)] @test_int [(a + b*sec(c + d*x))*tan(c + d*x)^3, x, 5, (a*log(cos(c + d*x)))/d - (2*b*sec(c + d*x))/(3*d) + ((3*a + 2*b*sec(c + d*x))*tan(c + d*x)^2)/(6*d)] @test_int [(a + b*sec(c + d*x))*tan(c + d*x)^1, x, 4, -((a*log(cos(c + d*x)))/d) + (b*sec(c + d*x))/d] @test_int [cot(c + d*x)^1*(a + b*sec(c + d*x)), x, 5, ((a + b)*log(1 - cos(c + d*x)))/(2*d) + ((a - b)*log(1 + cos(c + d*x)))/(2*d)] @test_int [cot(c + d*x)^3*(a + b*sec(c + d*x)), x, 6, -(((2*a + b)*log(1 - cos(c + d*x)))/(4*d)) - ((2*a - b)*log(1 + cos(c + d*x)))/(4*d) - (cot(c + d*x)^2*(a + b*sec(c + d*x)))/(2*d)] @test_int [cot(c + d*x)^5*(a + b*sec(c + d*x)), x, 7, ((8*a + 3*b)*log(1 - cos(c + d*x)))/(16*d) + ((8*a - 3*b)*log(1 + cos(c + d*x)))/(16*d) - (cot(c + d*x)^4*(a + b*sec(c + d*x)))/(4*d) + (cot(c + d*x)^2*(4*a + 3*b*sec(c + d*x)))/(8*d)] @test_int [cot(c + d*x)^7*(a + b*sec(c + d*x)), x, 8, -(((16*a + 5*b)*log(1 - cos(c + d*x)))/(32*d)) - ((16*a - 5*b)*log(1 + cos(c + d*x)))/(32*d) - (cot(c + d*x)^6*(a + b*sec(c + d*x)))/(6*d) + (cot(c + d*x)^4*(6*a + 5*b*sec(c + d*x)))/(24*d) - (cot(c + d*x)^2*(8*a + 5*b*sec(c + d*x)))/(16*d)] @test_int [(a + b*sec(c + d*x))*tan(c + d*x)^6, x, 5, (-a)*x - (5*b*atanh(sin(c + 
d*x)))/(16*d) + ((16*a + 5*b*sec(c + d*x))*tan(c + d*x))/(16*d) - ((8*a + 5*b*sec(c + d*x))*tan(c + d*x)^3)/(24*d) + ((6*a + 5*b*sec(c + d*x))*tan(c + d*x)^5)/(30*d)] @test_int [(a + b*sec(c + d*x))*tan(c + d*x)^4, x, 4, a*x + (3*b*atanh(sin(c + d*x)))/(8*d) - ((8*a + 3*b*sec(c + d*x))*tan(c + d*x))/(8*d) + ((4*a + 3*b*sec(c + d*x))*tan(c + d*x)^3)/(12*d)] @test_int [(a + b*sec(c + d*x))*tan(c + d*x)^2, x, 3, (-a)*x - (b*atanh(sin(c + d*x)))/(2*d) + ((2*a + b*sec(c + d*x))*tan(c + d*x))/(2*d)] @test_int [cot(c + d*x)^2*(a + b*sec(c + d*x)), x, 2, (-a)*x - (cot(c + d*x)*(a + b*sec(c + d*x)))/d] @test_int [cot(c + d*x)^4*(a + b*sec(c + d*x)), x, 3, a*x - (cot(c + d*x)^3*(a + b*sec(c + d*x)))/(3*d) + (cot(c + d*x)*(3*a + 2*b*sec(c + d*x)))/(3*d)] @test_int [cot(c + d*x)^6*(a + b*sec(c + d*x)), x, 4, (-a)*x - (cot(c + d*x)^5*(a + b*sec(c + d*x)))/(5*d) + (cot(c + d*x)^3*(5*a + 4*b*sec(c + d*x)))/(15*d) - (cot(c + d*x)*(15*a + 8*b*sec(c + d*x)))/(15*d)] @test_int [cot(c + d*x)^8*(a + b*sec(c + d*x)), x, 5, a*x - (cot(c + d*x)^7*(a + b*sec(c + d*x)))/(7*d) + (cot(c + d*x)^5*(7*a + 6*b*sec(c + d*x)))/(35*d) + (cot(c + d*x)*(35*a + 16*b*sec(c + d*x)))/(35*d) - (cot(c + d*x)^3*(35*a + 24*b*sec(c + d*x)))/(105*d)] @test_int [(a + b*sec(c + d*x))^2*tan(c + d*x)^9, x, 3, -((a^2*log(cos(c + d*x)))/d) + (2*a*b*sec(c + d*x))/d - (2*a^2*sec(c + d*x)^2)/d - (8*a*b*sec(c + d*x)^3)/(3*d) + (3*a^2*sec(c + d*x)^4)/(2*d) + (12*a*b*sec(c + d*x)^5)/(5*d) - (2*a^2*sec(c + d*x)^6)/(3*d) - (8*a*b*sec(c + d*x)^7)/(7*d) + (a^2*sec(c + d*x)^8)/(8*d) + (2*a*b*sec(c + d*x)^9)/(9*d) + (b^2*tan(c + d*x)^10)/(10*d), -((a^2*log(cos(c + d*x)))/d) + (2*a*b*sec(c + d*x))/d - ((4*a^2 - b^2)*sec(c + d*x)^2)/(2*d) - (8*a*b*sec(c + d*x)^3)/(3*d) + ((3*a^2 - 2*b^2)*sec(c + d*x)^4)/(2*d) + (12*a*b*sec(c + d*x)^5)/(5*d) - ((2*a^2 - 3*b^2)*sec(c + d*x)^6)/(3*d) - (8*a*b*sec(c + d*x)^7)/(7*d) + ((a^2 - 4*b^2)*sec(c + d*x)^8)/(8*d) + (2*a*b*sec(c + d*x)^9)/(9*d) + (b^2*sec(c + d*x)^10)/(10*d)] @test_int [(a + 
b*sec(c + d*x))^2*tan(c + d*x)^7, x, 3, (a^2*log(cos(c + d*x)))/d - (2*a*b*sec(c + d*x))/d + (3*a^2*sec(c + d*x)^2)/(2*d) + (2*a*b*sec(c + d*x)^3)/d - (3*a^2*sec(c + d*x)^4)/(4*d) - (6*a*b*sec(c + d*x)^5)/(5*d) + (a^2*sec(c + d*x)^6)/(6*d) + (2*a*b*sec(c + d*x)^7)/(7*d) + (b^2*tan(c + d*x)^8)/(8*d), (a^2*log(cos(c + d*x)))/d - (2*a*b*sec(c + d*x))/d + ((3*a^2 - b^2)*sec(c + d*x)^2)/(2*d) + (2*a*b*sec(c + d*x)^3)/d - (3*(a^2 - b^2)*sec(c + d*x)^4)/(4*d) - (6*a*b*sec(c + d*x)^5)/(5*d) + ((a^2 - 3*b^2)*sec(c + d*x)^6)/(6*d) + (2*a*b*sec(c + d*x)^7)/(7*d) + (b^2*sec(c + d*x)^8)/(8*d)] @test_int [(a + b*sec(c + d*x))^2*tan(c + d*x)^5, x, 3, -((a^2*log(cos(c + d*x)))/d) + (2*a*b*sec(c + d*x))/d - (a^2*sec(c + d*x)^2)/d - (4*a*b*sec(c + d*x)^3)/(3*d) + (a^2*sec(c + d*x)^4)/(4*d) + (2*a*b*sec(c + d*x)^5)/(5*d) + (b^2*tan(c + d*x)^6)/(6*d), -((a^2*log(cos(c + d*x)))/d) + (2*a*b*sec(c + d*x))/d - ((2*a^2 - b^2)*sec(c + d*x)^2)/(2*d) - (4*a*b*sec(c + d*x)^3)/(3*d) + ((a^2 - 2*b^2)*sec(c + d*x)^4)/(4*d) + (2*a*b*sec(c + d*x)^5)/(5*d) + (b^2*sec(c + d*x)^6)/(6*d)] @test_int [(a + b*sec(c + d*x))^2*tan(c + d*x)^3, x, 3, (a^2*log(cos(c + d*x)))/d - (2*a*b*sec(c + d*x))/d + ((a^2 - b^2)*sec(c + d*x)^2)/(2*d) + (2*a*b*sec(c + d*x)^3)/(3*d) + (b^2*sec(c + d*x)^4)/(4*d)] @test_int [(a + b*sec(c + d*x))^2*tan(c + d*x)^1, x, 3, -((a^2*log(cos(c + d*x)))/d) + (2*a*b*sec(c + d*x))/d + (b^2*sec(c + d*x)^2)/(2*d)] @test_int [cot(c + d*x)^1*(a + b*sec(c + d*x))^2, x, 3, (a^2*log(cos(c + d*x)))/d + ((a + b)^2*log(1 - sec(c + d*x)))/(2*d) + ((a - b)^2*log(1 + sec(c + d*x)))/(2*d)] @test_int [cot(c + d*x)^3*(a + b*sec(c + d*x))^2, x, 4, -((a^2*log(cos(c + d*x)))/d) - (a*(a + b)*log(1 - sec(c + d*x)))/(2*d) - (a*(a - b)*log(1 + sec(c + d*x)))/(2*d) - (cot(c + d*x)^2*(a^2 + b^2 + 2*a*b*sec(c + d*x)))/(2*d)] @test_int [cot(c + d*x)^5*(a + b*sec(c + d*x))^2, x, 5, (a^2*log(cos(c + d*x)))/d + (a*(4*a + 3*b)*log(1 - sec(c + d*x)))/(8*d) + (a*(4*a - 3*b)*log(1 + sec(c + d*x)))/(8*d) + (a*cot(c + 
d*x)^2*(2*a + 3*b*sec(c + d*x)))/(4*d) - (cot(c + d*x)^4*(a^2 + b^2 + 2*a*b*sec(c + d*x)))/(4*d)] @test_int [(a + b*sec(c + d*x))^2*tan(c + d*x)^6, x, 12, (-a^2)*x - (5*a*b*atanh(sin(c + d*x)))/(8*d) + (a^2*tan(c + d*x))/d + (5*a*b*sec(c + d*x)*tan(c + d*x))/(8*d) - (a^2*tan(c + d*x)^3)/(3*d) - (5*a*b*sec(c + d*x)*tan(c + d*x)^3)/(12*d) + (a^2*tan(c + d*x)^5)/(5*d) + (a*b*sec(c + d*x)*tan(c + d*x)^5)/(3*d) + (b^2*tan(c + d*x)^7)/(7*d)] @test_int [(a + b*sec(c + d*x))^2*tan(c + d*x)^4, x, 10, a^2*x + (3*a*b*atanh(sin(c + d*x)))/(4*d) - (a^2*tan(c + d*x))/d - (3*a*b*sec(c + d*x)*tan(c + d*x))/(4*d) + (a^2*tan(c + d*x)^3)/(3*d) + (a*b*sec(c + d*x)*tan(c + d*x)^3)/(2*d) + (b^2*tan(c + d*x)^5)/(5*d)] @test_int [(a + b*sec(c + d*x))^2*tan(c + d*x)^2, x, 8, (-a^2)*x - (a*b*atanh(sin(c + d*x)))/d + (a^2*tan(c + d*x))/d + (a*b*sec(c + d*x)*tan(c + d*x))/d + (b^2*tan(c + d*x)^3)/(3*d)] @test_int [cot(c + d*x)^2*(a + b*sec(c + d*x))^2, x, 8, (-a^2)*x - (a^2*cot(c + d*x))/d - (b^2*cot(c + d*x))/d - (2*a*b*csc(c + d*x))/d] @test_int [cot(c + d*x)^4*(a + b*sec(c + d*x))^2, x, 9, a^2*x + (a^2*cot(c + d*x))/d - (a^2*cot(c + d*x)^3)/(3*d) - (b^2*cot(c + d*x)^3)/(3*d) + (2*a*b*csc(c + d*x))/d - (2*a*b*csc(c + d*x)^3)/(3*d)] @test_int [cot(c + d*x)^6*(a + b*sec(c + d*x))^2, x, 11, (-a^2)*x - (a^2*cot(c + d*x))/d + (a^2*cot(c + d*x)^3)/(3*d) - (a^2*cot(c + d*x)^5)/(5*d) - (b^2*cot(c + d*x)^5)/(5*d) - (2*a*b*csc(c + d*x))/d + (4*a*b*csc(c + d*x)^3)/(3*d) - (2*a*b*csc(c + d*x)^5)/(5*d)] @test_int [cot(c + d*x)^8*(a + b*sec(c + d*x))^2, x, 12, a^2*x + (a^2*cot(c + d*x))/d - (a^2*cot(c + d*x)^3)/(3*d) + (a^2*cot(c + d*x)^5)/(5*d) - (a^2*cot(c + d*x)^7)/(7*d) - (b^2*cot(c + d*x)^7)/(7*d) + (2*a*b*csc(c + d*x))/d - (2*a*b*csc(c + d*x)^3)/d + (6*a*b*csc(c + d*x)^5)/(5*d) - (2*a*b*csc(c + d*x)^7)/(7*d)] #= ::Subsubsection::Closed:: =# #=n<0=# @test_int [tan(c + d*x)^9/(a + b*sec(c + d*x)), x, 3, -(log(cos(c + d*x))/(a*d)) - ((a^2 - b^2)^4*log(a + b*sec(c + d*x)))/(a*b^8*d) + ((a^6 - 4*a^4*b^2 
+ 6*a^2*b^4 - 4*b^6)*sec(c + d*x))/(b^7*d) - (a*(a^4 - 4*a^2*b^2 + 6*b^4)*sec(c + d*x)^2)/(2*b^6*d) + ((a^4 - 4*a^2*b^2 + 6*b^4)*sec(c + d*x)^3)/(3*b^5*d) - (a*(a^2 - 4*b^2)*sec(c + d*x)^4)/(4*b^4*d) + ((a^2 - 4*b^2)*sec(c + d*x)^5)/(5*b^3*d) - (a*sec(c + d*x)^6)/(6*b^2*d) + sec(c + d*x)^7/(7*b*d)] @test_int [tan(c + d*x)^7/(a + b*sec(c + d*x)), x, 3, log(cos(c + d*x))/(a*d) - ((a^2 - b^2)^3*log(a + b*sec(c + d*x)))/(a*b^6*d) + ((a^4 - 3*a^2*b^2 + 3*b^4)*sec(c + d*x))/(b^5*d) - (a*(a^2 - 3*b^2)*sec(c + d*x)^2)/(2*b^4*d) + ((a^2 - 3*b^2)*sec(c + d*x)^3)/(3*b^3*d) - (a*sec(c + d*x)^4)/(4*b^2*d) + sec(c + d*x)^5/(5*b*d)] @test_int [tan(c + d*x)^5/(a + b*sec(c + d*x)), x, 3, -(log(cos(c + d*x))/(a*d)) - ((a^2 - b^2)^2*log(a + b*sec(c + d*x)))/(a*b^4*d) + ((a^2 - 2*b^2)*sec(c + d*x))/(b^3*d) - (a*sec(c + d*x)^2)/(2*b^2*d) + sec(c + d*x)^3/(3*b*d)] @test_int [tan(c + d*x)^3/(a + b*sec(c + d*x)), x, 3, log(cos(c + d*x))/(a*d) - ((a^2 - b^2)*log(a + b*sec(c + d*x)))/(a*b^2*d) + sec(c + d*x)/(b*d)] @test_int [tan(c + d*x)^1/(a + b*sec(c + d*x)), x, 4, -(log(cos(c + d*x))/(a*d)) - log(a + b*sec(c + d*x))/(a*d)] @test_int [cot(c + d*x)^1/(a + b*sec(c + d*x)), x, 3, log(cos(c + d*x))/(a*d) + log(1 - sec(c + d*x))/(2*(a + b)*d) + log(1 + sec(c + d*x))/(2*(a - b)*d) - (b^2*log(a + b*sec(c + d*x)))/(a*(a^2 - b^2)*d)] @test_int [cot(c + d*x)^3/(a + b*sec(c + d*x)), x, 3, -(log(cos(c + d*x))/(a*d)) - ((2*a + 3*b)*log(1 - sec(c + d*x)))/(4*(a + b)^2*d) - ((2*a - 3*b)*log(1 + sec(c + d*x)))/(4*(a - b)^2*d) - (b^4*log(a + b*sec(c + d*x)))/(a*(a^2 - b^2)^2*d) + 1/(4*(a + b)*d*(1 - sec(c + d*x))) + 1/(4*(a - b)*d*(1 + sec(c + d*x)))] @test_int [cot(c + d*x)^5/(a + b*sec(c + d*x)), x, 3, log(cos(c + d*x))/(a*d) + ((8*a^2 + 21*a*b + 15*b^2)*log(1 - sec(c + d*x)))/(16*(a + b)^3*d) + ((8*a^2 - 21*a*b + 15*b^2)*log(1 + sec(c + d*x)))/(16*(a - b)^3*d) - (b^6*log(a + b*sec(c + d*x)))/(a*(a^2 - b^2)^3*d) - 1/(16*(a + b)*d*(1 - sec(c + d*x))^2) - (5*a + 7*b)/(16*(a + b)^2*d*(1 - sec(c + d*x))) - 
1/(16*(a - b)*d*(1 + sec(c + d*x))^2) - (5*a - 7*b)/(16*(a - b)^2*d*(1 + sec(c + d*x)))] @test_int [tan(c + d*x)^6/(a + b*sec(c + d*x)), x, 15, -(x/a) + ((8*a^4 - 20*a^2*b^2 + 15*b^4)*atanh(sin(c + d*x)))/(8*b^5*d) - (2*(a - b)^(5/2)*(a + b)^(5/2)*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a*b^5*d) - (a*(a^2 - 2*b^2)*tan(c + d*x))/(b^4*d) + ((4*a^2 - 7*b^2)*sec(c + d*x)*tan(c + d*x))/(8*b^3*d) - (a*tan(c + d*x)^3)/(3*b^2*d) + (sec(c + d*x)*tan(c + d*x)^3)/(4*b*d), -(x/a) + (3*atanh(sin(c + d*x)))/(8*b*d) + ((a^2 - 3*b^2)*atanh(sin(c + d*x)))/(2*b^3*d) + ((a^4 - 3*a^2*b^2 + 3*b^4)*atanh(sin(c + d*x)))/(b^5*d) - (2*(a - b)^(5/2)*(a + b)^(5/2)*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a*b^5*d) - (a*tan(c + d*x))/(b^2*d) - (a*(a^2 - 3*b^2)*tan(c + d*x))/(b^4*d) + (3*sec(c + d*x)*tan(c + d*x))/(8*b*d) + ((a^2 - 3*b^2)*sec(c + d*x)*tan(c + d*x))/(2*b^3*d) + (sec(c + d*x)^3*tan(c + d*x))/(4*b*d) - (a*tan(c + d*x)^3)/(3*b^2*d)] @test_int [tan(c + d*x)^4/(a + b*sec(c + d*x)), x, 6, x/a + ((2*a^2 - 3*b^2)*atanh(sin(c + d*x)))/(2*b^3*d) - (2*(a - b)^(3/2)*(a + b)^(3/2)*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a*b^3*d) - (a*tan(c + d*x))/(b^2*d) + (sec(c + d*x)*tan(c + d*x))/(2*b*d)] @test_int [tan(c + d*x)^2/(a + b*sec(c + d*x)), x, 7, -(x/a) + atanh(sin(c + d*x))/(b*d) - (2*sqrt(a - b)*sqrt(a + b)*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a*b*d)] @test_int [cot(c + d*x)^2/(a + b*sec(c + d*x)), x, 9, -(x/a) - (2*b^3*atanh((sqrt(a^2 - b^2)*tan((1/2)*(c + d*x)))/(a + b)))/(a*(a^2 - b^2)^(3/2)*d) - (a*cot(c + d*x))/((a^2 - b^2)*d) + (b*csc(c + d*x))/((a^2 - b^2)*d), -((a*x)/(a^2 - b^2)) + (b^2*x)/(a*(a^2 - b^2)) - (2*b^3*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a*(a - b)^(3/2)*(a + b)^(3/2)*d) - (a*cot(c + d*x))/((a^2 - b^2)*d) + (b*csc(c + d*x))/((a^2 - b^2)*d)] @test_int [cot(c + d*x)^4/(a + b*sec(c + d*x)), x, 15, x/a - (2*b^5*atanh((sqrt(a^2 - b^2)*tan((1/2)*(c + d*x)))/(a + b)))/(a*(a^2 - b^2)^(5/2)*d) + 
(a*(a^2 - 2*b^2)*cot(c + d*x))/((a^2 - b^2)^2*d) - (a*cot(c + d*x)^3)/(3*(a^2 - b^2)*d) - (b*(a^2 - 2*b^2)*csc(c + d*x))/((a^2 - b^2)^2*d) + (b*csc(c + d*x)^3)/(3*(a^2 - b^2)*d), -((a*b^2*x)/(a^2 - b^2)^2) + (b^4*x)/(a*(a^2 - b^2)^2) + (a*x)/(a^2 - b^2) - (2*b^5*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a*(a - b)^(5/2)*(a + b)^(5/2)*d) - (a*b^2*cot(c + d*x))/((a^2 - b^2)^2*d) + (a*cot(c + d*x))/((a^2 - b^2)*d) - (a*cot(c + d*x)^3)/(3*(a^2 - b^2)*d) + (b^3*csc(c + d*x))/((a^2 - b^2)^2*d) - (b*csc(c + d*x))/((a^2 - b^2)*d) + (b*csc(c + d*x)^3)/(3*(a^2 - b^2)*d)] @test_int [tan(c + d*x)^9/(a + b*sec(c + d*x))^2, x, 3, -(log(cos(c + d*x))/(a^2*d)) + ((a^2 - b^2)^3*(7*a^2 + b^2)*log(a + b*sec(c + d*x)))/(a^2*b^8*d) - (2*a*(3*a^4 - 8*a^2*b^2 + 6*b^4)*sec(c + d*x))/(b^7*d) + ((5*a^4 - 12*a^2*b^2 + 6*b^4)*sec(c + d*x)^2)/(2*b^6*d) - (4*a*(a^2 - 2*b^2)*sec(c + d*x)^3)/(3*b^5*d) + ((3*a^2 - 4*b^2)*sec(c + d*x)^4)/(4*b^4*d) - (2*a*sec(c + d*x)^5)/(5*b^3*d) + sec(c + d*x)^6/(6*b^2*d) + (a^2 - b^2)^4/(a*b^8*d*(a + b*sec(c + d*x)))] @test_int [tan(c + d*x)^7/(a + b*sec(c + d*x))^2, x, 3, log(cos(c + d*x))/(a^2*d) + ((a^2 - b^2)^2*(5*a^2 + b^2)*log(a + b*sec(c + d*x)))/(a^2*b^6*d) - (2*a*(2*a^2 - 3*b^2)*sec(c + d*x))/(b^5*d) + (3*(a^2 - b^2)*sec(c + d*x)^2)/(2*b^4*d) - (2*a*sec(c + d*x)^3)/(3*b^3*d) + sec(c + d*x)^4/(4*b^2*d) + (a^2 - b^2)^3/(a*b^6*d*(a + b*sec(c + d*x)))] @test_int [tan(c + d*x)^5/(a + b*sec(c + d*x))^2, x, 3, -(log(cos(c + d*x))/(a^2*d)) + ((a^2 - b^2)*(3*a^2 + b^2)*log(a + b*sec(c + d*x)))/(a^2*b^4*d) - (2*a*sec(c + d*x))/(b^3*d) + sec(c + d*x)^2/(2*b^2*d) + (a^2 - b^2)^2/(a*b^4*d*(a + b*sec(c + d*x)))] @test_int [tan(c + d*x)^3/(a + b*sec(c + d*x))^2, x, 3, log(cos(c + d*x))/(a^2*d) + ((a^2 + b^2)*log(a + b*sec(c + d*x)))/(a^2*b^2*d) + (a^2 - b^2)/(a*b^2*d*(a + b*sec(c + d*x)))] @test_int [tan(c + d*x)^1/(a + b*sec(c + d*x))^2, x, 3, -(log(cos(c + d*x))/(a^2*d)) - log(a + b*sec(c + d*x))/(a^2*d) + 1/(a*d*(a + b*sec(c + d*x)))] @test_int [cot(c + 
d*x)^1/(a + b*sec(c + d*x))^2, x, 3, log(cos(c + d*x))/(a^2*d) + log(1 - sec(c + d*x))/(2*(a + b)^2*d) + log(1 + sec(c + d*x))/(2*(a - b)^2*d) - (b^2*(3*a^2 - b^2)*log(a + b*sec(c + d*x)))/(a^2*(a^2 - b^2)^2*d) + b^2/(a*(a^2 - b^2)*d*(a + b*sec(c + d*x)))] @test_int [cot(c + d*x)^3/(a + b*sec(c + d*x))^2, x, 3, -(log(cos(c + d*x))/(a^2*d)) - ((a + 2*b)*log(1 - sec(c + d*x)))/(2*(a + b)^3*d) - ((a - 2*b)*log(1 + sec(c + d*x)))/(2*(a - b)^3*d) - (b^4*(5*a^2 - b^2)*log(a + b*sec(c + d*x)))/(a^2*(a^2 - b^2)^3*d) + 1/(4*(a + b)^2*d*(1 - sec(c + d*x))) + 1/(4*(a - b)^2*d*(1 + sec(c + d*x))) + b^4/(a*(a^2 - b^2)^2*d*(a + b*sec(c + d*x)))] @test_int [cot(c + d*x)^5/(a + b*sec(c + d*x))^2, x, 3, log(cos(c + d*x))/(a^2*d) + ((4*a^2 + 13*a*b + 12*b^2)*log(1 - sec(c + d*x)))/(8*(a + b)^4*d) + ((4*a^2 - 13*a*b + 12*b^2)*log(1 + sec(c + d*x)))/(8*(a - b)^4*d) - (b^6*(7*a^2 - b^2)*log(a + b*sec(c + d*x)))/(a^2*(a^2 - b^2)^4*d) - 1/(16*(a + b)^2*d*(1 - sec(c + d*x))^2) - (5*a + 9*b)/(16*(a + b)^3*d*(1 - sec(c + d*x))) - 1/(16*(a - b)^2*d*(1 + sec(c + d*x))^2) - (5*a - 9*b)/(16*(a - b)^3*d*(1 + sec(c + d*x))) + b^6/(a*(a^2 - b^2)^3*d*(a + b*sec(c + d*x)))] @test_int [tan(c + d*x)^6/(a + b*sec(c + d*x))^2, x, 16, -(x/a^2) - (a*(4*a^2 - 5*b^2)*atanh(sin(c + d*x)))/(b^5*d) + (2*(a - b)^(3/2)*(a + b)^(3/2)*(4*a^2 + b^2)*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a^2*b^5*d) + ((a^2 - b^2)^2*sin(c + d*x))/(a*b^4*d*(b + a*cos(c + d*x))) + ((3*a^2 - 2*b^2)*tan(c + d*x))/(b^4*d) - (a*sec(c + d*x)*tan(c + d*x))/(b^3*d) + tan(c + d*x)^3/(3*b^2*d), -(x/a^2) - (a*atanh(sin(c + d*x)))/(b^3*d) - (2*a*(2*a^2 - 3*b^2)*atanh(sin(c + d*x)))/(b^5*d) - (2*(a - b)^(3/2)*(a + b)^(3/2)*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a^2*b^3*d) + (4*(a - b)^(3/2)*(a + b)^(3/2)*(2*a^2 + b^2)*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a^2*b^5*d) + ((a^2 - b^2)^2*sin(c + d*x))/(a*b^4*d*(b + a*cos(c + d*x))) + tan(c + d*x)/(b^2*d) + (3*(a^2 - b^2)*tan(c + d*x))/(b^4*d) - 
(a*sec(c + d*x)*tan(c + d*x))/(b^3*d) + tan(c + d*x)^3/(3*b^2*d)] @test_int [tan(c + d*x)^4/(a + b*sec(c + d*x))^2, x, 6, x/a^2 - (2*a*atanh(sin(c + d*x)))/(b^3*d) + (2*sqrt(a - b)*sqrt(a + b)*(2*a^2 + b^2)*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a^2*b^3*d) + ((2*a^2 - b^2)*sin(c + d*x))/(a*b^2*d*(b + a*cos(c + d*x))) + tan(c + d*x)/(b*d*(b + a*cos(c + d*x)))] @test_int [tan(c + d*x)^2/(a + b*sec(c + d*x))^2, x, 6, -(x/a^2) + (2*b*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a^2*sqrt(a - b)*sqrt(a + b)*d) + tan(c + d*x)/(a*d*(a + b*sec(c + d*x)))] @test_int [cot(c + d*x)^2/(a + b*sec(c + d*x))^2, x, 11, -(x/a^2) - (2*b^5*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a^2*(a - b)^(5/2)*(a + b)^(5/2)*d) - (4*b^3*(2*a^2 - b^2)*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a^2*(a - b)^(5/2)*(a + b)^(5/2)*d) - sin(c + d*x)/(2*(a + b)^2*d*(1 - cos(c + d*x))) + sin(c + d*x)/(2*(a - b)^2*d*(1 + cos(c + d*x))) + (b^4*sin(c + d*x))/(a*(a^2 - b^2)^2*d*(b + a*cos(c + d*x)))] @test_int [cot(c + d*x)^4/(a + b*sec(c + d*x))^2, x, 15, x/a^2 - (2*b^7*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a^2*(a - b)^(7/2)*(a + b)^(7/2)*d) - (4*b^5*(3*a^2 - b^2)*atanh((sqrt(a - b)*tan((1/2)*(c + d*x)))/sqrt(a + b)))/(a^2*(a - b)^(7/2)*(a + b)^(7/2)*d) - sin(c + d*x)/(12*(a + b)^2*d*(1 - cos(c + d*x))^2) - sin(c + d*x)/(12*(a + b)^2*d*(1 - cos(c + d*x))) + ((3*a + 5*b)*sin(c + d*x))/(4*(a + b)^3*d*(1 - cos(c + d*x))) + sin(c + d*x)/(12*(a - b)^2*d*(1 + cos(c + d*x))^2) - ((3*a - 5*b)*sin(c + d*x))/(4*(a - b)^3*d*(1 + cos(c + d*x))) + sin(c + d*x)/(12*(a - b)^2*d*(1 + cos(c + d*x))) + (b^6*sin(c + d*x))/(a*(a^2 - b^2)^3*d*(b + a*cos(c + d*x)))] #= ::Subsection::Closed:: =# #=Integrands*of*the*form*(d*tan(e+f*x))^(n/2)*(a+b*sec(e+f*x))^m=# #= ::Subsubsection:: =# #=n>0=# #= ::Subsubsection::Closed:: =# #=n<0=# @test_int [(e*tan(c + d*x))^(5/2)/(a + b*sec(c + d*x)), x, 38, (a*e^(5/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + 
d*x)))/sqrt(e)))/(sqrt(2)*b^2*d) - ((a^2 - b^2)*e^(5/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*b^2*d) - (a*e^(5/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*b^2*d) + ((a^2 - b^2)*e^(5/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*b^2*d) - (a*e^(5/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*b^2*d) + ((a^2 - b^2)*e^(5/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*b^2*d) + (a*e^(5/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*b^2*d) - ((a^2 - b^2)*e^(5/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*b^2*d) + (2*sqrt(2)*sqrt(a - b)*sqrt(a + b)*e^2*sqrt(cos(c + d*x))*Elliptic.Pi(-(sqrt(a - b)/sqrt(a + b)), asin(sqrt(sin(c + d*x))/sqrt(1 + cos(c + d*x))), -1)*sqrt(e*tan(c + d*x)))/(a*b*d*sqrt(sin(c + d*x))) - (2*sqrt(2)*sqrt(a - b)*sqrt(a + b)*e^2*sqrt(cos(c + d*x))*Elliptic.Pi(sqrt(a - b)/sqrt(a + b), asin(sqrt(sin(c + d*x))/sqrt(1 + cos(c + d*x))), -1)*sqrt(e*tan(c + d*x)))/(a*b*d*sqrt(sin(c + d*x))) - (2*e^2*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/(b*d*sqrt(sin(2*c + 2*d*x))) + (2*e*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/(b*d)] @test_int [(e*tan(c + d*x))^(3/2)/(a + b*sec(c + d*x)), x, 35, (a*e^(3/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*b^2*d) - ((a^2 - b^2)*e^(3/2)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*b^2*d) - (a*e^(3/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*b^2*d) + ((a^2 - b^2)*e^(3/2)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*b^2*d) + (a*e^(3/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*b^2*d) - ((a^2 - b^2)*e^(3/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*b^2*d) - (a*e^(3/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*b^2*d) + ((a^2 - 
b^2)*e^(3/2)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*b^2*d) - (2*sqrt(2)*sqrt(a^2 - b^2)*e^2*Elliptic.Pi(b/(a - sqrt(a^2 - b^2)), asin(sqrt(-cos(c + d*x))/sqrt(1 + sin(c + d*x))), -1)*sqrt(sin(c + d*x)))/(a*b*d*sqrt(-cos(c + d*x))*sqrt(e*tan(c + d*x))) + (2*sqrt(2)*sqrt(a^2 - b^2)*e^2*Elliptic.Pi(b/(a + sqrt(a^2 - b^2)), asin(sqrt(-cos(c + d*x))/sqrt(1 + sin(c + d*x))), -1)*sqrt(sin(c + d*x)))/(a*b*d*sqrt(-cos(c + d*x))*sqrt(e*tan(c + d*x))) + (e^2*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(b*d*sqrt(e*tan(c + d*x)))] @test_int [(e*tan(c + d*x))^(1/2)/(a + b*sec(c + d*x)), x, 21, -((sqrt(e)*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d)) + (sqrt(e)*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*d) + (sqrt(e)*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) - (sqrt(e)*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*d) + (2*sqrt(2)*b*sqrt(cos(c + d*x))*Elliptic.Pi(-(sqrt(a - b)/sqrt(a + b)), asin(sqrt(sin(c + d*x))/sqrt(1 + cos(c + d*x))), -1)*sqrt(e*tan(c + d*x)))/(a*sqrt(a - b)*sqrt(a + b)*d*sqrt(sin(c + d*x))) - (2*sqrt(2)*b*sqrt(cos(c + d*x))*Elliptic.Pi(sqrt(a - b)/sqrt(a + b), asin(sqrt(sin(c + d*x))/sqrt(1 + cos(c + d*x))), -1)*sqrt(e*tan(c + d*x)))/(a*sqrt(a - b)*sqrt(a + b)*d*sqrt(sin(c + d*x)))] @test_int [1/(e*tan(c + d*x))^(1/2)/(a + b*sec(c + d*x)), x, 19, -(atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e))/(sqrt(2)*a*d*sqrt(e))) + atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e))/(sqrt(2)*a*d*sqrt(e)) - log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x)))/(2*sqrt(2)*a*d*sqrt(e)) + log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x)))/(2*sqrt(2)*a*d*sqrt(e)) - (2*sqrt(2)*b*Elliptic.Pi(b/(a - sqrt(a^2 - b^2)), asin(sqrt(-cos(c + d*x))/sqrt(1 + sin(c + d*x))), -1)*sqrt(sin(c + d*x)))/(a*sqrt(a^2 - b^2)*d*sqrt(-cos(c + d*x))*sqrt(e*tan(c + d*x))) + 
(2*sqrt(2)*b*Elliptic.Pi(b/(a + sqrt(a^2 - b^2)), asin(sqrt(-cos(c + d*x))/sqrt(1 + sin(c + d*x))), -1)*sqrt(sin(c + d*x)))/(a*sqrt(a^2 - b^2)*d*sqrt(-cos(c + d*x))*sqrt(e*tan(c + d*x)))] @test_int [1/(e*tan(c + d*x))^(3/2)/(a + b*sec(c + d*x)), x, 39, (a*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*(a^2 - b^2)*d*e^(3/2)) - (b^2*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*(a^2 - b^2)*d*e^(3/2)) - (a*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*(a^2 - b^2)*d*e^(3/2)) + (b^2*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*(a^2 - b^2)*d*e^(3/2)) - (a*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*(a^2 - b^2)*d*e^(3/2)) + (b^2*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*(a^2 - b^2)*d*e^(3/2)) + (a*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*(a^2 - b^2)*d*e^(3/2)) - (b^2*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*(a^2 - b^2)*d*e^(3/2)) - (2*(a - b*sec(c + d*x)))/((a^2 - b^2)*d*e*sqrt(e*tan(c + d*x))) + (2*sqrt(2)*b^3*sqrt(cos(c + d*x))*Elliptic.Pi(-(sqrt(a - b)/sqrt(a + b)), asin(sqrt(sin(c + d*x))/sqrt(1 + cos(c + d*x))), -1)*sqrt(e*tan(c + d*x)))/(a*(a - b)^(3/2)*(a + b)^(3/2)*d*e^2*sqrt(sin(c + d*x))) - (2*sqrt(2)*b^3*sqrt(cos(c + d*x))*Elliptic.Pi(sqrt(a - b)/sqrt(a + b), asin(sqrt(sin(c + d*x))/sqrt(1 + cos(c + d*x))), -1)*sqrt(e*tan(c + d*x)))/(a*(a - b)^(3/2)*(a + b)^(3/2)*d*e^2*sqrt(sin(c + d*x))) + (2*b*cos(c + d*x)*Elliptic.E(c - pi/4 + d*x, 2)*sqrt(e*tan(c + d*x)))/((a^2 - b^2)*d*e^2*sqrt(sin(2*c + 2*d*x))) - (2*b*cos(c + d*x)*(e*tan(c + d*x))^(3/2))/((a^2 - b^2)*d*e^3)] @test_int [1/(e*tan(c + d*x))^(5/2)/(a + b*sec(c + d*x)), x, 36, (a*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*(a^2 - b^2)*d*e^(5/2)) - (b^2*atan(1 - (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*(a^2 - b^2)*d*e^(5/2)) - (a*atan(1 + (sqrt(2)*sqrt(e*tan(c + 
d*x)))/sqrt(e)))/(sqrt(2)*(a^2 - b^2)*d*e^(5/2)) + (b^2*atan(1 + (sqrt(2)*sqrt(e*tan(c + d*x)))/sqrt(e)))/(sqrt(2)*a*(a^2 - b^2)*d*e^(5/2)) + (a*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*(a^2 - b^2)*d*e^(5/2)) - (b^2*log(sqrt(e) + sqrt(e)*tan(c + d*x) - sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*(a^2 - b^2)*d*e^(5/2)) - (a*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*(a^2 - b^2)*d*e^(5/2)) + (b^2*log(sqrt(e) + sqrt(e)*tan(c + d*x) + sqrt(2)*sqrt(e*tan(c + d*x))))/(2*sqrt(2)*a*(a^2 - b^2)*d*e^(5/2)) - (2*(a - b*sec(c + d*x)))/(3*(a^2 - b^2)*d*e*(e*tan(c + d*x))^(3/2)) - (2*sqrt(2)*b^3*Elliptic.Pi(b/(a - sqrt(a^2 - b^2)), asin(sqrt(-cos(c + d*x))/sqrt(1 + sin(c + d*x))), -1)*sqrt(sin(c + d*x)))/(a*(a^2 - b^2)^(3/2)*d*e^2*sqrt(-cos(c + d*x))*sqrt(e*tan(c + d*x))) + (2*sqrt(2)*b^3*Elliptic.Pi(b/(a + sqrt(a^2 - b^2)), asin(sqrt(-cos(c + d*x))/sqrt(1 + sin(c + d*x))), -1)*sqrt(sin(c + d*x)))/(a*(a^2 - b^2)^(3/2)*d*e^2*sqrt(-cos(c + d*x))*sqrt(e*tan(c + d*x))) + (b*Elliptic.F(c - pi/4 + d*x, 2)*sec(c + d*x)*sqrt(sin(2*c + 2*d*x)))/(3*(a^2 - b^2)*d*e^2*sqrt(e*tan(c + d*x)))] #= Mathematica*indicates*these*have*a*closed-form*antiderivative*in*terms*of*Elliptic.Pi. 
=# #= [(e*tan(c + d*x))^(5/2)/(a + b*sec(c + d*x))^2, x, 21, 0] @test_int [(e*tan(c + d*x))^(3/2)/(a + b*sec(c + d*x))^2, x, 20, 0] @test_int [(e*tan(c + d*x))^(1/2)/(a + b*sec(c + d*x))^2, x, 9, 0] @test_int [1/(e*tan(c + d*x))^(1/2)/(a + b*sec(c + d*x))^2, x, 9, 0] @test_int [1/(e*tan(c + d*x))^(3/2)/(a + b*sec(c + d*x))^2, x, 22, 0] @test_int [1/(e*tan(c + d*x))^(5/2)/(a + b*sec(c + d*x))^2, x, 22, 0] =# #= ::Subsection::Closed:: =# #=Integrands*of*the*form*tan(e+f*x)^n*(a+b*sec(e+f*x))^(m/2)=# #= ::Subsubsection::Closed:: =# #=m>0=# @test_int [tan(c + d*x)^5*sqrt(a + b*sec(c + d*x)), x, 5, -((2*sqrt(a)*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/d) + (2*sqrt(a + b*sec(c + d*x)))/d - (2*a*(a^2 - 2*b^2)*(a + b*sec(c + d*x))^(3/2))/(3*b^4*d) + (2*(3*a^2 - 2*b^2)*(a + b*sec(c + d*x))^(5/2))/(5*b^4*d) - (6*a*(a + b*sec(c + d*x))^(7/2))/(7*b^4*d) + (2*(a + b*sec(c + d*x))^(9/2))/(9*b^4*d)] @test_int [tan(c + d*x)^3*sqrt(a + b*sec(c + d*x)), x, 5, (2*sqrt(a)*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/d - (2*sqrt(a + b*sec(c + d*x)))/d - (2*a*(a + b*sec(c + d*x))^(3/2))/(3*b^2*d) + (2*(a + b*sec(c + d*x))^(5/2))/(5*b^2*d)] @test_int [tan(c + d*x)^1*sqrt(a + b*sec(c + d*x)), x, 4, -((2*sqrt(a)*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/d) + (2*sqrt(a + b*sec(c + d*x)))/d] @test_int [cot(c + d*x)^1*sqrt(a + b*sec(c + d*x)), x, 7, (2*sqrt(a)*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/d - (sqrt(a - b)*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a - b)))/d - (sqrt(a + b)*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a + b)))/d] @test_int [cot(c + d*x)^3*sqrt(a + b*sec(c + d*x)), x, 13, -((2*sqrt(a)*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/d) + (a*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a - b)))/(sqrt(a - b)*d) - (3*b*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a - b)))/(4*sqrt(a - b)*d) + (a*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a + b)))/(sqrt(a + b)*d) + (3*b*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a + b)))/(4*sqrt(a + b)*d) - (cot(c + d*x)^2*sqrt(a + b*sec(c + d*x)))/(2*d)] @test_int [tan(c + d*x)^2*sqrt(a + b*sec(c 
+ d*x)), x, 7, -((2*a*(a - b)*sqrt(a + b)*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(3*b^2*d)) - (2*sqrt(a + b)*(a + 2*b)*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(3*b*d) + (2*sqrt(a + b)*cot(c + d*x)*Elliptic.Pi((a + b)/a, asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/d + (2*sqrt(a + b*sec(c + d*x))*tan(c + d*x))/(3*d)] @test_int [tan(c + d*x)^0*sqrt(a + b*sec(c + d*x)), x, 1, -((2*cot(c + d*x)*Elliptic.Pi(a/(a + b), asin(sqrt(a + b)/sqrt(a + b*sec(c + d*x))), (a - b)/(a + b))*sqrt(-((b*(1 - sec(c + d*x)))/(a + b*sec(c + d*x))))*sqrt((b*(1 + sec(c + d*x)))/(a + b*sec(c + d*x)))*(a + b*sec(c + d*x)))/(sqrt(a + b)*d))] @test_int [cot(c + d*x)^2*sqrt(a + b*sec(c + d*x)), x, 5, (sqrt(a + b)*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/d - (cot(c + d*x)*sqrt(a + b*sec(c + d*x)))/d + (2*cot(c + d*x)*Elliptic.Pi(a/(a + b), asin(sqrt(a + b)/sqrt(a + b*sec(c + d*x))), (a - b)/(a + b))*sqrt(-((b*(1 - sec(c + d*x)))/(a + b*sec(c + d*x))))*sqrt((b*(1 + sec(c + d*x)))/(a + b*sec(c + d*x)))*(a + b*sec(c + d*x)))/(sqrt(a + b)*d)] #= ::Subsubsection::Closed:: =# #=m<0=# @test_int [tan(c + d*x)^5/sqrt(a + b*sec(c + d*x)), x, 5, -((2*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/(sqrt(a)*d)) - (2*a*(a^2 - 2*b^2)*sqrt(a + b*sec(c + d*x)))/(b^4*d) + (2*(3*a^2 - 2*b^2)*(a + b*sec(c + d*x))^(3/2))/(3*b^4*d) - (6*a*(a + b*sec(c + d*x))^(5/2))/(5*b^4*d) + (2*(a + b*sec(c + d*x))^(7/2))/(7*b^4*d)] @test_int [tan(c + d*x)^3/sqrt(a + b*sec(c + d*x)), x, 5, (2*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/(sqrt(a)*d) - (2*a*sqrt(a + b*sec(c + 
d*x)))/(b^2*d) + (2*(a + b*sec(c + d*x))^(3/2))/(3*b^2*d)] @test_int [tan(c + d*x)^1/sqrt(a + b*sec(c + d*x)), x, 3, -((2*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/(sqrt(a)*d))] @test_int [cot(c + d*x)^1/sqrt(a + b*sec(c + d*x)), x, 7, (2*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/(sqrt(a)*d) - atanh(sqrt(a + b*sec(c + d*x))/sqrt(a - b))/(sqrt(a - b)*d) - atanh(sqrt(a + b*sec(c + d*x))/sqrt(a + b))/(sqrt(a + b)*d)] @test_int [cot(c + d*x)^3/sqrt(a + b*sec(c + d*x)), x, 11, -((2*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/(sqrt(a)*d)) + atanh(sqrt(a + b*sec(c + d*x))/sqrt(a - b))/(sqrt(a - b)*d) - (b*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a - b)))/(4*(a - b)^(3/2)*d) + (b*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a + b)))/(4*(a + b)^(3/2)*d) + atanh(sqrt(a + b*sec(c + d*x))/sqrt(a + b))/(sqrt(a + b)*d) + sqrt(a + b*sec(c + d*x))/(4*(a + b)*d*(1 - sec(c + d*x))) + sqrt(a + b*sec(c + d*x))/(4*(a - b)*d*(1 + sec(c + d*x)))] @test_int [tan(c + d*x)^4/sqrt(a + b*sec(c + d*x)), x, 11, -((2*sqrt(a + b)*cot(c + d*x)*Elliptic.Pi((a + b)/a, asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*d)) - (2*(a - b)*sqrt(a + b)*(8*a^2 - 21*b^2)*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt(-((b*(-1 + sec(c + d*x)))/(a + b)))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(15*b^4*d) + (2*sqrt(a + b)*(-8*a^2 + 2*a*b + 21*b^2)*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt(-((b*(-1 + sec(c + d*x)))/(a + b)))*sqrt((b*(1 + sec(c + d*x)))/(-a + b)))/(15*b^3*d) - (8*a*sqrt(a + b*sec(c + d*x))*tan(c + d*x))/(15*b^2*d) + (2*sec(c + d*x)*sqrt(a + b*sec(c + d*x))*tan(c + d*x))/(5*b*d), (4*(a - b)*sqrt(a + b)*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(b^2*d) - (2*(a - b)*sqrt(a + b)*(8*a^2 + 9*b^2)*cot(c + 
d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(15*b^4*d) + (4*sqrt(a + b)*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(b*d) - (2*sqrt(a + b)*(8*a^2 - 2*a*b + 9*b^2)*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(15*b^3*d) - (2*sqrt(a + b)*cot(c + d*x)*Elliptic.Pi((a + b)/a, asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*d) - (8*a*sqrt(a + b*sec(c + d*x))*tan(c + d*x))/(15*b^2*d) + (2*sec(c + d*x)*sqrt(a + b*sec(c + d*x))*tan(c + d*x))/(5*b*d)] @test_int [tan(c + d*x)^2/sqrt(a + b*sec(c + d*x)), x, 6, -((2*(a - b)*sqrt(a + b)*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(b^2*d)) - (2*sqrt(a + b)*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(b*d) + (2*sqrt(a + b)*cot(c + d*x)*Elliptic.Pi((a + b)/a, asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*d)] @test_int [tan(c + d*x)^0/sqrt(a + b*sec(c + d*x)), x, 1, -((2*sqrt(a + b)*cot(c + d*x)*Elliptic.Pi((a + b)/a, asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*d))] @test_int [cot(c + d*x)^2/sqrt(a + b*sec(c + d*x)), x, 9, (cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a 
- b))))/(sqrt(a + b)*d) - (cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(sqrt(a + b)*d) + (2*sqrt(a + b)*cot(c + d*x)*Elliptic.Pi((a + b)/a, asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*d) - cot(c + d*x)/(d*sqrt(a + b*sec(c + d*x))) + (b^2*tan(c + d*x))/((a^2 - b^2)*d*sqrt(a + b*sec(c + d*x)))] @test_int [tan(c + d*x)^5/(a + b*sec(c + d*x))^(3/2), x, 5, -((2*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/(a^(3/2)*d)) + (2*(a^2 - b^2)^2)/(a*b^4*d*sqrt(a + b*sec(c + d*x))) + (2*(3*a^2 - 2*b^2)*sqrt(a + b*sec(c + d*x)))/(b^4*d) - (2*a*(a + b*sec(c + d*x))^(3/2))/(b^4*d) + (2*(a + b*sec(c + d*x))^(5/2))/(5*b^4*d)] @test_int [tan(c + d*x)^3/(a + b*sec(c + d*x))^(3/2), x, 5, (2*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/(a^(3/2)*d) + (2*(a^2 - b^2))/(a*b^2*d*sqrt(a + b*sec(c + d*x))) + (2*sqrt(a + b*sec(c + d*x)))/(b^2*d)] @test_int [tan(c + d*x)^1/(a + b*sec(c + d*x))^(3/2), x, 4, -((2*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/(a^(3/2)*d)) + 2/(a*d*sqrt(a + b*sec(c + d*x)))] @test_int [cot(c + d*x)^1/(a + b*sec(c + d*x))^(3/2), x, 7, (2*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/(a^(3/2)*d) - atanh(sqrt(a + b*sec(c + d*x))/sqrt(a - b))/((a - b)^(3/2)*d) - atanh(sqrt(a + b*sec(c + d*x))/sqrt(a + b))/((a + b)^(3/2)*d) + (2*b^2)/(a*(a^2 - b^2)*d*sqrt(a + b*sec(c + d*x)))] @test_int [cot(c + d*x)^3/(a + b*sec(c + d*x))^(3/2), x, 11, -((2*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a)))/(a^(3/2)*d)) + ((4*a - 7*b)*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a - b)))/(4*(a - b)^(5/2)*d) + ((4*a + 7*b)*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a + b)))/(4*(a + b)^(5/2)*d) + (2*b^4)/(a*(a^2 - b^2)^2*d*sqrt(a + b*sec(c + d*x))) + sqrt(a + b*sec(c + d*x))/(4*(a + b)^2*d*(1 - sec(c + d*x))) + sqrt(a + b*sec(c + d*x))/(4*(a - b)^2*d*(1 + sec(c + d*x))), -((2*atanh(sqrt(a + b*sec(c + 
d*x))/sqrt(a)))/(a^(3/2)*d)) + ((2*a - 3*b)*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a - b)))/(2*(a - b)^(5/2)*d) - (b*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a - b)))/(4*(a - b)^(5/2)*d) + (b*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a + b)))/(4*(a + b)^(5/2)*d) + ((2*a + 3*b)*atanh(sqrt(a + b*sec(c + d*x))/sqrt(a + b)))/(2*(a + b)^(5/2)*d) + (2*b^4)/(a*(a^2 - b^2)^2*d*sqrt(a + b*sec(c + d*x))) + sqrt(a + b*sec(c + d*x))/(4*(a + b)^2*d*(1 - sec(c + d*x))) + sqrt(a + b*sec(c + d*x))/(4*(a - b)^2*d*(1 + sec(c + d*x)))] @test_int [tan(c + d*x)^4/(a + b*sec(c + d*x))^(3/2), x, 17, (2*(8*a^4 - 11*a^2*b^2 + 3*b^4)*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt(-((b*(-1 + sec(c + d*x)))/(a + b)))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(3*a*b^4*sqrt(a + b)*d) + (2*(2*a + b)*(4*a^2 + a*b - 3*b^2)*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt(-((b*(-1 + sec(c + d*x)))/(a + b)))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(3*a*b^3*sqrt(a + b)*d) - (2*sqrt(a + b)*cot(c + d*x)*Elliptic.Pi((a + b)/a, asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a^2*d) - (4*a*tan(c + d*x))/((a^2 - b^2)*d*sqrt(a + b*sec(c + d*x))) + (2*b^2*tan(c + d*x))/(a*(a^2 - b^2)*d*sqrt(a + b*sec(c + d*x))) - (2*a^2*sec(c + d*x)*tan(c + d*x))/(b*(a^2 - b^2)*d*sqrt(a + b*sec(c + d*x))) + (2*(4*a^2 - b^2)*sqrt(a + b*sec(c + d*x))*tan(c + d*x))/(3*b^2*(a^2 - b^2)*d), (2*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*sqrt(a + b)*d) - (4*a*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(b^2*sqrt(a + b)*d) + (2*a*(8*a^2 - 5*b^2)*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - 
b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(3*b^4*sqrt(a + b)*d) - (2*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*sqrt(a + b)*d) - (4*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(b*sqrt(a + b)*d) + (2*(2*a + b)*(4*a + b)*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(3*b^3*sqrt(a + b)*d) - (2*sqrt(a + b)*cot(c + d*x)*Elliptic.Pi((a + b)/a, asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a^2*d) - (4*a*tan(c + d*x))/((a^2 - b^2)*d*sqrt(a + b*sec(c + d*x))) + (2*b^2*tan(c + d*x))/(a*(a^2 - b^2)*d*sqrt(a + b*sec(c + d*x))) - (2*a^2*sec(c + d*x)*tan(c + d*x))/(b*(a^2 - b^2)*d*sqrt(a + b*sec(c + d*x))) + (2*(4*a^2 - b^2)*sqrt(a + b*sec(c + d*x))*tan(c + d*x))/(3*b^2*(a^2 - b^2)*d)] @test_int [tan(c + d*x)^2/(a + b*sec(c + d*x))^(3/2), x, 7, (2*(a - b)*sqrt(a + b)*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*b^2*d) + (2*sqrt(a + b)*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*b*d) + (2*sqrt(a + b)*cot(c + d*x)*Elliptic.Pi((a + b)/a, asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a^2*d) + (2*tan(c + d*x))/(a*d*sqrt(a + b*sec(c + d*x)))] @test_int [tan(c + d*x)^0/(a + b*sec(c + d*x))^(3/2), x, 6, (2*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), 
(a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*sqrt(a + b)*d) - (2*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*sqrt(a + b)*d) - (2*sqrt(a + b)*cot(c + d*x)*Elliptic.Pi((a + b)/a, asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a^2*d) + (2*b^2*tan(c + d*x))/(a*(a^2 - b^2)*d*sqrt(a + b*sec(c + d*x)))] @test_int [cot(c + d*x)^2/(a + b*sec(c + d*x))^(3/2), x, 14, (2*(a^2 + b^2)*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt(-((b*(-1 + sec(c + d*x)))/(a + b)))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*(a - b)*(a + b)^(3/2)*d) - ((a^2 - a*b + 2*b^2)*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt(-((b*(-1 + sec(c + d*x)))/(a + b)))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*(a - b)*(a + b)^(3/2)*d) + (2*sqrt(a + b)*cot(c + d*x)*Elliptic.Pi((a + b)/a, asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a^2*d) - cot(c + d*x)/(d*(a + b*sec(c + d*x))^(3/2)) + (b^2*tan(c + d*x))/((a^2 - b^2)*d*(a + b*sec(c + d*x))^(3/2)) + (2*b^2*(a^2 + b^2)*tan(c + d*x))/(a*(a^2 - b^2)^2*d*sqrt(a + b*sec(c + d*x))), (4*a*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/((a - b)*(a + b)^(3/2)*d) - (2*cot(c + d*x)*Elliptic.E(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*sqrt(a + b)*d) - ((3*a - b)*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - 
b))))/((a - b)*(a + b)^(3/2)*d) + (2*cot(c + d*x)*Elliptic.F(asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a*sqrt(a + b)*d) + (2*sqrt(a + b)*cot(c + d*x)*Elliptic.Pi((a + b)/a, asin(sqrt(a + b*sec(c + d*x))/sqrt(a + b)), (a + b)/(a - b))*sqrt((b*(1 - sec(c + d*x)))/(a + b))*sqrt(-((b*(1 + sec(c + d*x)))/(a - b))))/(a^2*d) - cot(c + d*x)/(d*(a + b*sec(c + d*x))^(3/2)) + (b^2*tan(c + d*x))/((a^2 - b^2)*d*(a + b*sec(c + d*x))^(3/2)) + (4*a*b^2*tan(c + d*x))/((a^2 - b^2)^2*d*sqrt(a + b*sec(c + d*x))) - (2*b^2*tan(c + d*x))/(a*(a^2 - b^2)*d*sqrt(a + b*sec(c + d*x)))] #= ::Subsection::Closed:: =# #=Integrands*of*the*form*(d*tan(e+f*x))^n*(a+b*sec(e+f*x))^m*with*n*symbolic=# @test_int [(d*tan(e + f*x))^n*(a + b*sec(e + f*x))^3, x, 8, (3*a*b^2*(d*tan(e + f*x))^(1 + n))/(d*f*(1 + n)) + (a^3*HypergeometricFunctions._₂F₁(1, (1 + n)/2, (3 + n)/2, -tan(e + f*x)^2)*(d*tan(e + f*x))^(1 + n))/(d*f*(1 + n)) + (3*a^2*b*(cos(e + f*x)^2)^((2 + n)/2)*HypergeometricFunctions._₂F₁((1 + n)/2, (2 + n)/2, (3 + n)/2, sin(e + f*x)^2)*sec(e + f*x)*(d*tan(e + f*x))^(1 + n))/(d*f*(1 + n)) + (b^3*(cos(e + f*x)^2)^((4 + n)/2)*HypergeometricFunctions._₂F₁((1 + n)/2, (4 + n)/2, (3 + n)/2, sin(e + f*x)^2)*sec(e + f*x)^3*(d*tan(e + f*x))^(1 + n))/(d*f*(1 + n))] @test_int [(d*tan(e + f*x))^n*(a + b*sec(e + f*x))^2, x, 7, (b^2*(d*tan(e + f*x))^(1 + n))/(d*f*(1 + n)) + (a^2*HypergeometricFunctions._₂F₁(1, (1 + n)/2, (3 + n)/2, -tan(e + f*x)^2)*(d*tan(e + f*x))^(1 + n))/(d*f*(1 + n)) + (2*a*b*(cos(e + f*x)^2)^((2 + n)/2)*HypergeometricFunctions._₂F₁((1 + n)/2, (2 + n)/2, (3 + n)/2, sin(e + f*x)^2)*sec(e + f*x)*(d*tan(e + f*x))^(1 + n))/(d*f*(1 + n))] @test_int [(d*tan(e + f*x))^n*(a + b*sec(e + f*x))^1, x, 4, (a*HypergeometricFunctions._₂F₁(1, (1 + n)/2, (3 + n)/2, -tan(e + f*x)^2)*(d*tan(e + f*x))^(1 + n))/(d*f*(1 + n)) + (b*(cos(e + f*x)^2)^((2 + n)/2)*HypergeometricFunctions._₂F₁((1 + n)/2, (2 + n)/2, (3 + 
n)/2, sin(e + f*x)^2)*sec(e + f*x)*(d*tan(e + f*x))^(1 + n))/(d*f*(1 + n))] @test_int [(d*tan(e + f*x))^n/(a + b*sec(e + f*x))^1, x, -1, (d*AppellF1(1 - n, (1 - n)/2, (1 - n)/2, 2 - n, (a + b)/(a + b*sec(e + f*x)), (a - b)/(a + b*sec(e + f*x)))*(-((b*(1 - sec(e + f*x)))/(a + b*sec(e + f*x))))^((1 - n)/2)*((b*(1 + sec(e + f*x)))/(a + b*sec(e + f*x)))^((1 - n)/2)*(d*tan(e + f*x))^(-1 + n)*(-tan(e + f*x)^2)^((1 - n)/2 + (1/2)*(-1 + n)))/(a*f*(1 - n)) - (d*HypergeometricFunctions._₂F₁(1, (1 + n)/2, (3 + n)/2, -tan(e + f*x)^2)*(d*tan(e + f*x))^(-1 + n)*(-tan(e + f*x)^2)^((1 - n)/2 + (1 + n)/2))/(a*f*(1 + n))] #= [(d*tan(e + f*x))^n/(a + b*sec(e + f*x))^2, x, 0, 0] =# @test_int [(e*tan(c + d*x))^m*(a + b*sec(c + d*x))^(3/2), x, 0, Unintegrable((a + b*sec(c + d*x))^(3/2)*(e*tan(c + d*x))^m, x)] @test_int [(e*tan(c + d*x))^m*(a + b*sec(c + d*x))^(1/2), x, 0, Unintegrable(sqrt(a + b*sec(c + d*x))*(e*tan(c + d*x))^m, x)] @test_int [(e*tan(c + d*x))^m/(a + b*sec(c + d*x))^(1/2), x, 0, Unintegrable((e*tan(c + d*x))^m/sqrt(a + b*sec(c + d*x)), x)] @test_int [(e*tan(c + d*x))^m/(a + b*sec(c + d*x))^(3/2), x, 0, Unintegrable((e*tan(c + d*x))^m/(a + b*sec(c + d*x))^(3/2), x)] #= ::Subsection::Closed:: =# #=Integrands*of*the*form*(d*tan(e+f*x))^n*(a+b*sec(e+f*x))^m*with*m*symbolic=# @test_int [(e*tan(c + d*x))^m*(a + b*sec(c + d*x))^n, x, 0, Unintegrable((a + b*sec(c + d*x))^n*(e*tan(c + d*x))^m, x)] @test_int [tan(c + d*x)^5*(a + b*sec(c + d*x))^n, x, 5, -((a*(a^2 - 2*b^2)*(a + b*sec(c + d*x))^(1 + n))/(b^4*d*(1 + n))) - (HypergeometricFunctions._₂F₁(1, 1 + n, 2 + n, 1 + (b*sec(c + d*x))/a)*(a + b*sec(c + d*x))^(1 + n))/(a*d*(1 + n)) + ((3*a^2 - 2*b^2)*(a + b*sec(c + d*x))^(2 + n))/(b^4*d*(2 + n)) - (3*a*(a + b*sec(c + d*x))^(3 + n))/(b^4*d*(3 + n)) + (a + b*sec(c + d*x))^(4 + n)/(b^4*d*(4 + n))] @test_int [tan(c + d*x)^3*(a + b*sec(c + d*x))^n, x, 4, -((a*(a + b*sec(c + d*x))^(1 + n))/(b^2*d*(1 + n))) + (HypergeometricFunctions._₂F₁(1, 1 + n, 2 + n, 1 + (b*sec(c + d*x))/a)*(a + 
b*sec(c + d*x))^(1 + n))/(a*d*(1 + n)) + (a + b*sec(c + d*x))^(2 + n)/(b^2*d*(2 + n))] @test_int [tan(c + d*x)^1*(a + b*sec(c + d*x))^n, x, 2, -((HypergeometricFunctions._₂F₁(1, 1 + n, 2 + n, 1 + (b*sec(c + d*x))/a)*(a + b*sec(c + d*x))^(1 + n))/(a*d*(1 + n)))] @test_int [cot(c + d*x)^1*(a + b*sec(c + d*x))^n, x, 8, -((HypergeometricFunctions._₂F₁(1, 1 + n, 2 + n, (a + b*sec(c + d*x))/(a - b))*(a + b*sec(c + d*x))^(1 + n))/(2*(a - b)*d*(1 + n))) - (HypergeometricFunctions._₂F₁(1, 1 + n, 2 + n, (a + b*sec(c + d*x))/(a + b))*(a + b*sec(c + d*x))^(1 + n))/(2*(a + b)*d*(1 + n)) + (HypergeometricFunctions._₂F₁(1, 1 + n, 2 + n, 1 + (b*sec(c + d*x))/a)*(a + b*sec(c + d*x))^(1 + n))/(a*d*(1 + n))] @test_int [cot(c + d*x)^3*(a + b*sec(c + d*x))^n, x, 10, (HypergeometricFunctions._₂F₁(1, 1 + n, 2 + n, (a + b*sec(c + d*x))/(a - b))*(a + b*sec(c + d*x))^(1 + n))/(2*(a - b)*d*(1 + n)) + (HypergeometricFunctions._₂F₁(1, 1 + n, 2 + n, (a + b*sec(c + d*x))/(a + b))*(a + b*sec(c + d*x))^(1 + n))/(2*(a + b)*d*(1 + n)) - (HypergeometricFunctions._₂F₁(1, 1 + n, 2 + n, 1 + (b*sec(c + d*x))/a)*(a + b*sec(c + d*x))^(1 + n))/(a*d*(1 + n)) - (b*HypergeometricFunctions._₂F₁(2, 1 + n, 2 + n, (a + b*sec(c + d*x))/(a - b))*(a + b*sec(c + d*x))^(1 + n))/(4*(a - b)^2*d*(1 + n)) + (b*HypergeometricFunctions._₂F₁(2, 1 + n, 2 + n, (a + b*sec(c + d*x))/(a + b))*(a + b*sec(c + d*x))^(1 + n))/(4*(a + b)^2*d*(1 + n))] @test_int [tan(c + d*x)^4*(a + b*sec(c + d*x))^n, x, 0, Unintegrable((a + b*sec(c + d*x))^n*tan(c + d*x)^4, x)] @test_int [tan(c + d*x)^2*(a + b*sec(c + d*x))^n, x, 9, (sqrt(2)*(a + b)*AppellF1(1/2, 1/2, -1 - n, 3/2, (1/2)*(1 - sec(c + d*x)), (b*(1 - sec(c + d*x)))/(a + b))*(a + b*sec(c + d*x))^n*tan(c + d*x))/(((a + b*sec(c + d*x))/(a + b))^n*(b*d*sqrt(1 + sec(c + d*x)))) - (sqrt(2)*a*AppellF1(1/2, 1/2, -n, 3/2, (1/2)*(1 - sec(c + d*x)), (b*(1 - sec(c + d*x)))/(a + b))*(a + b*sec(c + d*x))^n*tan(c + d*x))/(((a + b*sec(c + d*x))/(a + b))^n*(b*d*sqrt(1 + sec(c + d*x)))) - Unintegrable((a + 
b*sec(c + d*x))^n, x)] @test_int [cot(c + d*x)^2*(a + b*sec(c + d*x))^n, x, 0, Unintegrable(cot(c + d*x)^2*(a + b*sec(c + d*x))^n, x)] @test_int [cot(c + d*x)^4*(a + b*sec(c + d*x))^n, x, 0, Unintegrable(cot(c + d*x)^4*(a + b*sec(c + d*x))^n, x)] @test_int [tan(c + d*x)^(3/2)*(a + b*sec(c + d*x))^n, x, 0, Unintegrable((a + b*sec(c + d*x))^n*tan(c + d*x)^(3/2), x)] @test_int [tan(c + d*x)^(1/2)*(a + b*sec(c + d*x))^n, x, 0, Unintegrable((a + b*sec(c + d*x))^n*sqrt(tan(c + d*x)), x)] @test_int [1/tan(c + d*x)^(1/2)*(a + b*sec(c + d*x))^n, x, 0, Unintegrable((a + b*sec(c + d*x))^n/sqrt(tan(c + d*x)), x)] @test_int [1/tan(c + d*x)^(3/2)*(a + b*sec(c + d*x))^n, x, 0, Unintegrable((a + b*sec(c + d*x))^n/tan(c + d*x)^(3/2), x)] #= ::Section:: =# #=Integrands*of*the*form*(d*cot(e+f*x))^m*(a+b*sec(e+f*x))^m=# end
{ "alphanum_fraction": 0.429993517, "author": null, "avg_line_length": 241.0941176471, "converted": null, "ext": "jl", "file": null, "hexsha": "b8e466ca0c0eec9224cedf6379435594883af5ae", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "dd117985dea699be7ddf14c3e6bdf4f407bb8da6", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "gronniger/RubiSymbolics.jl", "max_forks_repo_path": "test/4 Trig functions/4.5 Secant/4.5.1.4 (d tan)^n (a+b sec)^m.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "dd117985dea699be7ddf14c3e6bdf4f407bb8da6", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "gronniger/RubiSymbolics.jl", "max_issues_repo_path": "test/4 Trig functions/4.5 Secant/4.5.1.4 (d tan)^n (a+b sec)^m.jl", "max_line_length": 2655, "max_stars_count": null, "max_stars_repo_head_hexsha": "dd117985dea699be7ddf14c3e6bdf4f407bb8da6", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "gronniger/RubiSymbolics.jl", "max_stars_repo_path": "test/4 Trig functions/4.5 Secant/4.5.1.4 (d tan)^n (a+b sec)^m.jl", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 73039, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 143451 }
from pandex import *
import pandas as pd
import numpy as np
import plotly_express as px
import dash_core_components as dcc


def rand_df(rows=5, cols=3, cumsum=True):
    """Build a ``rows`` x ``cols`` DataFrame of standard-normal samples.

    Parameters
    ----------
    rows, cols : int
        Shape of the generated frame.
    cumsum : bool or str
        When False (boolean) or the string ``'False'``, return the raw
        samples; otherwise return their cumulative sum, which produces
        random-walk series that render nicely as line charts.

    Returns
    -------
    pandas.DataFrame
    """
    df = pd.DataFrame(np.random.randn(rows, cols))
    # Bug fix: the original tested only `cumsum == 'False'` (string), so the
    # boolean False passed by ScatterChart below never matched and the
    # scatter chart silently received cumulative data. Accept both forms.
    if cumsum is False or cumsum == 'False':
        return df
    return df.cumsum()


# NOTE(review): 'dbaord' is a misspelling of 'dashboard'; name kept unchanged
# so any external reference to this module-level object keeps working.
# Layout: first row holds bar + line charts, second row the scatter chart.
dbaord = SimpleDashboard(
    title='Example Pandex Dashboard',
    charts=[
        [
            BarChart('Bar', rand_df),
            LineChart('Line', rand_df, {'rows': 200})
        ],
        [
            ScatterChart('Scatter', rand_df, {'rows': 20, 'cumsum': False})
        ],
    ]
)

if __name__ == '__main__':
    dbaord.run(debug=True, port=8055)
{ "alphanum_fraction": 0.6219879518, "author": null, "avg_line_length": 22.8965517241, "converted": null, "ext": "py", "file": null, "hexsha": "f140140047a98000954ef2431603e22e22876fb2", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8d1184e50992eda490ae135769df07c5f83eb014", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "tdrobbin/pandex", "max_forks_repo_path": "example1.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "8d1184e50992eda490ae135769df07c5f83eb014", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "tdrobbin/pandex", "max_issues_repo_path": "example1.py", "max_line_length": 79, "max_stars_count": null, "max_stars_repo_head_hexsha": "8d1184e50992eda490ae135769df07c5f83eb014", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "tdrobbin/pandex", "max_stars_repo_path": "example1.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 172, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 664 }
import numpy as np
import pandas as pd
import scipy
from sklearn import metrics
from FPMax import FPMax
from Apriori import Apriori
from MASPC import MASPC
import csv
from scipy.cluster.hierarchy import fcluster
from scipy.cluster.hierarchy import linkage
from optbinning import ContinuousOptimalBinning

# pd.set_option('display.max_colwidth', -1)
# pd.options.display.max_columns = None
pd.options.display.width = 0


class MASPC_Engine():
    """Driver for the MASPC clustering pipeline over a patient-diagnosis CSV.

    The input CSV is expected to have a header row with columns
    ``sex``, ``age``, ``DX1``..``DXn`` (diagnosis codes), as evidenced by the
    column handling below. The constructor sorts and cleans the data, writes
    the Apriori/FPMax input files, and one-hot encodes the demographics;
    ``runAnalyzer`` then runs MASPC (MAS pattern mining + PC clustering) and
    writes the cluster assignments to /tmp/clusteringResults.csv.
    """

    def __init__(self, inputFileName, myMinAc=None, myMinOv=None,
                 myMinSup=None, myK=None, myContainsTemporal=False,
                 myAutoDiscretize=False, myDiscretizeStrategy=None, myQ=4):
        """Load, clean and pre-encode the input CSV.

        :param inputFileName: path to the raw CSV (sex, age, DX1..DXn).
        :param myMinAc: MASPC minimum all-confidence; may be None and filled
            in later by autogenerateParameters().
        :param myMinOv: MASPC minimum overlap (same remark).
        :param myMinSup: MASPC minimum support (same remark).
        :param myK: number of clusters for PC (same remark).
        :param myContainsTemporal: stored only; not read anywhere in this
            class — presumably callers invoke processTemporal() themselves
            (TODO confirm).
        :param myAutoDiscretize: when True, bin the numeric 'age' column via
            __autoDiscretize() before one-hot encoding.
        :param myDiscretizeStrategy: 'optbin' or 'pqcut', see __autoDiscretize().
        :param myQ: number of quantiles for the 'pqcut' strategy.
        """
        print("inside maspc_engine constructor with "+inputFileName)
        self.inputFileName = inputFileName
        self.outputFileFolder = '/tmp/'
        self.sortedInputFile = self.outputFileFolder+'sortedInputFile.csv'
        self.myMinAc = myMinAc
        self.myMinOv = myMinOv
        self.myMinSup = myMinSup
        self.myK = myK
        self.myContainsTemporal = myContainsTemporal
        self.myAutoDiscretize = myAutoDiscretize
        self.myDiscretizeStrategy = myDiscretizeStrategy
        self.myQ = myQ
        # First thing we do is sort the input file: diagnosis codes are
        # sorted within each row so identical code sets compare equal.
        self.__sortInputFile(self.inputFileName)
        self.rtDataFrame = pd.read_csv(self.sortedInputFile, dtype=str)
        # remove any rows that may have empty diagnosis, sex, or age
        self.rtDataFrame.dropna(subset=['DX1', 'age', 'sex'], inplace=True)
        self.rtDataFrame.reset_index(drop=True, inplace=True)
        # Extract diagnoses for processing (Diagnosis DataFrame)
        rtDataFrameDiags = self.rtDataFrame.drop(['age', 'sex'], axis=1).copy()
        # All diagnosis columns start with 'D' (DX1..DXn)
        diagColumns = [str(i) for i in list(rtDataFrameDiags.columns.values) if i.startswith('D')]
        uvalues = list(pd.unique(self.rtDataFrame[diagColumns].values.ravel('K')))
        uniqueDiags = [x for x in uvalues if str(x) != 'nan']
        # process the list of unique diagnoses to update diagnosis dataframe
        # and write file for fpmax and apriori
        self.__writeInputFile(uniqueDiags, rtDataFrameDiags)
        # One hot encoding of sex:
        self.demographic = pd.get_dummies(self.rtDataFrame['sex'])
        self.rtDataFrame = self.rtDataFrame.drop('sex', axis=1)
        # This runs for when input values are just integers and need to be
        # discretized into bins first.
        if (self.myAutoDiscretize):
            self.rtDataFrame = self.__autoDiscretize(self.rtDataFrame, self.myDiscretizeStrategy, self.myQ)
        # one hot encoding for age (this assumes that the auto-discretization
        # has already happened or the data came pre-binned)
        one_hot = pd.get_dummies(self.rtDataFrame['age'])
        self.rtDataFrame = self.rtDataFrame.drop('age', axis=1)
        self.demographic = self.demographic.join(one_hot)
        # print(demographic)
        # Read dignosisCodes.txt as input for dignosis codes
        # dignosis = open('dignosisCodes.txt', 'r')
        # dignosisCodes = [line[:-2].split(' ') for line in dignosis.readlines()]
        # print(type(dignosisCodes))
        # Build one list of diagnosis codes per patient (row).
        # this is for pandas 1.0.5 or versions that give ValueErrors for the
        # next statement
        self.dignosisCodes = self.rtDataFrame.stack().groupby(level=0).apply(list).tolist()
        # this is for pandas 1.24.0+ I believe but will complain about
        # different size lists in other versions
        # dignosisCodes = rtDataFrame.T.apply(lambda x: x.dropna().tolist()).tolist()
        # print(dignosisCodes)
        # Check diagnosis codes
        # Each row is diagnosis codes of a patient
        # print('all diagnosis codes')
        # for i in dignosisCodes:
        #     print(i)

    def processOneDiagPerLine(self):
        """Collapse a one-diagnosis-per-line CSV into one-row-per-patient.

        Consecutive rows whose first two columns match are treated as the
        same patient and their third column (a diagnosis code) is appended.
        The result is written back over ``self.inputFileName`` with header
        sex, age, DX1..DXn.

        NOTE(review): grouping is on the first two columns of *consecutive*
        rows — assumes the input is already sorted by patient.
        """
        allRows = []
        maxNumOfDiagColumns = 0
        with open(self.inputFileName) as csvfile:
            fileReader = csv.reader(csvfile)
            header = next(fileReader)
            processingRow = []
            for row in fileReader:
                if (len(processingRow)):
                    # Same first two columns as the row being built ->
                    # same patient, append just the diagnosis code.
                    if (row[0] == processingRow[0] and row[1] == processingRow[1]):
                        processingRow.append(row[2])
                    else:
                        # New patient: flush the accumulated row and track
                        # the widest diagnosis count seen so far.
                        allRows.append(processingRow)
                        if ((len(processingRow)-2) > maxNumOfDiagColumns):
                            maxNumOfDiagColumns = len(processingRow)-2
                        processingRow = []
                        for element in row:
                            processingRow.append(element)
                else:
                    # Very first data row: seed the accumulator.
                    for element in row:
                        processingRow.append(element)
            # for last element outside of loop
            allRows.append(processingRow)
            if ((len(processingRow)-2) > maxNumOfDiagColumns):
                maxNumOfDiagColumns = len(processingRow)-2
        # dynamically create diag columns from processed input
        allColumns = ['sex', 'age']
        for i in range(1, (maxNumOfDiagColumns+1)):
            allColumns.append('DX'+str(i))
        # use pandas dataframe from list of lists for easy output
        df = pd.DataFrame(allRows, columns=allColumns)
        df.to_csv(self.inputFileName, index=False)

    def processTemporal(self):
        """Drop every column whose name contains 'Date' and rewrite the file.

        Reads ``self.inputFileName``, removes date columns in place, prints
        the resulting frame, and writes it back over the same path.
        """
        self.rtDataFrame = pd.read_csv(self.inputFileName, dtype=str)
        for col in self.rtDataFrame.columns:
            if ('Date' in col):
                self.rtDataFrame.drop([col], axis=1, inplace=True)
        print(self.rtDataFrame)
        self.rtDataFrame.to_csv(self.inputFileName, index=False)

    def autogenerateParameters(self):
        """Search for workable MASPC parameters automatically.

        Fixes minOv = 3, then repeatedly lowers minSup (dividing by 10) while
        trying three all-confidence values (0.5, 0.25, 0.1). The first
        combination that yields more than one MFA and clusters more than 10%
        of the records is accepted and stored into self.myMinSup / myMinAc /
        myK.

        NOTE(review): if no combination ever passes the threshold this loop
        does not terminate (minSup just keeps shrinking toward 0).
        """
        accept = False
        numOfRecords = len(self.rtDataFrame.index)
        # accept only runs clustering at least 10% of records
        clusteredRecordsThreshold = .1
        self.myMinOv = 3
        # initial support guess, just under 1/10
        temp_ms = (1-(1/numOfRecords))/10
        maspc1 = MASPC(self.demographic, self.dignosisCodes)
        maspc2 = MASPC(self.demographic, self.dignosisCodes)
        maspc3 = MASPC(self.demographic, self.dignosisCodes)
        while (not accept):
            print('trying ... '+str(temp_ms))
            run = []
            # run 3 all-confidence parameters
            maspc1.MAS(minSup=float(temp_ms), minAc=float(0.5), minOv=float(self.myMinOv))
            if (len(maspc1.MFAs) > 1):
                maspc1.PC(k=float(len(maspc1.MFAs)), method='average', metric='cosine')
                if (len(maspc1.binaryData.index)/numOfRecords > clusteredRecordsThreshold):
                    run.append({'mfa':len(maspc1.MFAs),'ac':0.5,'clustered':len(maspc1.binaryData.index)})
            maspc2.MAS(minSup=float(temp_ms), minAc=float(0.25), minOv=float(self.myMinOv))
            if (len(maspc2.MFAs) > 1):
                maspc2.PC(k=float(len(maspc2.MFAs)), method='average', metric='cosine')
                if (len(maspc2.binaryData.index)/numOfRecords > clusteredRecordsThreshold):
                    run.append({'mfa':len(maspc2.MFAs),'ac':0.25,'clustered':len(maspc2.binaryData.index)})
            maspc3.MAS(minSup=float(temp_ms), minAc=float(0.1), minOv=float(self.myMinOv))
            if (len(maspc3.MFAs) > 1):
                maspc3.PC(k=float(len(maspc3.MFAs)), method='average', metric='cosine')
                if (len(maspc3.binaryData.index)/numOfRecords > clusteredRecordsThreshold):
                    run.append({'mfa':len(maspc3.MFAs),'ac':0.1,'clustered':len(maspc3.binaryData.index)})
            # after 3 runs, check to see if one passed our threshold
            if (len(run) > 0):
                # first passing run wins (highest all-confidence tried first)
                self.myMinSup = temp_ms
                self.myMinAc = run[0]['ac']
                self.myK = run[0]['mfa']
                accept = True
            else:
                # cycle: lower support by an order of magnitude and retry
                temp_ms = temp_ms/10
        print('k = '+str(self.myK))
        print('minOv = '+str(self.myMinOv))
        print('minAc = '+str(self.myMinAc))
        print('minSup = '+str(self.myMinSup))
        print('num of records = '+str(numOfRecords))
        print('num clustered = '+str(run[0]['clustered']))

    def runAnalyzer(self):
        """Run MASPC with the configured parameters and save the clusters.

        Returns False early when MAS finds no MFAs; otherwise clusters,
        annotates the binary representation with labels and the parameter
        values, writes /tmp/clusteringResults.csv, and returns True.
        """
        # Run MASPC
        # Input parameters for given dataset: minSup=0.33, minAc=0.5, minOv=3, k=3
        # method='average' and metric='cosine' are parameters for agglomerative
        # average-linkage hierarchical clustering
        maspc = MASPC(self.demographic, self.dignosisCodes)
        maspc.MAS(minSup=float(self.myMinSup), minAc=float(self.myMinAc), minOv=float(self.myMinOv))
        # check number of MFAs before continuing processing
        if (len(maspc.MFAs) < 1):
            return False
        maspc.PC(k=float(self.myK), method='average', metric='cosine')
        # Check results of MASPC
        # Check MFAs
        # print("MFAs:")
        # print(maspc.MFAs)
        # Check clustering results
        # print("Cluster Results:")
        # print(maspc.ClusterResult)
        # Add label to binary representation
        maspc.binaryData['label']=maspc.ClusterResult
        # Stash the run parameters into the first four rows of a side column.
        # NOTE(review): chained indexing with .iloc may raise
        # SettingWithCopyWarning on some pandas versions — verify.
        maspc.binaryData['parameters'] = None
        maspc.binaryData['parameters'].iloc[0] = 'minOv = '+str(self.myMinOv)
        maspc.binaryData['parameters'].iloc[1] = 'minAc = '+str(self.myMinAc)
        maspc.binaryData['parameters'].iloc[2] = 'minSup = '+str(round(float(self.myMinSup), 2))
        maspc.binaryData['parameters'].iloc[3] = 'k = '+str(self.myK)
        # print(maspc.binaryData)
        maspc.binaryData.to_csv('/tmp/clusteringResults.csv', index=False)
        # print(maspc.binaryData.label.unique())
        # for i in range(1, len(maspc.binaryData.label.unique())+1):
        #     print('Cluster '+str(i))
        #     print(maspc.binaryData.groupby(['label']).get_group(i))
        # SI and CI
        # Get all unique diagnosis codes and build a binary representation for evaluation
        allUniqueCodes=[]
        for i in self.dignosisCodes:
            for j in i:
                allUniqueCodes.append(j)
        allUniqueCodes=list(set(allUniqueCodes))
        # wrap each code in a singleton list
        new_list = [allUniqueCodes[i:i+1] for i in range(0, len(allUniqueCodes), 1)]
        w, h = len(allUniqueCodes), len(self.dignosisCodes);
        atables=[[0 for x in range(w)] for y in range(h)]
        # project maximum set of independent frequnet patterns
        for i,j in enumerate(self.dignosisCodes):
            temp=set(j)
            #print temp
            for a,b in enumerate(new_list):
                while(set(b)<=temp):
                    temp=temp.difference(set(b))
                    #print temp
                    atables[i][a]+=1
        diga_codes=pd.DataFrame(atables, columns=[str(sublist) for sublist in allUniqueCodes])
        # Binary representation for evaluation
        # NOTE(review): testdata is only consumed by the commented-out
        # CI/SI metric lines below.
        testdata=pd.concat([self.demographic, diga_codes], axis=1, sort=False)
        # CI and SI
        # print('CI: '+str(metrics.calinski_harabasz_score(testdata.values, maspc.ClusterResult.tolist())))
        # print('SI: '+str(metrics.silhouette_score(testdata.values, maspc.ClusterResult.tolist(), metric='cosine')))
        return True

    def __writeInputFile(self, uniqueDiags, rtDataFrameDiags):
        """Write the combined Apriori/FPMax input file (dignosisCodes.txt).

        Emits a header that maps each diagnosis code to an integer item id
        ('@ITEM=<n>=<code>'), rewrites rtDataFrameDiags in place so codes
        become those ids (replace(..., inplace=True)), then appends the
        transaction rows to the header file.

        :param uniqueDiags: list of unique diagnosis codes (sorted in place).
        :param rtDataFrameDiags: diagnosis-only DataFrame; mutated in place.
        """
        #print(uniqueDiags)
        uniqueDiags.sort()
        print('# of unique diag values = '+str(len(uniqueDiags)))
        # iterate through the values and put them into a format for the apriori
        diagHeaderFileName = "dignosisCodes.txt"
        diagHeaderFile = open(diagHeaderFileName, "w")
        print('@CONVERTED_FROM_TEXT', file=diagHeaderFile)
        itemCounter = 1
        for v in uniqueDiags:
            # print(str(v))
            # print(type(v))
            if (str(v) != 'nan'):
                print('@ITEM='+str(itemCounter)+'='+str(v), file=diagHeaderFile)
                rtDataFrameDiags.replace(v, str(itemCounter), inplace=True)
                itemCounter = itemCounter + 1
        diagHeaderFile.close()
        print('1/2 Apriori and FPMax input file written.')
        # we need to create the bottom half of the diagnosis file for the
        # Apriori and FPMax
        rtDataFrameDiags.to_csv('dignosisCodes2.txt', sep=' ', index=False, header=False)
        # append the diagnosesCodes2 to dignosisCodes
        f1 = open(diagHeaderFileName, 'a+')
        f2 = open('dignosisCodes2.txt', 'r')
        f1.write(f2.read())
        f2.close()
        f1.close()
        print('2/2 Complete Apriori and FPMax input file written.')

    def __autoDiscretize(self, rtDataFrame, myDiscretizeStrategy, myQ):
        """Bin the numeric 'age' column using the chosen strategy.

        :param rtDataFrame: frame whose 'age' column is coerced to numeric
            and replaced by bin labels.
        :param myDiscretizeStrategy: 'optbin' (ContinuousOptimalBinning) or
            'pqcut' (pandas quantile cut); any other value leaves 'age'
            numeric but unbinned.
        :param myQ: quantile count for 'pqcut'.
        :return: the frame with 'age' discretized.
        """
        rtDataFrame["age"] = pd.to_numeric(rtDataFrame["age"])
        if ('optbin' == myDiscretizeStrategy):
            # NOTE(review): age is used as both the binning variable and the
            # target in fit() — confirm this is intended for optbinning.
            optb = ContinuousOptimalBinning(name='age', dtype='numerical')
            optb.fit(rtDataFrame.age.values, rtDataFrame.age.values)
            correspondingBins = optb.transform(rtDataFrame.age.values, metric='bins')
            newColumn = pd.DataFrame({'age': correspondingBins})
            rtDataFrame.drop(['age'], axis=1, inplace=True)
            # Combine original dataframe with replaced values of binning process
            rtDataFrame = pd.concat([rtDataFrame, newColumn], axis=1)
        elif ('pqcut' == myDiscretizeStrategy):
            rtDataFrame['age'] = pd.qcut(rtDataFrame.age, q=int(myQ))
            # NOTE(review): astype(str) returns a new Series; this line has
            # no effect as written — confirm whether assignment was intended.
            rtDataFrame.age.astype(str)
        return rtDataFrame

    def __sortInputFile(self, inputFileName):
        """Copy the input CSV to self.sortedInputFile with per-row sorting.

        For each data row, keeps the first two columns (sex, age) in place,
        drops empty diagnosis cells, and writes the remaining diagnosis codes
        in sorted order. Rows are NOT padded back to a fixed width.
        """
        with open(inputFileName) as csvfile:
            with open(self.sortedInputFile, 'w') as outputCSVFile:
                inputFileReader = csv.reader(csvfile)
                outputFileWriter = csv.writer(outputCSVFile)
                header = next(inputFileReader)  # skipping header
                outputFileWriter.writerow(header)
                numOfColumns = len(header)
                for row in inputFileReader:
                    # drop blanks, then sort the diagnosis codes
                    rowDiags = [ x for x in row[2:] if x != '' ]
                    rowDiags.sort()
                    listToWrite = row[:2]
                    listToWrite.extend(rowDiags)
                    # pad = numOfColumns-len(listToWrite)
                    # for i in range(0,pad):
                    #     listToWrite.append('')
                    outputFileWriter.writerow(listToWrite)
{ "alphanum_fraction": 0.6142380581, "author": null, "avg_line_length": 42.6939393939, "converted": null, "ext": "py", "file": null, "hexsha": "80a6384fe2a75d97fac98e4b46cb352f64286356", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0ce893a666974674fad36591f0156bd720910b4d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "dominicamartinez/clustehr", "max_forks_repo_path": "src/MASPC_Engine.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "0ce893a666974674fad36591f0156bd720910b4d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "dominicamartinez/clustehr", "max_issues_repo_path": "src/MASPC_Engine.py", "max_line_length": 119, "max_stars_count": null, "max_stars_repo_head_hexsha": "0ce893a666974674fad36591f0156bd720910b4d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "dominicamartinez/clustehr", "max_stars_repo_path": "src/MASPC_Engine.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3426, "path": null, "reason": "import numpy,import scipy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 14089 }
using Flux

# ResidualBlock is used with `SkipConnection`
# using Parallel, first extract features, then combine them together
# separate filters

# Thin wrapper around a layer (a `SkipConnection` everywhere in this file)
# that applies an elementwise `leakyrelu` to the wrapped layer's output.
struct ResidualBlock
    block   # the wrapped Flux layer
end

# Register the struct with Flux so its parameters are collected by
# `Flux.params` and updated during training.
Flux.@functor ResidualBlock

# Forward pass: feed `x` through the wrapped layer, then broadcast
# `leakyrelu` over the result.
(b::ResidualBlock)(x) = x |> b.block .|> leakyrelu

# Two 3x3x3 convolutions (16 => 16 channels; stride 1 with pad 1 preserves
# the spatial size) inside an identity skip connection. The trailing
# `leakyrelu` comes from the `ResidualBlock` wrapper.
function basic_block()
    layer = Chain(Conv((3, 3, 3), 16 => 16, leakyrelu; stride = 1, pad = 1),
                  Conv((3, 3, 3), 16 => 16; stride = 1, pad = 1))
    return ResidualBlock(SkipConnection(layer, +))
end

# Bottleneck-style residual block: 1x1x1 conv expands 8 => 16 channels,
# a `basic_block` processes the features, and a 1x1x1 conv projects back
# 16 => 8 so the `+` skip connection type-checks.
function residual_block()
    layer = Chain(Conv((1, 1, 1), 8 => 16, leakyrelu; stride = 1),
                  basic_block(),
                  Conv((1, 1, 1), 16 => 8; stride = 1))
    return ResidualBlock(SkipConnection(layer, +))
end

# Full network: lift 1 => 8 channels, apply three residual blocks, then
# project back 8 => 1 with a final 1x1x1 convolution. All convolutions
# here preserve the spatial dimensions.
function residual_net()
    init_conv = Conv((1, 1, 1), 1 => 8, leakyrelu; stride = 1)
    resblock1 = residual_block()
    resblock2 = residual_block()
    resblock3 = residual_block()
    final_conv = Conv((1, 1, 1), 8 => 1; stride = 1)
    return Chain(init_conv, resblock1, resblock2, resblock3, final_conv)
end

# final_conv = Conv((1, 1, 1), 16 => 2; stride = 1)
# basic_conv = Chain(Conv((3, 3, 3), 16 => 16, leakyrelu; stride = 1, pad = 1),
#                    Conv((3, 3, 3), 16 => 16; stride = 1, pad = 1))
#
# function mymodel()
#     return Chain(Parallel((x, y) -> cat(x, y, dims = 4),
#                           residual_net(),
#                           residual_net(),
#                           ),
#                  basic_block(),
#                  basic_block(),
#                  basic_block(),
#                  final_conv
#                  )
# end

# xx = randn(Float32, 16, 16, 16, 2, 1)
# xtrue = ones(Float32, 16, 16, 16, 2, 1)
# model = residual_net()
# model(xx)
# ps = Flux.params(model)
# opt = ADAM(0.001)
# loss(x, y) = Flux.Losses.mse(model(x), y)
# evalcb() = @show(loss(xx, xtrue))
# data = [(xx, xtrue)]
# @epochs 500 Flux.train!(loss, ps, data, opt, cb = evalcb)
{ "alphanum_fraction": 0.549870801, "author": null, "avg_line_length": 30.7142857143, "converted": null, "ext": "jl", "file": null, "hexsha": "0adce2470694137cb40bf498a20efb0767ac73a4", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "db722f13229285da4a127cefbdd251b3ee05a3a9", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ZongyuLi-umich/SPECTrecon.jl", "max_forks_repo_path": "flux/model.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "db722f13229285da4a127cefbdd251b3ee05a3a9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ZongyuLi-umich/SPECTrecon.jl", "max_issues_repo_path": "flux/model.jl", "max_line_length": 79, "max_stars_count": null, "max_stars_repo_head_hexsha": "db722f13229285da4a127cefbdd251b3ee05a3a9", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ZongyuLi-umich/SPECTrecon.jl", "max_stars_repo_path": "flux/model.jl", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 616, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 1935 }
[STATEMENT] theorem exists_split: "(\<exists>x y. P x \<and> Q y) = ((\<exists>x. P x) \<and> (\<exists>y. Q y))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<exists>x y. P x \<and> Q y) = ((\<exists>x. P x) \<and> (\<exists>y. Q y)) [PROOF STEP] by simp
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Auto2_HOL_HOL_Logic_Thms", "hexsha": null, "include": null, "lang": null, "length": 1, "llama_tokens": 120, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
//----------------------------------------------------------------------------
/// @file test_spinsort.cpp
/// @brief test program of the spinsort algorithm
///
/// @author Copyright (c) 2016 Francisco José Tapia (fjtapia@gmail.com )\n
///         Distributed under the Boost Software License, Version 1.0.\n
///         ( See accompanying file LICENSE_1_0.txt or copy at
///           http://www.boost.org/LICENSE_1_0.txt )
/// @version 0.1
///
/// @remarks
//-----------------------------------------------------------------------------
#include <ciso646>
#include <algorithm>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <vector>
#include <random>
#include <boost/sort/spinsort/spinsort.hpp>
#include <boost/test/included/test_exec_monitor.hpp>
#include <boost/test/test_tools.hpp>

using namespace boost::sort;
using spin_detail::check_stable_sort;
using spin_detail::range_sort;
using common::range;

void test1 ( );
void test2 ( );
void test3 ( );
void test4 ( );

//---------------- stability test -----------------------------------
// Element with a 28-bit sort key (num) and a 4-bit tag (tail).
// operator< compares num ONLY, so for equal keys the relative order of
// the tails reveals whether a sort was stable.
struct xk
{
    unsigned tail : 4;
    unsigned num : 28;
    xk ( uint32_t n =0 , uint32_t t =0): tail (t), num(n){};
    bool operator< (xk A) const { return (num < A.num); };
};

// test1: stability check on 800 000 random xk elements (8 rounds of
// NMAX, each round tagged in tail). spinsort must produce exactly the
// same element sequence as std::stable_sort.
void test1 ( )
{
    typedef std::less< xk > compare_t;
    std::mt19937_64 my_rand (0);    // fixed seed: deterministic input
    const uint32_t NMAX = 100000;

    std::vector< xk > V1, V2;
    V1.reserve (NMAX);
    for (uint32_t i = 0; i < 8; ++i)
    {
        for (uint32_t k = 0; k < NMAX; ++k)
        {
            uint32_t NM = my_rand ( );
            xk G;
            // Shift by 3 so keys repeat and stability is actually exercised.
            G.num = NM >> 3;
            G.tail = i;
            V1.push_back (G);
        };
    };
    V2 = V1;
    spinsort (V1.begin ( ), V1.end ( ), compare_t ( ));
    std::stable_sort (V2.begin ( ), V2.end ( ));

    BOOST_CHECK (V1.size ( ) == V2.size ( ));
    for (uint32_t i = 0; i < V1.size ( ); ++i)
    {
        // Both key and tag must match element-by-element: any stability
        // violation in spinsort would differ in tail.
        BOOST_CHECK (V1[ i ].num == V2[ i ].num and
                     V1[ i ].tail == V2[ i ].tail);
    };
};

// test2: correctness on four input shapes — random, already sorted,
// reverse sorted, and all-equal — 100 000 uint64_t each.
void test2 (void)
{
    typedef std::less< uint64_t > compare_t;
    const uint32_t NElem = 100000;
    std::vector< uint64_t > V1,V2;
    std::mt19937_64 my_rand (0);    // fixed seed: deterministic input
    compare_t comp;

    // ------------------------ random elements -------------------------------
    // Compare against std::stable_sort as the reference oracle.
    for (uint32_t i = 0; i < NElem; ++i) V1.push_back (my_rand ( ) % NElem);
    V2 = V1;
    spinsort (V1.begin ( ), V1.end ( ), comp);
    std::stable_sort (V2.begin ( ), V2.end ( ), comp);
    for (unsigned i = 0; i < NElem; i++)
    {
        BOOST_CHECK (V2[ i ] == V1[ i ]);
    };

    // --------------------------- sorted elements ----------------------------
    // Only ordering is checked here (no oracle needed).
    V1.clear ( );
    for (uint32_t i = 0; i < NElem; ++i) V1.push_back (i);
    spinsort (V1.begin ( ), V1.end ( ), comp);
    for (unsigned i = 1; i < NElem; i++)
    {
        BOOST_CHECK (V1[ i - 1 ] <= V1[ i ]);
    };

    //-------------------------- reverse sorted elements ----------------------
    V1.clear ( );
    for (uint32_t i = 0; i < NElem; ++i) V1.push_back (NElem - i);
    spinsort (V1.begin ( ), V1.end ( ), comp);
    for (unsigned i = 1; i < NElem; i++)
    {
        BOOST_CHECK (V1[ i - 1 ] <= V1[ i ]);
    };

    //---------------------------- equal elements ----------------------------
    V1.clear ( );
    for (uint32_t i = 0; i < NElem; ++i) V1.push_back (1000);
    spinsort (V1.begin ( ), V1.end ( ), comp);
    for (unsigned i = 1; i < NElem; i++)
    {
        BOOST_CHECK (V1[ i - 1 ] == V1[ i ]);
    };
};

// test3: exercises the internal spin_detail::check_stable_sort helper on
// two handcrafted 40-element sequences (mostly-sorted prefix plus an
// unsorted suffix). NOTE: results are only printed for visual inspection;
// this test contains no BOOST_CHECK assertions.
void test3 (void)
{
    typedef typename std::vector<uint64_t>::iterator iter_t ;
    typedef range<iter_t> range_it ;
    std::less<uint64_t> comp ;

    std::vector<uint64_t> V = { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23,
                                25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45,
                                47, 49, 51, 53, 55, 57, 59, 14, 2, 4, 6, 8,
                                10, 12, 16, 18, 20};
    range_it rdata (V.begin() , V.end());
    // Auxiliary buffer required by check_stable_sort (same size as data).
    std::vector<uint64_t> aux (40,0 );
    range_it raux ( aux.begin() , aux.end());
    check_stable_sort ( rdata, raux, comp );
    for ( uint32_t i =0 ; i < V.size() ; ++i) std::cout<<V[i]<<", ";
    std::cout<<std::endl;

    V = {59, 57, 55, 53, 51, 49, 47, 45, 43, 41, 39, 37, 35, 33, 31, 29, 27,
         25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1, 14, 2, 6, 16, 18,
         20, 12, 4, 8, 10};
    rdata = range_it (V.begin() , V.end());
    aux.assign (40,0);
    raux = range_it ( aux.begin() , aux.end());
    check_stable_sort ( rdata, raux, comp );
    for ( uint32_t i =0 ; i < V.size() ; ++i) std::cout<<V[i]<<", ";
    std::cout<<std::endl;
}

// test4: stability check with a known closed-form answer. Ten blocks of
// 0..NELEM-1 (tail = block index) are each shuffled independently; after a
// stable sort by num, position i must hold num == i/10 and tail == i%10
// (equal keys keep their block order).
void test4 (void)
{
    typedef typename std::vector<xk>::iterator iter_t;
    typedef std::less<xk> compare_t;
    std::mt19937 my_rand (0);   // fixed seed: deterministic shuffles
    std::vector<xk> V ;
    const uint32_t NELEM = 100000;
    V.reserve(NELEM * 10);

    for (uint32_t k =0 ; k < 10 ; ++k)
    {
        for ( uint32_t i =0 ; i < NELEM ; ++i)
        {
            V.emplace_back(i , k);  // xk(n, t): num = i, tail = k
        };
        // Shuffle only the block just appended.
        iter_t first = V.begin() + (k * NELEM);
        iter_t last = first + NELEM ;
        std::shuffle( first, last, my_rand);
    };
    spinsort( V.begin() , V.end(), compare_t());
    for ( uint32_t i =0 ; i < ( NELEM * 10); ++i)
    {
        BOOST_CHECK ( V[i].num == (i / 10) and V[i].tail == (i %10) );
    };
}

// Entry point supplied by Boost's test_exec_monitor.
int test_main (int, char *[])
{
    test1 ( );
    test2 ( );
    test3 ( );
    test4 ( );
    return 0;
};
{ "alphanum_fraction": 0.4604938272, "author": null, "avg_line_length": 31.3259668508, "converted": null, "ext": "cpp", "file": null, "hexsha": "a80d46697327effb6850bfef1aaa3329a26c7a0f", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 5, "max_forks_repo_forks_event_max_datetime": "2022-03-01T18:23:49.000Z", "max_forks_repo_forks_event_min_datetime": "2019-08-20T13:45:04.000Z", "max_forks_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_forks_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/sort/test/test_spinsort.cpp", "max_issues_count": 1, "max_issues_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_issues_repo_issues_event_max_datetime": "2019-05-24T01:36:31.000Z", "max_issues_repo_issues_event_min_datetime": "2019-03-04T11:21:00.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_issues_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/sort/test/test_spinsort.cpp", "max_line_length": 80, "max_stars_count": 32, "max_stars_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_stars_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/sort/test/test_spinsort.cpp", "max_stars_repo_stars_event_max_datetime": "2021-08-29T10:56:19.000Z", "max_stars_repo_stars_event_min_datetime": "2019-02-27T06:57:07.000Z", "num_tokens": 1796, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 5670 }
# coding:UTF-8
import numpy
import talib
import math
from sklearn.preprocessing import scale
from sklearn.preprocessing import minmax_scale


class ChartFeature(object):
    """Builds technical-indicator feature matrices (and regression labels)
    from futures/market price series.

    Each supported feature type is computed with TA-Lib and rescaled into
    [-1, 1] with sklearn's ``minmax_scale``; every indicator contributes one
    row to ``self.feature``.
    """

    def __init__(self, selector):
        # selector: iterable of feature-type names to extract.
        self.selector = selector
        # FIX: the original set literal was missing the comma after
        # "TBA_ROCP", so Python concatenated it with "TXFTWSE_RATIO_ROCP"
        # into one bogus entry and BOTH feature types were silently
        # rejected by extract().
        self.supported = {
            "ROCP", "MACD", "RSI", "VROCP", "BOLL", "MA", "VMA",
            "PRICE_VOLUME",
            "TXFTWSE_DIFF_ROCP", "EXFFXF_DIFF_ROCP", "UDV_ROCP",
            "BAV_ROCP", "TBA_ROCP",
            "TXFTWSE_RATIO_ROCP", "EXFFXF_RATIO_ROCP", "UDV_RATIO_ROCP",
            "BAV_RATIO_ROCP", "TBA_RATIO_ROCP",
        }
        # Accumulates one 1-D indicator array per extracted feature.
        self.feature = []

    def moving_extract(self, window=30, close_prices=None,
                       TXFTWSE_DIFF=None, EXFFXF_DIFF=None, UDV=None,
                       BAV=None, TBA=None,
                       TXFTWSE_RATIO=None, EXFFXF_RATIO=None,
                       UDV_RATIO=None, BAV_RATIO=None, TBA_RATIO=None,
                       volumes=None, with_label=True, flatten=True):
        """Slide a `window`-wide view over the extracted features and pair
        each slice with a clamped future-return label.

        Returns (features, labels, num_indicator_rows).
        """
        self._window_ = window  # also used as the BOLL lookback period
        self.extract(close_prices=close_prices,
                     TXFTWSE_DIFF=TXFTWSE_DIFF, EXFFXF_DIFF=EXFFXF_DIFF,
                     UDV=UDV, BAV=BAV, TBA=TBA,
                     TXFTWSE_RATIO=TXFTWSE_RATIO, EXFFXF_RATIO=EXFFXF_RATIO,
                     UDV_RATIO=UDV_RATIO, BAV_RATIO=BAV_RATIO,
                     TBA_RATIO=TBA_RATIO,
                     volumes=volumes)
        feature_arr = numpy.asarray(self.feature)
        p = 0
        rows = feature_arr.shape[0]
        print("feature dimension: %s" % rows)
        moving_features = []
        moving_labels = []
        dataLen = feature_arr.shape[1]
        while p + window < dataLen:
            # One sample: every indicator over [p, p + window).
            x = feature_arr[:, p:p + window]
            # Label horizon: up to the next `window` closes (clipped to end).
            tgt_idx = min([p + window * 2, dataLen - 1])
            fRngArr = close_prices[p + window:tgt_idx]
            if len(fRngArr):
                fRngMax = max(fRngArr)
                fRngMin = min(fRngArr)
                curClose_idx = max([p + window - 1, 0])
                curClose = close_prices[curClose_idx]
                # Guard against division by a near-zero close.
                if curClose > 0.01:
                    difUp = fRngMax - curClose
                    difDw = fRngMin - curClose
                    # Target the future extreme farther from the current
                    # close (the largest potential move, signed).
                    tgtFutureClose = fRngMax if abs(difUp) > abs(difDw) else fRngMin
                    p_change = (tgtFutureClose - curClose) / curClose * 100.0
                    # Clamp the label into [-1, 1].
                    p_change = max(-1, min(p_change, 1))
                    y = p_change
                    if flatten:
                        x = x.flatten("F")
                    # FIX: appends are guarded so a window with an empty
                    # future range (or ~0 close) can never reuse a stale
                    # `y` from a previous iteration.
                    moving_features.append(numpy.nan_to_num(x))
                    moving_labels.append(y)
            p += 1
        return numpy.asarray(moving_features), moving_labels, rows

    def extract(self, close_prices=None,
                TXFTWSE_DIFF=None, EXFFXF_DIFF=None, UDV=None, BAV=None,
                TBA=None,
                TXFTWSE_RATIO=None, EXFFXF_RATIO=None, UDV_RATIO=None,
                BAV_RATIO=None, TBA_RATIO=None,
                volumes=None):
        """Reset self.feature and run extract_by_type for every selected,
        supported feature type. Returns the populated self.feature list."""
        self.feature = []
        for feature_type in self.selector:
            if feature_type in self.supported:
                print("extracting feature : %s" % feature_type)
                self.extract_by_type(feature_type, close_prices,
                                     TXFTWSE_DIFF, EXFFXF_DIFF, UDV, BAV, TBA,
                                     TXFTWSE_RATIO, EXFFXF_RATIO, UDV_RATIO,
                                     BAV_RATIO, TBA_RATIO,
                                     volumes)
            else:
                print("feature type not supported: %s" % feature_type)
        return self.feature

    @staticmethod
    def normalise_windows(window_data):
        """Normalise each window relative to its first element.

        FIX: declared @staticmethod — the original method took no `self`,
        so calling it on an instance raised a TypeError.
        """
        normalised_data = []
        for window in window_data:
            normalised_window = [((float(p) / float(window[0])) - 1)
                                 for p in window]
            normalised_data.append(normalised_window)
        return normalised_data

    def extract_by_type(self, feature_type, close_prices=None,
                        TXFTWSE_DIFF=None, EXFFXF_DIFF=None, UDV=None,
                        BAV=None, TBA=None,
                        TXFTWSE_RATIO=None, EXFFXF_RATIO=None,
                        UDV_RATIO=None, BAV_RATIO=None, TBA_RATIO=None,
                        volumes=None):
        """Compute one indicator family and append its series (scaled into
        [-1, 1]) to self.feature."""
        if feature_type == 'ROCP':
            # 1-period rate of change of the close price.
            # NOTE(review): unlike every other branch this one appends the
            # RAW rocp, not a minmax-scaled copy (the original computed an
            # unused scaled variable) — preserved as-is; confirm intent.
            rocp = numpy.nan_to_num(talib.ROCP(close_prices, timeperiod=1))
            self.feature.append(rocp)
        if feature_type == 'OROCP':
            # NOTE(review): unreachable via extract() — 'OROCP' is not in
            # self.supported and open_prices is not defined in this scope.
            orocp = talib.ROCP(open_prices, timeperiod=1)
            self.feature.append(orocp)
        if feature_type == 'HROCP':
            # NOTE(review): unreachable via extract(); high_prices undefined.
            hrocp = talib.ROCP(high_prices, timeperiod=1)
            self.feature.append(hrocp)
        if feature_type == 'LROCP':
            # NOTE(review): unreachable via extract(); low_prices undefined.
            lrocp = talib.ROCP(low_prices, timeperiod=1)
            self.feature.append(lrocp)
        if feature_type == 'MACD':
            # MACD line, signal line and histogram (12/26/9 defaults).
            macd, signal, hist = talib.MACD(close_prices, fastperiod=12,
                                            slowperiod=26, signalperiod=9)
            macd = numpy.nan_to_num(macd)
            norm_macd = minmax_scale(macd, feature_range=(-1, 1))
            self.feature.append(norm_macd)
            signal = numpy.nan_to_num(signal)
            norm_signal = minmax_scale(signal, feature_range=(-1, 1))
            self.feature.append(norm_signal)
            hist = numpy.nan_to_num(hist)
            norm_hist = minmax_scale(hist, feature_range=(-1, 1))
            self.feature.append(norm_hist)
        if feature_type == 'RSI':
            # RSI at 6/12/24 periods, recentred from [0, 100] to [-0.5, 0.5]
            # before scaling. (The original also computed rsi*rocp values
            # that were never used; they have been removed.)
            rsi6 = talib.RSI(close_prices, timeperiod=6)
            rsi12 = talib.RSI(close_prices, timeperiod=12)
            rsi24 = talib.RSI(close_prices, timeperiod=24)
            rsi6 = numpy.nan_to_num(rsi6)
            rsi6 = rsi6 / 100.0 - 0.5
            norm_rsi6 = minmax_scale(rsi6, feature_range=(-1, 1))
            self.feature.append(norm_rsi6)
            rsi12 = numpy.nan_to_num(rsi12)
            rsi12 = rsi12 / 100.0 - 0.5
            norm_rsi12 = minmax_scale(rsi12, feature_range=(-1, 1))
            self.feature.append(norm_rsi12)
            rsi24 = numpy.nan_to_num(rsi24)
            rsi24 = rsi24 / 100.0 - 0.5
            norm_rsi24 = minmax_scale(rsi24, feature_range=(-1, 1))
            self.feature.append(norm_rsi24)
        if feature_type == 'VROCP':
            # Scaled raw volumes.
            norm_volumes = minmax_scale(volumes, feature_range=(-1, 1))
            self.feature.append(norm_volumes)
        if feature_type == 'BOLL':
            # Bollinger bands, expressed relative to the close price.
            upperband, middleband, lowerband = talib.BBANDS(
                close_prices, timeperiod=self._window_,
                nbdevup=2, nbdevdn=2, matype=0)
            upperband = (upperband - close_prices) / close_prices
            upperband = numpy.nan_to_num(upperband)
            norm_upperband = minmax_scale(upperband, feature_range=(-1, 1))
            self.feature.append(norm_upperband)
            middleband = (middleband - close_prices) / close_prices
            middleband = numpy.nan_to_num(middleband)
            norm_middleband = minmax_scale(middleband, feature_range=(-1, 1))
            self.feature.append(norm_middleband)
            lowerband = (lowerband - close_prices) / close_prices
            lowerband = numpy.nan_to_num(lowerband)
            norm_lowerband = minmax_scale(lowerband, feature_range=(-1, 1))
            self.feature.append(norm_lowerband)
        if feature_type == 'MA':
            # Moving averages of the close at 5/10/20/30/60 periods,
            # each expressed relative to the close.
            ma5 = talib.MA(close_prices, timeperiod=5)
            ma5_clo = (ma5 - close_prices) / close_prices
            ma5_clo = numpy.nan_to_num(ma5_clo)
            norm_ma5_clo = minmax_scale(ma5_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma5_clo)
            ma10 = talib.MA(close_prices, timeperiod=10)
            ma10_clo = (ma10 - close_prices) / close_prices
            ma10_clo = numpy.nan_to_num(ma10_clo)
            norm_ma10_clo = minmax_scale(ma10_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma10_clo)
            ma20 = talib.MA(close_prices, timeperiod=20)
            ma20_clo = (ma20 - close_prices) / close_prices
            ma20_clo = numpy.nan_to_num(ma20_clo)
            norm_ma20_clo = minmax_scale(ma20_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma20_clo)
            ma30 = talib.MA(close_prices, timeperiod=30)
            ma30_clo = (ma30 - close_prices) / close_prices
            ma30_clo = numpy.nan_to_num(ma30_clo)
            norm_ma30_clo = minmax_scale(ma30_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma30_clo)
            ma60 = talib.MA(close_prices, timeperiod=60)
            ma60_clo = (ma60 - close_prices) / close_prices
            ma60_clo = numpy.nan_to_num(ma60_clo)
            norm_ma60_clo = minmax_scale(ma60_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma60_clo)
        if feature_type == 'VMA':
            # Moving averages of volume relative to volume (+1 avoids
            # division by zero on zero-volume bars).
            ma5 = talib.MA(volumes, timeperiod=5)
            ma5_clo = ((ma5 - volumes) / (volumes + 1))
            ma5_clo = numpy.nan_to_num(ma5_clo)
            norm_ma5_clo = minmax_scale(ma5_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma5_clo)
            # FIX: the original computed ma10_clo and ma20_clo from ma5
            # (copy-paste bug), so the 10- and 20-period VMA features were
            # duplicates of the 5-period one.
            ma10 = talib.MA(volumes, timeperiod=10)
            ma10_clo = ((ma10 - volumes) / (volumes + 1))
            ma10_clo = numpy.nan_to_num(ma10_clo)
            norm_ma10_clo = minmax_scale(ma10_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma10_clo)
            ma20 = talib.MA(volumes, timeperiod=20)
            ma20_clo = ((ma20 - volumes) / (volumes + 1))
            ma20_clo = numpy.nan_to_num(ma20_clo)
            norm_ma20_clo = minmax_scale(ma20_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma20_clo)
        if feature_type == 'PRICE_VOLUME':
            # Product of price ROC and (standardised) volume ROC.
            rocp = talib.ROCP(close_prices, timeperiod=1)
            norm_volumes = ((volumes - numpy.mean(volumes))
                            / math.sqrt(numpy.var(volumes)))
            # Shift standardised volumes positive before ROCP.
            vrocp = talib.ROCP(norm_volumes + numpy.max(norm_volumes)
                               - numpy.min(norm_volumes), timeperiod=1)
            pv = rocp * vrocp * 100
            pv = numpy.nan_to_num(pv)
            norm_pv = minmax_scale(pv, feature_range=(-1, 1))
            self.feature.append(norm_pv)
        if feature_type == 'TXFTWSE_DIFF_ROCP':
            norm_volumes = minmax_scale(TXFTWSE_DIFF, feature_range=(-1, 1))
            self.feature.append(norm_volumes)
        if feature_type == 'TXFTWSE_RATIO_ROCP':
            norm_volumesr = minmax_scale(TXFTWSE_RATIO, feature_range=(-1, 1))
            self.feature.append(norm_volumesr)
        if feature_type == 'EXFFXF_DIFF_ROCP':
            norm_volumes = minmax_scale(EXFFXF_DIFF, feature_range=(-1, 1))
            self.feature.append(norm_volumes)
        if feature_type == 'EXFFXF_RATIO_ROCP':
            norm_volumes = minmax_scale(EXFFXF_RATIO, feature_range=(-1, 1))
            self.feature.append(norm_volumes)
        if feature_type == 'UDV_ROCP':
            UDV = numpy.nan_to_num(UDV)
            norm_volumes = minmax_scale(UDV, feature_range=(-1, 1))
            self.feature.append(norm_volumes)
        if feature_type == 'UDV_RATIO_ROCP':
            # Scaled ratio plus its 5/10/20-period MA deviations.
            norm_volumes = minmax_scale(UDV_RATIO, feature_range=(-1, 1))
            self.feature.append(norm_volumes)
            ma5 = talib.MA(UDV_RATIO, timeperiod=5)
            ma5_clo = (ma5 - UDV_RATIO) / UDV_RATIO
            ma5_clo = numpy.nan_to_num(ma5_clo)
            norm_ma5_clo = minmax_scale(ma5_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma5_clo)
            ma10 = talib.MA(UDV_RATIO, timeperiod=10)
            ma10_clo = (ma10 - UDV_RATIO) / UDV_RATIO
            ma10_clo = numpy.nan_to_num(ma10_clo)
            norm_ma10_clo = minmax_scale(ma10_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma10_clo)
            ma20 = talib.MA(UDV_RATIO, timeperiod=20)
            ma20_clo = (ma20 - UDV_RATIO) / UDV_RATIO
            ma20_clo = numpy.nan_to_num(ma20_clo)
            norm_ma20_clo = minmax_scale(ma20_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma20_clo)
        if feature_type == 'BAV_ROCP':
            BAV = numpy.nan_to_num(BAV)
            norm_volumes = minmax_scale(BAV, feature_range=(-1, 1))
            self.feature.append(norm_volumes)
        if feature_type == 'BAV_RATIO_ROCP':
            # Scaled ratio plus its 5/10/20-period MA deviations.
            BAV_RATIO = numpy.nan_to_num(BAV_RATIO)
            norm_volumes = minmax_scale(BAV_RATIO, feature_range=(-1, 1))
            self.feature.append(norm_volumes)
            ma5 = talib.MA(BAV_RATIO, timeperiod=5)
            ma5_clo = (ma5 - BAV_RATIO) / BAV_RATIO
            ma5_clo = numpy.nan_to_num(ma5_clo)
            norm_ma5_clo = minmax_scale(ma5_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma5_clo)
            ma10 = talib.MA(BAV_RATIO, timeperiod=10)
            ma10_clo = (ma10 - BAV_RATIO) / BAV_RATIO
            ma10_clo = numpy.nan_to_num(ma10_clo)
            norm_ma10_clo = minmax_scale(ma10_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma10_clo)
            ma20 = talib.MA(BAV_RATIO, timeperiod=20)
            ma20_clo = (ma20 - BAV_RATIO) / BAV_RATIO
            ma20_clo = numpy.nan_to_num(ma20_clo)
            norm_ma20_clo = minmax_scale(ma20_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma20_clo)
        if feature_type == 'TBA_ROCP':
            norm_volumes = minmax_scale(TBA, feature_range=(-1, 1))
            self.feature.append(norm_volumes)
        if feature_type == 'TBA_RATIO_ROCP':
            # Scaled ratio plus its 5/10/20-period MA deviations.
            norm_volumes = minmax_scale(TBA_RATIO, feature_range=(-1, 1))
            self.feature.append(norm_volumes)
            ma5 = talib.MA(TBA_RATIO, timeperiod=5)
            ma5_clo = (ma5 - TBA_RATIO) / TBA_RATIO
            ma5_clo = numpy.nan_to_num(ma5_clo)
            norm_ma5_clo = minmax_scale(ma5_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma5_clo)
            ma10 = talib.MA(TBA_RATIO, timeperiod=10)
            ma10_clo = (ma10 - TBA_RATIO) / TBA_RATIO
            ma10_clo = numpy.nan_to_num(ma10_clo)
            norm_ma10_clo = minmax_scale(ma10_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma10_clo)
            ma20 = talib.MA(TBA_RATIO, timeperiod=20)
            ma20_clo = (ma20 - TBA_RATIO) / TBA_RATIO
            ma20_clo = numpy.nan_to_num(ma20_clo)
            norm_ma20_clo = minmax_scale(ma20_clo, feature_range=(-1, 1))
            self.feature.append(norm_ma20_clo)


def extract_feature(raw_data, selector, window=30, with_label=True,
                    flatten=True):
    """Convenience wrapper: pull the needed columns out of `raw_data`
    (a DataFrame-like object — assumed; TODO confirm against callers) and
    run ChartFeature.moving_extract.

    NOTE(review): as in the original, nothing is returned when
    with_label is False (implicit None).
    """
    chart_feature = ChartFeature(selector)
    closes = raw_data.Close.values
    volumes = raw_data.TotalVolume.values
    txftwse_dif = raw_data.txfTwseDiff.values
    exffxf_diff = raw_data.exfFxfDiff.values
    udv = raw_data.uvdv.values
    bav = raw_data.bvav.values
    tba = raw_data.tbta.values
    txftwse_ratio = raw_data.txfTwseRatio.values
    exffxf_ratio = raw_data.exfFxfRatio.values
    udv_ratio = raw_data.uvdv_ratio.values
    bav_ratio = raw_data.bvavRatio.values
    tba_ratio = raw_data.tbtaRatio.values
    if with_label:
        moving_features, moving_labels, numRows = chart_feature.moving_extract(
            window=window,
            close_prices=closes,
            TXFTWSE_DIFF=txftwse_dif,
            EXFFXF_DIFF=exffxf_diff,
            UDV=udv,
            BAV=bav,
            TBA=tba,
            TXFTWSE_RATIO=txftwse_ratio,
            EXFFXF_RATIO=exffxf_ratio,
            UDV_RATIO=udv_ratio,
            BAV_RATIO=bav_ratio,
            TBA_RATIO=tba_ratio,
            volumes=volumes,
            with_label=with_label,
            flatten=flatten)
        return moving_features, moving_labels, numRows
{ "alphanum_fraction": 0.5320156811, "author": null, "avg_line_length": 49.1069518717, "converted": null, "ext": "py", "file": null, "hexsha": "c74c54b1836f121ff5b61bf13739d1c5534d355e", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2018-05-28T06:42:39.000Z", "max_forks_repo_forks_event_min_datetime": "2017-10-16T16:38:18.000Z", "max_forks_repo_head_hexsha": "2d0297a2b50f8e66d718bdc2a8b0473b71f8053a", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "kangchihlun/DeepTradeRiskEsti_TXF", "max_forks_repo_path": "DeepTradeRiskEsti_TXF/datasetLoader/chart.py", "max_issues_count": 1, "max_issues_repo_head_hexsha": "2d0297a2b50f8e66d718bdc2a8b0473b71f8053a", "max_issues_repo_issues_event_max_datetime": "2017-11-22T07:04:47.000Z", "max_issues_repo_issues_event_min_datetime": "2017-11-22T07:04:47.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "kangchihlun/DeepTradeRiskEsti_TXF", "max_issues_repo_path": "DeepTradeRiskEsti_TXF/datasetLoader/chart.py", "max_line_length": 165, "max_stars_count": 13, "max_stars_repo_head_hexsha": "2d0297a2b50f8e66d718bdc2a8b0473b71f8053a", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "kangchihlun/DeepTradeRiskEsti_TXF", "max_stars_repo_path": "DeepTradeRiskEsti_TXF/datasetLoader/chart.py", "max_stars_repo_stars_event_max_datetime": "2021-02-22T08:51:53.000Z", "max_stars_repo_stars_event_min_datetime": "2017-10-14T04:40:45.000Z", "num_tokens": 4497, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 18366 }
### Introduction to Random Variables and Statistical Distributions

```python
from scipy.stats import bernoulli, poisson, binom, norm, mvn
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
%matplotlib inline
```

```python
headimg = plt.imread('../data/quarterheads.jpg')
tailimg = plt.imread('../data/quartertails.jpg')
```

### Discrete random variables

A <i>discrete random</i> variable can take a finite number of possible values. A toss of a two-sided coin can be thought of as a random variable, as can a roll of a six-sided die.

#### Probability distributions for discrete random variables

A random variable is generated from a probability distribution. There are different types of distributions defined for discrete random variables. These include:
- Bernoulli distribution
- Binomial distribution
- Multinoulli distribution
- Poisson distribution

#### Bernoulli distribution

The Bernoulli distribution represents a binary random variable which takes value 1 with success probability $p$ and value 0 with failure probability $q=1-p$. A Bernoulli distribution has only one parameter: $p$.

```python
theta = 0.5
# let us draw a sample from a bernoulli distribution
b = bernoulli.rvs(theta,size=1)
print(b)
if b[0] == 0:
    plt.imshow(tailimg)
    plt.axis('off')
else:
    plt.imshow(headimg)
    plt.axis('off')
```

```python
# you can also draw samples simultaneously
samples = bernoulli.rvs(theta,size=1000)
print(samples)
# count the number of successes (sample = 1). What happens when you change p?
print(np.count_nonzero(samples==1)) ``` [0 0 0 1 0 1 0 0 1 0 1 1 0 1 1 1 0 0 1 0 0 0 0 0 1 1 1 0 1 1 1 0 0 0 0 0 0 1 0 1 0 1 0 1 0 0 1 1 1 0 0 0 1 1 0 0 0 1 0 0 1 0 0 1 1 1 1 1 0 0 1 0 0 1 0 0 0 1 1 0 1 0 0 1 0 1 1 0 1 1 1 1 1 1 1 1 1 1 0 0 1 1 0 0 0 0 0 0 0 1 0 1 1 1 0 0 1 1 1 0 0 1 1 0 0 1 1 1 1 0 0 1 1 0 1 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 1 0 1 1 1 1 1 0 0 1 1 0 1 0 1 0 0 0 0 1 0 0 0 1 0 1 0 0 1 0 0 1 0 1 0 1 1 1 0 0 0 0 1 1 0 0 1 1 0 0 1 0 1 0 0 1 0 1 0 0 0 1 1 1 1 1 1 0 1 0 1 0 1 0 1 0 1 1 0 0 0 0 1 0 1 1 1 0 1 1 0 0 0 1 0 0 0 1 1 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 1 1 0 0 1 1 0 1 0 0 1 1 1 0 0 1 0 1 1 1 0 0 1 0 0 0 1 1 0 0 0 0 1 0 0 1 1 1 0 0 0 0 0 1 1 1 0 1 0 1 1 1 1 0 1 0 0 1 0 1 1 1 1 1 0 0 1 1 1 0 0 1 1 1 1 0 1 0 0 0 1 0 0 1 0 1 0 0 0 0 1 0 1 0 1 1 0 0 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 1 1 1 1 1 1 0 1 0 1 1 0 1 1 1 1 1 0 0 1 0 0 0 0 1 0 0 0 1 0 0 1 1 0 1 0 0 0 1 0 0 0 1 0 1 0 1 0 1 1 1 1 0 0 0 1 1 0 0 1 0 1 0 1 0 0 1 1 1 1 0 0 0 1 1 0 0 0 1 0 0 1 1 0 0 0 0 0 1 0 0 1 0 0 1 1 0 1 1 1 0 1 1 1 1 1 1 0 1 1 0 0 0 0 1 1 1 1 1 1 1 0 1 0 1 0 0 0 1 0 0 0 1 0 0 1 0 1 0 0 1 1 0 0 1 1 0 1 1 1 0 0 0 1 0 0 1 0 1 0 0 0 1 1 1 1 1 1 1 0 1 1 0 0 0 0 0 1 1 0 0 1 1 1 1 0 0 0 0 0 1 1 0 1 1 1 0 0 0 1 1 1 1 0 0 0 1 0 1 0 1 1 1 0 1 1 0 1 1 0 0 0 1 0 0 0 1 0 1 1 1 0 0 1 1 0 0 1 0 1 1 0 1 0 0 1 1 1 1 1 0 1 1 1 0 1 1 0 0 0 1 1 0 1 0 0 0 1 0 1 0 0 1 1 0 1 0 1 1 0 1 1 1 0 0 1 1 1 1 1 1 0 0 1 1 0 0 1 1 0 0 0 1 0 1 1 0 0 0 0 0 1 1 1 0 1 1 1 1 0 0 0 0 1 1 1 0 0 1 0 0 0 1 0 1 1 0 0 1 0 0 0 1 0 0 0 0 0 1 1 0 1 0 1 1 0 0 0 1 1 1 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 1 0 0 0 1 1 1 1 0 1 0 1 1 0 1 1 1 1 1 1 0 0 0 0 0 0 1 1 1 0 1 0 1 0 0 0 1 0 1 0 0 0 0 0 0 1 1 1 0 0 0 0 0 1 0 1 0 0 1 1 0 0 1 1 1 0 0 0 1 0 1 1 0 0 0 0 0 0 0 1 1 0 1 0 0 1 0 1 0 0 1 0 1 1 0 1 1 1 1 1 0 0 1 1 0 1 1 1 0 0 1 0 1 1 1 1 1 1 1 1 1 1 1 0 0 1 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 1 0 1 1 1 1 0 0 0 0 1 0 0 1 0 1 0 1 0 1 0 0 1 0 1 1 1 1 0 1 0 0 1 1 0 0 1 1 1 1 0 0 1 0 1 0 0 0 
1 0 0 1 0 1 1 0 1 1 0 1 1 0 1 0 1 0 1 0 0] 487 ```python # plotting the probability mass function for the Bernoulli distribution a = np.arange(2) colors = matplotlib.rcParams['axes.color_cycle'] plt.figure(figsize=(12,8)) for i, theta in enumerate([0.1, 0.2, 0.6, 0.7]): ax = plt.subplot(1, 4, i+1) plt.bar(a, bernoulli.pmf(a, theta), label=theta, color=colors[i], alpha=0.2) ax.xaxis.set_ticks(a) plt.legend(loc=0) if i == 0: plt.ylabel("PDF at $k$") plt.suptitle("Bernoulli probability") ``` #### Binomial distribution Another popular distribution for a discrete random variable is the <b>binomial distribution</b>. A binomial distribution has two parameters $n$ and $\theta$, where $0 \le \theta \le 1$. The sample generated by a binomial distribution denotes the number of successes observed in a sequence of $n$ binary trials (e.g., toss of a coin) when the probability of each success is $\theta$. The samples that are drawn from a binomial distribution range between 0 and $n$. The probability distribution is defined as: \begin{equation} p(k;n,\theta) = P(X = k) = \binom{n}{k}\theta^k (1 - \theta)^{n-k} \end{equation} ```python #sampling from a binomial distribution sample = binom.rvs(20,0.4,1) print sample ``` 10 ```python #plotting the pmf for a bernoulli distribution plt.figure(figsize=(12,6)) k = np.arange(0, 22) for p, color in zip([0.1, 0.3, 0.6, 0.8], colors): rv = binom(20, p) plt.plot(k, rv.pmf(k), lw=2, color=color, label=p) plt.fill_between(k, rv.pmf(k), color=color, alpha=0.3) plt.legend() plt.title("Binomial distribution") plt.tight_layout() plt.ylabel("PDF at $k$") plt.xlabel("$k$") ``` #### Multinoulli distribution A multinoulli distribution is a generalization of Bernoulli distribution for trials which can take more than two possibilities ($k > 2$). The parameter for multinoulli distribution is a vector ${\bf \theta}$ which has $k$ entries. Each entry $\theta_i$ indicates the probability of observing the category $i$ in a single trial. 
```python
# generate samples from a multinoulli distribution. Essentially simulates a single roll of a die. Note that the output is a vector of length $k = 6$
np.random.multinomial(1, [1/6.]*6, size=1)
```

#### Multinomial distribution

A multinomial distribution is a generalization of the binomial distribution for trials which can take more than two possibilities. The parameters for the multinomial distribution are a vector ${\bf \theta}$ and $n$.

```python
# generate samples from a multinomial distribution. Note that the output is a vector of length $k = 6$
np.random.multinomial(20, [1/6.]*6, size=1)
```

### Continuous random variables

A <i>continuous random</i> variable can take an infinite number of possible values. Several interesting distributions exist:
- alpha A alpha continuous random variable.
- beta A beta continuous random variable.
- gamma A gamma continuous random variable.
- expon An exponential continuous random variable.
- gauss Gaussian random variable

#### Gaussian distribution

One of the most popular distributions is the Gaussian distribution. This distribution is defined for any number of variables. For the single-variable case, the distribution is defined using two parameters: $\mu$ and $\sigma$. $\mu$ or the <b>mean</b> can take any value and $\sigma$ or the <b>standard deviation</b> is $\ge 0$.

For a continuous distribution, you cannot compute the probability mass at any value of the random variable. Instead, you can compute the <i>density</i> using the <b>probability density function</b>:

$$p(x) = \frac{1}{\sigma\sqrt{2\pi}}\exp\left[-\frac{1}{2}\left(\frac{x - \mu}{\sigma}\right)^2\right]$$

The random variable represented using a Gaussian distribution can take any value from $-\infty$ to $\infty$.
```python
# set the parameters
mu = 0
sigma = 1

# draw 1000 samples from this distribution
samples = norm(mu, sigma).rvs(1000)

# plot an empirical distribution, i.e., a histogram
#plt.hist(samples, 30, normed=True, alpha=.3)

# Compute the density at several instances of the random variable
x = np.linspace(-10, 10, 10001)

# plot the density
plt.plot(x, norm(mu, sigma).pdf(x), linewidth=2)
```

#### Multi-dimensional or multivariate Gaussian distribution

A distribution can be defined for multivariate random variables. One example is the multivariate Gaussian. In general, the random variable is a $D$ length vector ${\bf X}$. The two parameters of this distribution are a mean vector ${\bf \mu}$ and a covariance matrix ${\bf \Sigma}$. The pdf at any value of ${\bf X}$ is given by:

$$ \mathcal{N}({\bf X}| {\bf \mu,\Sigma}) \triangleq \frac{1}{(2\pi)^{D/2}|{\bf \Sigma}|^{1/2}}\exp\left[-\frac{1}{2}{\bf (x-\mu)^\top\Sigma^{-1}(x-\mu)}\right] $$

Note that if $D = 1$, it reduces to a univariate Gaussian distribution.

```python
#define the parameters for D = 2
mu = np.array([10,10])
Sigma = np.array([[1,0.],[0.,1]])
rv = np.random.multivariate_normal(mu,Sigma)

#sample some points
s = np.random.multivariate_normal(mu,Sigma,10000)

fig = plt.figure()
plt.subplot(111)
plt.scatter(s[:,0],s[:,1])

# add a contour plot
smin = np.min(s,axis=0)
smax = np.max(s,axis=0)
t1=np.linspace(smin[0],smax[0],1000)
t2=np.linspace(smin[1],smax[1],1000)
# evaluate pdf at each of these mesh points
```

```python
np.cov(s.transpose())
```

    array([[ 9.89485661e-01, -1.43077154e-02],
           [ -1.43077154e-02,  9.97270669e-01]])
{ "alphanum_fraction": 0.9273715483, "author": null, "avg_line_length": 448.5640495868, "converted": true, "ext": "ipynb", "file": null, "hexsha": "3c8a9235b0e8367140141fddd94589c87c619872", "include": null, "lang": "Jupyter Notebook", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9cf9d94ba5063feb3f9e00684fcceff4bbf64a68", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Gaurav14cs17/Python-Book", "max_forks_repo_path": "ML/Notebook/notebooks/RandomVariablesIntroduction.ipynb", "max_issues_count": null, "max_issues_repo_head_hexsha": "9cf9d94ba5063feb3f9e00684fcceff4bbf64a68", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Gaurav14cs17/Python-Book", "max_issues_repo_path": "ML/Notebook/notebooks/RandomVariablesIntroduction.ipynb", "max_line_length": 95954, "max_stars_count": null, "max_stars_repo_head_hexsha": "9cf9d94ba5063feb3f9e00684fcceff4bbf64a68", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Gaurav14cs17/Python-Book", "max_stars_repo_path": "ML/Notebook/notebooks/RandomVariablesIntroduction.ipynb", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3952, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 217105 }
# This file is a part of Julia. License is MIT: https://julialang.org/license

# ––––––––
# Emphasis
# ––––––––

# Inline emphasis node produced by single `*`/`_` delimiters.
mutable struct Italic
    text
end

@trigger '*' ->
function asterisk_italic(stream::IO, md::MD)
    result = parse_inline_wrapper(stream, "*")
    return result === nothing ? nothing : Italic(parseinline(result, md))
end

@trigger '_' ->
function underscore_italic(stream::IO, md::MD)
    result = parse_inline_wrapper(stream, "_")
    return result === nothing ? nothing : Italic(parseinline(result, md))
end

# Inline strong-emphasis node produced by doubled `**`/`__` delimiters.
mutable struct Bold
    text
end

@trigger '*' ->
function asterisk_bold(stream::IO, md::MD)
    result = parse_inline_wrapper(stream, "**")
    return result === nothing ? nothing : Bold(parseinline(result, md))
end

@trigger '_' ->
function underscore_bold(stream::IO, md::MD)
    result = parse_inline_wrapper(stream, "__")
    return result === nothing ? nothing : Bold(parseinline(result, md))
end

# ––––
# Code
# ––––

@trigger '`' ->
function inline_code(stream::IO, md::MD)
    withstream(stream) do
        ticks = startswith(stream, r"^(`+)")
        result = readuntil(stream, ticks)
        if result === nothing
            nothing
        else
            result = strip(result)
            # An odd number of backticks wrapping the text will produce a `Code` node, while
            # an even number will result in a `LaTeX` node. This allows for arbitrary
            # backtick combinations to be embedded inside the resulting node, i.e.
            #
            # `a`, ``a``, `` `a` ``, ``` ``a`` ```, ``` `a` ```, etc.
            # ^    ^      ^          ^             ^
            # C    L      L          C             C    with C=Code and L=LaTeX.
            #
            isodd(length(ticks)) ? Code(result) : LaTeX(result)
        end
    end
end

# ––––––––––––––
# Images & Links
# ––––––––––––––

mutable struct Image
    url::String
    alt::String
end

@trigger '!' ->
function image(stream::IO, md::MD)
    withstream(stream) do
        startswith(stream, "![") || return
        alt = readuntil(stream, ']', match = '[')
        alt ≡ nothing && return
        skipwhitespace(stream)
        startswith(stream, '(') || return
        url = readuntil(stream, ')', match = '(')
        url ≡ nothing && return
        return Image(url, alt)
    end
end

mutable struct Link
    text
    url::String
end

@trigger '[' ->
function link(stream::IO, md::MD)
    withstream(stream) do
        startswith(stream, '[') || return
        text = readuntil(stream, ']', match = '[')
        text ≡ nothing && return
        skipwhitespace(stream)
        startswith(stream, '(') || return
        url = readuntil(stream, ')', match = '(')
        url ≡ nothing && return
        return Link(parseinline(text, md), url)
    end
end

@trigger '[' ->
function footnote_link(stream::IO, md::MD)
    withstream(stream) do
        regex = r"^\[\^(\w+)\]"
        str = startswith(stream, regex)
        if isempty(str)
            return
        else
            ref = match(regex, str).captures[1]
            return Footnote(ref, nothing)
        end
    end
end

@trigger '<' ->
function autolink(stream::IO, md::MD)
    withstream(stream) do
        startswith(stream, '<') || return
        url = readuntil(stream, '>')
        url ≡ nothing && return
        _is_link(url) && return Link(url, url)
        _is_mailto(url) && return Link(url, url)
        return
    end
end

# This list is taken from the commonmark spec
# http://spec.commonmark.org/0.19/#absolute-uri
# NOTE: the entries must be purely whitespace-separated. Stray commas carried
# over from the spec's comma-separated list (e.g. "snmp,soap.beep") used to
# fuse neighbouring schemes into single tokens that could never match.
const _allowable_schemes = Set(split("coap doi javascript aaa aaas about acap cap cid crid
    data dav dict dns file ftp geo go gopher h323 http https iax icap im imap info ipp iris
    iris.beep iris.xpc iris.xpcs iris.lwz ldap mailto mid msrp msrps mtqp mupdate news nfs
    ni nih nntp opaquelocktoken pop pres rtsp service session shttp sieve sip sips sms snmp
    soap.beep soap.beeps tag tel telnet tftp thismessage tn3270 tip tv urn vemmi ws wss xcon
    xcon-userid xmlrpc.beep xmlrpc.beeps xmpp z39.50r z39.50s adiumxtra afp afs aim apt
    attachment aw beshare bitcoin bolo callto chrome chrome-extension com-eventbrite-attendee
    content cvs dlna-playsingle dlna-playcontainer dtn dvb ed2k facetime feed finger fish
    gg git gizmoproject gtalk hcp icon ipn irc irc6 ircs itms jar jms keyparc lastfm ldaps
    magnet maps market message mms ms-help msnim mumble mvn notes oid palm paparazzi platform
    proxy psyc query res resource rmi rsync rtmp secondlife sftp sgn skype smb soldat spotify
    ssh steam svn teamspeak things udp unreal ut2004 ventrilo view-source webcal wtai wyciwyg
    xfire xri ymsgr"))

# True when `s` looks like `scheme://authority` with an allowable scheme.
function _is_link(s::AbstractString)
    '<' in s && return false
    m = match(r"^(.*)://(\S+?)(:\S*)?$", s)
    m ≡ nothing && return false
    scheme = lowercase(m.captures[1])
    return scheme in _allowable_schemes
end

# non-normative regex from the HTML5 spec
const _email_regex = r"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$"

# True when `s` is a mailto URI with a syntactically valid email address.
function _is_mailto(s::AbstractString)
    # "mailto:" is 7 characters long; the previous `lowercase(s[1:6]) ==
    # "mailto:"` comparison could never succeed, and the address part starts
    # after index 7, not at index 6.
    startswith(lowercase(s), "mailto:") || return false
    # The matched prefix is pure ASCII, so byte index 8 is a valid character
    # boundary even if the rest of the string contains multi-byte characters.
    return occursin(_email_regex, SubString(s, 8))
end

# –––––––––––
# Punctuation
# –––––––––––

mutable struct LineBreak end

@trigger '\\' ->
function linebreak(stream::IO, md::MD)
    if startswith(stream, "\\\n")
        return LineBreak()
    end
end

@trigger '-' ->
function en_dash(stream::IO, md::MD)
    if startswith(stream, "--")
        return "–"
    end
end

const escape_chars = "\\`*_#+-.!{}[]()\$"

@trigger '\\' ->
function escapes(stream::IO, md::MD)
    withstream(stream) do
        if startswith(stream, "\\") && !eof(stream) && (c = read(stream, Char)) in escape_chars
            return string(c)
        end
    end
end
{ "alphanum_fraction": 0.6104205687, "author": null, "avg_line_length": 31.2393617021, "converted": null, "ext": "jl", "file": null, "hexsha": "0e9b0bfb2bbac37315326f717ca997f79ed850ca", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "dd3d14e5e7d24985cba6185e2d07a62ee9943d4e", "max_forks_repo_licenses": [ "Zlib" ], "max_forks_repo_name": "djsegal/julia-fork", "max_forks_repo_path": "stdlib/Markdown/src/Common/inline.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "dd3d14e5e7d24985cba6185e2d07a62ee9943d4e", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Zlib" ], "max_issues_repo_name": "djsegal/julia-fork", "max_issues_repo_path": "stdlib/Markdown/src/Common/inline.jl", "max_line_length": 500, "max_stars_count": 1, "max_stars_repo_head_hexsha": "dd3d14e5e7d24985cba6185e2d07a62ee9943d4e", "max_stars_repo_licenses": [ "Zlib" ], "max_stars_repo_name": "djsegal/julia-fork", "max_stars_repo_path": "stdlib/Markdown/src/Common/inline.jl", "max_stars_repo_stars_event_max_datetime": "2018-04-16T17:50:52.000Z", "max_stars_repo_stars_event_min_datetime": "2018-04-16T17:50:52.000Z", "num_tokens": 1641, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 5873 }
import ml_collections

from jaxdl.rl.networks.actor_nets import create_normal_dist_policy_fn
from jaxdl.rl.networks.critic_nets import create_double_critic_network_fn


def get_config():
  """Assemble the default hyperparameter configuration for the TD3 agent."""
  settings = dict(
      algorithm='TD3',
      # Double critic, matching TD3's twin-Q setup.
      critic_net_fn=create_double_critic_network_fn(
          hidden_dims=[256, 256]),
      actor_net_fn=create_normal_dist_policy_fn(
          hidden_dims=[256, 256]),
      actor_lr=3e-4,
      critic_lr=3e-4,
      # tau: target-network smoothing coefficient (presumably Polyak
      # averaging — confirm against the agent implementation).
      tau=0.005,
      discount=0.99,
      target_update_period=1,
      replay_buffer_size=10000,
  )
  config = ml_collections.ConfigDict()
  for field, value in settings.items():
    setattr(config, field, value)
  return config
{ "alphanum_fraction": 0.7860696517, "author": null, "avg_line_length": 30.15, "converted": null, "ext": "py", "file": null, "hexsha": "89bc2c8eda74828b7a2570dad3bed9621191ff6c", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "032df55292410c2976703213e67fff7bcafaedbe", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "patrickhart/jaxdl", "max_forks_repo_path": "examples/configs/td3_config.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "032df55292410c2976703213e67fff7bcafaedbe", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "patrickhart/jaxdl", "max_issues_repo_path": "examples/configs/td3_config.py", "max_line_length": 73, "max_stars_count": 1, "max_stars_repo_head_hexsha": "032df55292410c2976703213e67fff7bcafaedbe", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "patrickhart/jaxdl", "max_stars_repo_path": "examples/configs/td3_config.py", "max_stars_repo_stars_event_max_datetime": "2022-02-09T09:19:40.000Z", "max_stars_repo_stars_event_min_datetime": "2022-02-09T09:19:40.000Z", "num_tokens": 169, "path": null, "reason": "from jax", "repo": null, "save_path": null, "sha": null, "size": 603 }
#include <string> #include <sstream> #include <map> #include <memory> #include <vector> #include <gtest/gtest.h> #include <boost/graph/adjacency_list.hpp> #include "Alignment.hpp" #include "AlnGraphBoost.hpp" TEST(AlnGraphBoostTest, RawConsensus) { std::string backbone = "ATATTAGGC"; AlnGraphBoost ag(backbone); std::unique_ptr<dagcon::Alignment[]> algs(new dagcon::Alignment[5]); algs[0].tstr = "ATATTA---GGC"; algs[0].qstr = "ATAT-AGCCGGC"; algs[1].tstr = "ATATTA-GGC"; algs[1].qstr = "ATAT-ACGGC"; algs[2].tstr = "AT-ATTA--GGC"; algs[2].qstr = "ATCAT--CCGGC"; algs[3].tstr = "ATATTA--G-GC"; algs[3].qstr = "ATAT-ACCGAG-"; algs[4].tstr = "ATATTA---GGC"; algs[4].qstr = "ATAT-AGCCGGC"; for(int i=0; i < 5; i++) { dagcon::Alignment& ra = algs[i]; ra.id = "target"; ra.tlen = 9; ra.start = 1; } ag.addAln(algs[0]); ag.addAln(algs[1]); ag.addAln(algs[2]); ag.addAln(algs[3]); ag.addAln(algs[4]); ag.mergeNodes(); std::string expected = "ATATAGCCGGC"; const std::string actual = ag.consensus(); EXPECT_EQ(expected, actual); } TEST(AlnGraphBoostTest, DanglingNodes) { AlnGraphBoost ag(12); dagcon::Alignment a; a.tstr = "C-GCGGA-T-G-"; a.qstr = "CCGCGG-G-A-T"; ag.addAln(a); EXPECT_FALSE(ag.danglingNodes()); }
{ "alphanum_fraction": 0.600729927, "author": null, "avg_line_length": 23.6206896552, "converted": null, "ext": "cpp", "file": null, "hexsha": "7704ce947344bbeac8aba903326fbda4d3e8ba67", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 31, "max_forks_repo_forks_event_max_datetime": "2022-02-23T18:45:40.000Z", "max_forks_repo_forks_event_min_datetime": "2015-01-26T09:59:29.000Z", "max_forks_repo_head_hexsha": "c14c422e609a914f0139f7222202ac1bce7e3ef1", "max_forks_repo_licenses": [ "BSD-3-Clause-Clear" ], "max_forks_repo_name": "dthadi3/pbdagcon", "max_forks_repo_path": "test/cpp/AlnGraphBoostTest.cpp", "max_issues_count": 44, "max_issues_repo_head_hexsha": "c14c422e609a914f0139f7222202ac1bce7e3ef1", "max_issues_repo_issues_event_max_datetime": "2018-04-27T05:16:59.000Z", "max_issues_repo_issues_event_min_datetime": "2015-06-04T00:03:39.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause-Clear" ], "max_issues_repo_name": "dthadi3/pbdagcon", "max_issues_repo_path": "test/cpp/AlnGraphBoostTest.cpp", "max_line_length": 72, "max_stars_count": 31, "max_stars_repo_head_hexsha": "c14c422e609a914f0139f7222202ac1bce7e3ef1", "max_stars_repo_licenses": [ "BSD-3-Clause-Clear" ], "max_stars_repo_name": "dthadi3/pbdagcon", "max_stars_repo_path": "test/cpp/AlnGraphBoostTest.cpp", "max_stars_repo_stars_event_max_datetime": "2022-02-23T18:45:36.000Z", "max_stars_repo_stars_event_min_datetime": "2015-02-24T19:17:22.000Z", "num_tokens": 490, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 1370 }
import numpy as np import pandas as pd import time import sys import emcee from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KernelDensity def get_all_bandwidths(h5_file, thin_by=100, data_path = '', n_cores=8, prior='uninformative', **kwargs): '''optimal bandwidth for marginilized KDEs warning - lots of hard coding''' sn = h5_file.split('/')[-1].split('_')[0] reader = emcee.backends.HDFBackend(h5_file) nsteps = thin_by*np.shape(reader.get_chain())[0] tau = reader.get_autocorr_time(tol=0) burnin = int(5*np.max(tau)) samples = reader.get_chain(discard=burnin, thin=np.max([int(np.max(tau)), 1]), flat=True) lnpost = reader.get_log_prob(discard=burnin, thin=np.max([int(np.max(tau)), 1]), flat=True) t_fl = samples[:,0] time_bw = opt_bandwidth(t_fl, log_min_grid=-2.7, log_max_grid=-0.3, n_jobs=n_cores) if prior == 'uninformative': alpha_g = samples[:,2] alpha_r = samples[:,4] delta_df = pd.read_hdf(data_path + '{}_{}_deltas.h5'.format(sn, prior)) delta = delta_df.delta.values alpha_g_bw = opt_bandwidth(alpha_g, log_min_grid=-2.7, log_max_grid=-0.3, n_jobs=n_cores) alpha_r_bw = opt_bandwidth(alpha_r, log_min_grid=-2.7, log_max_grid=-0.3, n_jobs=n_cores) delta_bw = opt_bandwidth(delta, log_min_grid=-2.7, log_max_grid=-0.3, n_jobs=n_cores) with open(data_path + '{}_{}_bandwidth.txt'.format(sn, prior), 'w') as fw: print('{} = bw for time_fl'.format(time_bw),file=fw) if prior == 'uninformative': print('{} = bw for alpha_g'.format(alpha_g_bw),file=fw) print('{} = bw for alpha_r'.format(alpha_r_bw),file=fw) print('{} = bw for delta'.format(delta_bw),file=fw) return def opt_bandwidth(marg_samples, log_min_grid=-2.5, log_max_grid=0, grid_points=15, n_cv=3, n_jobs=-1): '''determine the optimal KDE bandwidth via gridsearch CV''' params = {'bandwidth': np.logspace(log_min_grid, log_max_grid, grid_points)} grid_cv = GridSearchCV(KernelDensity(rtol=1e-4), params, cv=n_cv, n_jobs=n_jobs) if len(np.shape(marg_samples)) == 1: X_marg = np.reshape(marg_samples, 
(len(marg_samples), 1)) else: X_marg = marg_samples grid_cv.fit(X_marg) return grid_cv.best_estimator_.bandwidth if __name__== "__main__": ztf_name = str(sys.argv[1]) n_cores = 8 prior = 'uninformative' if len(sys.argv) > 2: n_cores = int(sys.argv[2]) if len(sys.argv) > 3: prior = 'delta2' data_path = "/projects/p30796/ZTF/early_Ia/forced_lightcurves/sample_lc_v2/big_unc/" if prior == 'uninformative': backend_filename = data_path + "/{}_emcee_40_varchange.h5".format(ztf_name) get_all_bandwidths(backend_filename, data_path=data_path, n_cores=n_cores) elif prior == 'delta2': backend_filename = data_path + "/{}_emcee_40_tsquared.h5".format(ztf_name) get_all_bandwidths(backend_filename, data_path=data_path, n_cores=n_cores, prior=prior)
{ "alphanum_fraction": 0.5206861756, "author": null, "avg_line_length": 37.3962264151, "converted": null, "ext": "py", "file": null, "hexsha": "1fc36c11845b33cacda6c39e96940160a4b232a1", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-05-09T23:16:21.000Z", "max_forks_repo_forks_event_min_datetime": "2021-05-09T23:16:21.000Z", "max_forks_repo_head_hexsha": "4caf7660a3971fa32b63a1d41f82e78fc0599982", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamamiller/ztf_early_Ia_2018", "max_forks_repo_path": "playground/cv_kde_bandwidth.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "4caf7660a3971fa32b63a1d41f82e78fc0599982", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamamiller/ztf_early_Ia_2018", "max_issues_repo_path": "playground/cv_kde_bandwidth.py", "max_line_length": 88, "max_stars_count": 3, "max_stars_repo_head_hexsha": "4caf7660a3971fa32b63a1d41f82e78fc0599982", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamamiller/ztf_early_Ia_2018", "max_stars_repo_path": "playground/cv_kde_bandwidth.py", "max_stars_repo_stars_event_max_datetime": "2019-10-17T16:01:03.000Z", "max_stars_repo_stars_event_min_datetime": "2019-02-24T22:34:02.000Z", "num_tokens": 928, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 3964 }
import os
import numpy as np
import tempfile
import shutil
import pickle
import ntpath

import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,
                                    BaseInterface, OutputMultiPath,
                                    BaseInterfaceInputSpec, isdefined)
from nipype.utils.filemanip import (load_json, save_json, split_filename,
                                    fname_presuffix, copyfile)
from src.obj import transform_objectCommand, volume_object_evaluateCommand


def get_surf_workflow(name, infosource, datasink, opts):
    """Build a workflow that moves a surface into the analysis space and
    rasterizes it over a volume to produce a mask.

    :param name: Name for the nipype workflow.
    :param infosource: Iterable source node (currently unused here).
    :param datasink: Data sink node (currently unused here).
    :param opts: Options object; reads ``opts.analysis_space`` and
        ``opts.surface_space``, each one of 'icbm152', 'pet', 't1'.
    :returns: The assembled nipype Workflow.
    """
    workflow = pe.Workflow(name=name)

    # Transform naming convention (from the branches below): <SRC><DST>,
    # e.g. 'MNIT1' maps MNI -> T1 and 'T1PET' maps T1 -> PET.
    inputnode = pe.Node(niu.IdentityInterface(
        fields=["vol_file", "obj_file", "T1MNI", "MNIT1", "PETT1", "T1PET"]),
        name='inputnode')
    internode = pe.Node(niu.IdentityInterface(fields=["surface"]),
                        name='internode')
    outputnode = pe.Node(niu.IdentityInterface(fields=["surface", "mask"]),
                         name='outputnode')

    if opts.analysis_space == 'icbm152':
        if opts.surface_space == 'icbm152':
            # Surface already in the analysis space; pass through untouched.
            workflow.connect(inputnode, 'obj_file', internode, 'surface')
        # MNI --> T1 --> PET
        if opts.surface_space == 'pet':
            # BUG FIX: was `transform_object()`, which is never imported or
            # defined; the imported class is `transform_objectCommand`.
            transform_MNI_T1 = pe.Node(transform_objectCommand(),
                                       name="transform_MNI_T1")
            workflow.connect(inputnode, 'obj_file', transform_MNI_T1, 'in_file')
            workflow.connect(inputnode, 'MNIT1', transform_MNI_T1, 'tfm_file')

            transform_T1_PET = pe.Node(transform_objectCommand(),
                                       name="transform_T1_PET")
            workflow.connect(transform_MNI_T1, 'out_file',
                             transform_T1_PET, 'in_file')
            # BUG FIX: the transform feeds 'tfm_file', not 'in_file'
            # (matches every other branch).
            workflow.connect(inputnode, 'T1PET', transform_T1_PET, 'tfm_file')
            workflow.connect(transform_T1_PET, 'out_file', internode, 'surface')
        # MNI --> T1
        if opts.surface_space == 't1':
            transform_MNI_T1 = pe.Node(transform_objectCommand(),
                                       name="transform_MNI_T1")
            workflow.connect(inputnode, 'obj_file', transform_MNI_T1, 'in_file')
            workflow.connect(inputnode, 'MNIT1', transform_MNI_T1, 'tfm_file')
            workflow.connect(transform_MNI_T1, 'out_file', internode, 'surface')
    elif opts.analysis_space == 'pet':
        # PET --> T1 --> MNI
        if opts.surface_space == 'icbm152':
            # BUG FIX: node was misnamed "transform_MNI_T1".
            transform_PET_T1 = pe.Node(transform_objectCommand(),
                                       name="transform_PET_T1")
            workflow.connect(inputnode, 'obj_file', transform_PET_T1, 'in_file')
            workflow.connect(inputnode, 'PETT1', transform_PET_T1, 'tfm_file')

            transform_T1_MNI = pe.Node(transform_objectCommand(),
                                       name="transform_T1_MNI")
            # BUG FIX: the node was connected to itself; chain from the
            # PET -> T1 output instead.
            workflow.connect(transform_PET_T1, 'out_file',
                             transform_T1_MNI, 'in_file')
            # BUG FIX: the transform feeds 'tfm_file', not 'in_file'.
            workflow.connect(inputnode, 'T1MNI', transform_T1_MNI, 'tfm_file')
            workflow.connect(transform_T1_MNI, 'out_file', internode, 'surface')
        if opts.surface_space == 'pet':
            workflow.connect(inputnode, 'obj_file', internode, 'surface')
        # PET --> T1
        if opts.surface_space == 't1':
            # BUG FIX: node was misnamed "transform_MNI_T1".
            transform_PET_T1 = pe.Node(transform_objectCommand(),
                                       name="transform_PET_T1")
            workflow.connect(inputnode, 'obj_file', transform_PET_T1, 'in_file')
            workflow.connect(inputnode, 'PETT1', transform_PET_T1, 'tfm_file')
            workflow.connect(transform_PET_T1, 'out_file', internode, 'surface')
    elif opts.analysis_space == 't1':
        # T1 --> MNI
        if opts.surface_space == 'icbm152':
            transform_T1_MNI = pe.Node(transform_objectCommand(),
                                       name="transform_T1_MNI")
            workflow.connect(inputnode, 'obj_file', transform_T1_MNI, 'in_file')
            workflow.connect(inputnode, 'T1MNI', transform_T1_MNI, 'tfm_file')
            workflow.connect(transform_T1_MNI, 'out_file', internode, 'surface')
        # T1 --> PET
        if opts.surface_space == 'pet':
            transform_T1_PET = pe.Node(transform_objectCommand(),
                                       name="transform_T1_PET")
            workflow.connect(inputnode, 'obj_file', transform_T1_PET, 'in_file')
            # BUG FIX: used 'PETT1' (PET -> T1); the T1 -> PET mapping is
            # 'T1PET' under the naming pattern every other branch follows.
            workflow.connect(inputnode, 'T1PET', transform_T1_PET, 'tfm_file')
            workflow.connect(transform_T1_PET, 'out_file', internode, 'surface')
        if opts.surface_space == 't1':
            workflow.connect(inputnode, 'obj_file', internode, 'surface')
    else:
        print("Error: variable <analysis_space> was not one of : icbm152, pet, t1")
        exit(1)

    # Evaluate the transformed surface over the volume to produce the mask.
    volume_interpolateNode = pe.Node(volume_object_evaluateCommand(),
                                     name="volume_interpolate")
    workflow.connect(internode, 'surface', volume_interpolateNode, 'obj_file')
    workflow.connect(inputnode, 'vol_file', volume_interpolateNode, 'vol_file')
    workflow.connect(volume_interpolateNode, 'out_file', outputnode, 'mask')
    workflow.connect(internode, 'surface', outputnode, 'surface')

    return workflow
{ "alphanum_fraction": 0.6592956593, "author": null, "avg_line_length": 47.8823529412, "converted": null, "ext": "py", "file": null, "hexsha": "49e7e62c1c9104cb314b453fc09eb61dbf4ee692", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 15, "max_forks_repo_forks_event_max_datetime": "2021-11-05T13:29:04.000Z", "max_forks_repo_forks_event_min_datetime": "2017-09-14T21:30:30.000Z", "max_forks_repo_head_hexsha": "316c9675e6049443dd06556b046c79e5a82e5d3e", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "APPIAN-PET/PyPA", "max_forks_repo_path": "src/surf_masking.py", "max_issues_count": 15, "max_issues_repo_head_hexsha": "316c9675e6049443dd06556b046c79e5a82e5d3e", "max_issues_repo_issues_event_max_datetime": "2021-09-17T05:17:15.000Z", "max_issues_repo_issues_event_min_datetime": "2019-02-06T16:30:23.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "APPIAN-PET/PyPA", "max_issues_repo_path": "src/surf_masking.py", "max_line_length": 131, "max_stars_count": 26, "max_stars_repo_head_hexsha": "316c9675e6049443dd06556b046c79e5a82e5d3e", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "APPIAN-PET/PyPA", "max_stars_repo_path": "src/surf_masking.py", "max_stars_repo_stars_event_max_datetime": "2022-02-10T14:41:28.000Z", "max_stars_repo_stars_event_min_datetime": "2017-10-20T22:45:19.000Z", "num_tokens": 1222, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 4884 }
# This file was generated, do not modify it. # hide
using DelimitedFiles
using Plots
# Read the comma-separated integer matrix produced by the C benchmark
# (presumably Mandelbrot iteration counts, given "mandC" — confirm against
# the generating script).
img = readdlm("/tmp/benchC.txt",',',Int)
M,N = img |> size
# Square-pixel heatmap whose axes span the matrix dimensions.
p=heatmap(1:N, 1:M, img, aspect_ratio=1, size=(400,400))
savefig(p, joinpath(@OUTPUT, "mandC.png")) #hide
{ "alphanum_fraction": 0.6907630522, "author": null, "avg_line_length": 35.5714285714, "converted": null, "ext": "jl", "file": null, "hexsha": "b972c5be510a0745bd0a7ffc82bc8d8855eff801", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "fbd0a383a796b1a72c4115e9431ade48d4cbebe9", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "terasakisatoshi/MathWebiner.jl", "max_forks_repo_path": "__site/assets/benchmark/benchmark/code/plotmandC.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "fbd0a383a796b1a72c4115e9431ade48d4cbebe9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "terasakisatoshi/MathWebiner.jl", "max_issues_repo_path": "__site/assets/benchmark/benchmark/code/plotmandC.jl", "max_line_length": 56, "max_stars_count": 2, "max_stars_repo_head_hexsha": "fbd0a383a796b1a72c4115e9431ade48d4cbebe9", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "terasakisatoshi/MathWebiner.jl", "max_stars_repo_path": "__site/assets/benchmark/benchmark/code/plotmandC.jl", "max_stars_repo_stars_event_max_datetime": "2020-09-16T22:57:04.000Z", "max_stars_repo_stars_event_min_datetime": "2020-03-14T16:50:08.000Z", "num_tokens": 86, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 249 }