// Copyright Ross MacGregor 2013
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include "AsioExpressTest/pch.hpp"
#include <boost/test/unit_test.hpp>
#include <boost/shared_ptr.hpp>
#include <cstring> // memset/memcmp used in the tests below
#include "AsioExpress/Testing/AutoCompletionHandler.hpp"
#include "AsioExpress/Testing/TestCompletionHandler.hpp"
#include "AsioExpress/ErrorCodes.hpp"
#include "AsioExpress/MessagePort/DataBuffer.hpp"
using namespace AsioExpress;
using namespace AsioExpress::MessagePort;
using namespace AsioExpress::Testing;
using namespace std;
BOOST_AUTO_TEST_SUITE(DataBufferTest)
BOOST_AUTO_TEST_CASE(Test_Construction)
{
DataBuffer buffer(25);
BOOST_CHECK_EQUAL( buffer.Size(), 25);
BOOST_CHECK( buffer.Get() != 0 );
memset(buffer.Get(), 255, 25);
}
BOOST_AUTO_TEST_CASE(Test_Str_Construction)
{
char const *const text = "foobar";
std::string str(text);
DataBuffer buffer(str);
BOOST_CHECK_EQUAL( buffer.Size(), 6);
BOOST_CHECK( buffer.Get() != 0 );
BOOST_CHECK( memcmp(buffer.Get(), text, 6) == 0 );
}
BOOST_AUTO_TEST_CASE(Test_Assign)
{
char const * const text = "123456789a";
DataBuffer buffer;
buffer.Assign(text, 10);
BOOST_CHECK_EQUAL( buffer.Size(), 10);
BOOST_CHECK( buffer.Get() != 0 );
BOOST_CHECK( memcmp(buffer.Get(), text, 10) == 0 );
}
BOOST_AUTO_TEST_CASE(Test_Copy)
{
DataBuffer b1(25);
DataBuffer b2(b1);
BOOST_CHECK_EQUAL( b2.Size(), b1.Size() );
BOOST_CHECK( b1.Get() != b2.Get() );
BOOST_CHECK( memcmp(b1.Get(), b2.Get(), b1.Size()) == 0 );
}
BOOST_AUTO_TEST_CASE(Test_Assignment)
{
DataBuffer b1(25);
DataBuffer b2;
b2 = b1;
BOOST_CHECK_EQUAL( b2.Size(), b1.Size() );
BOOST_CHECK( b1.Get() != b2.Get() );
BOOST_CHECK( memcmp(b1.Get(), b2.Get(), b1.Size()) == 0 );
}
BOOST_AUTO_TEST_SUITE_END()
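// ---------------------------------------------------------------------------
// Sketch only (hypothetical, not the real header): the DataBuffer interface
// these tests imply. See AsioExpress/MessagePort/DataBuffer.hpp for the
// actual declaration; names and types below are inferred from the assertions
// above (e.g. the copy test requires distinct storage with equal contents).
//
//   class DataBuffer
//   {
//   public:
//     DataBuffer();                                 // empty buffer
//     DataBuffer(std::size_t size);                 // allocates size bytes
//     DataBuffer(std::string const & str);          // copies str's bytes
//     DataBuffer(DataBuffer const & other);         // deep copy: Get() differs,
//                                                   // contents compare equal
//     DataBuffer & operator=(DataBuffer const & other);
//     void Assign(char const * data, std::size_t size);
//     std::size_t Size() const;
//     char * Get() const;                           // pointer to owned storage
//   };
// ---------------------------------------------------------------------------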
[STATEMENT]
lemma mult_imp_msetext_huet:
assumes
irrefl: "irreflp gt" and trans: "transp gt" and
in_mult: "(mset xs, mset ys) \<in> mult {(x, y). gt y x}"
shows "msetext_huet gt ys xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. msetext_huet gt ys xs
[PROOF STEP]
using in_mult
[PROOF STATE]
proof (prove)
using this:
(mset xs, mset ys) \<in> mult {(x, y). gt y x}
goal (1 subgoal):
1. msetext_huet gt ys xs
[PROOF STEP]
unfolding mult_def msetext_huet_def Let_def
[PROOF STATE]
proof (prove)
using this:
(mset xs, mset ys) \<in> (mult1 {(x, y). gt y x})\<^sup>+
goal (1 subgoal):
1. mset xs \<noteq> mset ys \<and> (\<forall>x. count (mset ys) x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count (mset ys) y))
[PROOF STEP]
proof (induct rule: trancl_induct)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>y. (mset xs, y) \<in> mult1 {(x, y). gt y x} \<Longrightarrow> mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))
2. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
case (base Ys)
[PROOF STATE]
proof (state)
this:
(mset xs, Ys) \<in> mult1 {(x, y). gt y x}
goal (2 subgoals):
1. \<And>y. (mset xs, y) \<in> mult1 {(x, y). gt y x} \<Longrightarrow> mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))
2. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
(mset xs, Ys) \<in> mult1 {(x, y). gt y x}
goal (1 subgoal):
1. mset xs \<noteq> Ys \<and> (\<forall>x. count Ys x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count Ys y))
[PROOF STEP]
using irrefl
[PROOF STATE]
proof (prove)
using this:
(mset xs, Ys) \<in> mult1 {(x, y). gt y x}
irreflp gt
goal (1 subgoal):
1. mset xs \<noteq> Ys \<and> (\<forall>x. count Ys x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count Ys y))
[PROOF STEP]
unfolding irreflp_def msetext_huet_def Let_def mult1_def
[PROOF STATE]
proof (prove)
using this:
(mset xs, Ys) \<in> {(N, M). \<exists>a M0 K. M = add_mset a M0 \<and> N = M0 + K \<and> (\<forall>b. b \<in># K \<longrightarrow> (b, a) \<in> {(x, y). gt y x})}
\<forall>a. \<not> gt a a
goal (1 subgoal):
1. mset xs \<noteq> Ys \<and> (\<forall>x. count Ys x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count Ys y))
[PROOF STEP]
by (auto 0 3 split: if_splits)
[PROOF STATE]
proof (state)
this:
mset xs \<noteq> Ys \<and> (\<forall>x. count Ys x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count Ys y))
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
case (step Ys Zs)
[PROOF STATE]
proof (state)
this:
(mset xs, Ys) \<in> (mult1 {(x, y). gt y x})\<^sup>+
(Ys, Zs) \<in> mult1 {(x, y). gt y x}
mset xs \<noteq> Ys \<and> (\<forall>x. count Ys x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count Ys y))
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
have asym[unfolded antisym_def, simplified]: "antisymp gt"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. antisymp gt
[PROOF STEP]
by (rule irreflp_transp_imp_antisymP[OF irrefl trans])
[PROOF STATE]
proof (state)
this:
antisymp gt
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
from step(3)
[PROOF STATE]
proof (chain)
picking this:
mset xs \<noteq> Ys \<and> (\<forall>x. count Ys x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count Ys y))
[PROOF STEP]
have "mset xs \<noteq> Ys" and
**: "\<And>x. count Ys x < count (mset xs) x \<Longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count Ys y)"
[PROOF STATE]
proof (prove)
using this:
mset xs \<noteq> Ys \<and> (\<forall>x. count Ys x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count Ys y))
goal (1 subgoal):
1. mset xs \<noteq> Ys &&& (\<And>x. count Ys x < count (mset xs) x \<Longrightarrow> \<exists>y. gt y x \<and> count (mset xs) y < count Ys y)
[PROOF STEP]
by blast+
[PROOF STATE]
proof (state)
this:
mset xs \<noteq> Ys
count Ys ?x < count (mset xs) ?x \<Longrightarrow> \<exists>y. gt y ?x \<and> count (mset xs) y < count Ys y
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
from step(2)
[PROOF STATE]
proof (chain)
picking this:
(Ys, Zs) \<in> mult1 {(x, y). gt y x}
[PROOF STEP]
obtain M0 a K where
*: "Zs = M0 + {#a#}" "Ys = M0 + K" "a \<notin># K" "\<And>b. b \<in># K \<Longrightarrow> gt a b"
[PROOF STATE]
proof (prove)
using this:
(Ys, Zs) \<in> mult1 {(x, y). gt y x}
goal (1 subgoal):
1. (\<And>M0 a K. \<lbrakk>Zs = M0 + {#a#}; Ys = M0 + K; a \<notin># K; \<And>b. b \<in># K \<Longrightarrow> gt a b\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using irrefl
[PROOF STATE]
proof (prove)
using this:
(Ys, Zs) \<in> mult1 {(x, y). gt y x}
irreflp gt
goal (1 subgoal):
1. (\<And>M0 a K. \<lbrakk>Zs = M0 + {#a#}; Ys = M0 + K; a \<notin># K; \<And>b. b \<in># K \<Longrightarrow> gt a b\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding mult1_def irreflp_def
[PROOF STATE]
proof (prove)
using this:
(Ys, Zs) \<in> {(N, M). \<exists>a M0 K. M = add_mset a M0 \<and> N = M0 + K \<and> (\<forall>b. b \<in># K \<longrightarrow> (b, a) \<in> {(x, y). gt y x})}
\<forall>a. \<not> gt a a
goal (1 subgoal):
1. (\<And>M0 a K. \<lbrakk>Zs = M0 + {#a#}; Ys = M0 + K; a \<notin># K; \<And>b. b \<in># K \<Longrightarrow> gt a b\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
Zs = M0 + {#a#}
Ys = M0 + K
a \<notin># K
?b \<in># K \<Longrightarrow> gt a ?b
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
have "mset xs \<noteq> Zs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mset xs \<noteq> Zs
[PROOF STEP]
proof (cases "K = {#}")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. K = {#} \<Longrightarrow> mset xs \<noteq> Zs
2. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
K = {#}
goal (2 subgoals):
1. K = {#} \<Longrightarrow> mset xs \<noteq> Zs
2. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
K = {#}
goal (1 subgoal):
1. mset xs \<noteq> Zs
[PROOF STEP]
using \<open>mset xs \<noteq> Ys\<close> ** *(1,2) irrefl[unfolded irreflp_def]
[PROOF STATE]
proof (prove)
using this:
K = {#}
mset xs \<noteq> Ys
count Ys ?x < count (mset xs) ?x \<Longrightarrow> \<exists>y. gt y ?x \<and> count (mset xs) y < count Ys y
Zs = M0 + {#a#}
Ys = M0 + K
\<forall>a. \<not> gt a a
goal (1 subgoal):
1. mset xs \<noteq> Zs
[PROOF STEP]
by (metis One_nat_def add.comm_neutral count_single diff_union_cancelL lessI
minus_multiset.rep_eq not_add_less2 plus_multiset.rep_eq union_commute zero_less_diff)
[PROOF STATE]
proof (state)
this:
mset xs \<noteq> Zs
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
K \<noteq> {#}
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
K \<noteq> {#}
goal (1 subgoal):
1. mset xs \<noteq> Zs
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
obtain aa :: "'a \<Rightarrow> 'a" where
f1: "\<forall>a. \<not> count Ys a < count (mset xs) a \<or> gt (aa a) a \<and>
count (mset xs) (aa a) < count Ys (aa a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>aa. \<forall>a. \<not> count Ys a < count (mset xs) a \<or> gt (aa a) a \<and> count (mset xs) (aa a) < count Ys (aa a) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using "**"
[PROOF STATE]
proof (prove)
using this:
count Ys ?x < count (mset xs) ?x \<Longrightarrow> \<exists>y. gt y ?x \<and> count (mset xs) y < count Ys y
goal (1 subgoal):
1. (\<And>aa. \<forall>a. \<not> count Ys a < count (mset xs) a \<or> gt (aa a) a \<and> count (mset xs) (aa a) < count Ys (aa a) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by moura
[PROOF STATE]
proof (state)
this:
\<forall>a. \<not> count Ys a < count (mset xs) a \<or> gt (aa a) a \<and> count (mset xs) (aa a) < count Ys (aa a)
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
have f2: "K + M0 = Ys"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. K + M0 = Ys
[PROOF STEP]
using "*"(2) union_ac(2)
[PROOF STATE]
proof (prove)
using this:
Ys = M0 + K
?M + ?N = ?N + ?M
goal (1 subgoal):
1. K + M0 = Ys
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
K + M0 = Ys
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
have f3: "\<And>aa. count Zs aa = count M0 aa + count {#a#} aa"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>aa. count Zs aa = count M0 aa + count {#a#} aa
[PROOF STEP]
by (simp add: "*"(1))
[PROOF STATE]
proof (state)
this:
count Zs ?aa = count M0 ?aa + count {#a#} ?aa
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
have f4: "\<And>a. count Ys a = count K a + count M0 a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a. count Ys a = count K a + count M0 a
[PROOF STEP]
using f2
[PROOF STATE]
proof (prove)
using this:
K + M0 = Ys
goal (1 subgoal):
1. \<And>a. count Ys a = count K a + count M0 a
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
count Ys ?a = count K ?a + count M0 ?a
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
have f5: "count K a = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. count K a = 0
[PROOF STEP]
by (meson "*"(3) count_inI)
[PROOF STATE]
proof (state)
this:
count K a = 0
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
have "Zs - M0 = {#a#}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Zs - M0 = {#a#}
[PROOF STEP]
using "*"(1) add_diff_cancel_left'
[PROOF STATE]
proof (prove)
using this:
Zs = M0 + {#a#}
?a + ?b - ?a = ?b
goal (1 subgoal):
1. Zs - M0 = {#a#}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Zs - M0 = {#a#}
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Zs - M0 = {#a#}
[PROOF STEP]
have f6: "count M0 a < count Zs a"
[PROOF STATE]
proof (prove)
using this:
Zs - M0 = {#a#}
goal (1 subgoal):
1. count M0 a < count Zs a
[PROOF STEP]
by (metis in_diff_count union_single_eq_member)
[PROOF STATE]
proof (state)
this:
count M0 a < count Zs a
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
have "\<And>m. count m a = 0 + count m a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>m. count m a = 0 + count m a
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
count ?m a = 0 + count ?m a
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
count ?m a = 0 + count ?m a
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
count ?m a = 0 + count ?m a
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
assume "aa a \<noteq> a"
[PROOF STATE]
proof (state)
this:
aa a \<noteq> a
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
aa a \<noteq> a
[PROOF STEP]
have "mset xs = Zs \<and> count Zs (aa a) < count K (aa a) + count M0 (aa a) \<longrightarrow>
count K (aa a) + count M0 (aa a) < count Zs (aa a)"
[PROOF STATE]
proof (prove)
using this:
aa a \<noteq> a
goal (1 subgoal):
1. mset xs = Zs \<and> count Zs (aa a) < count K (aa a) + count M0 (aa a) \<longrightarrow> count K (aa a) + count M0 (aa a) < count Zs (aa a)
[PROOF STEP]
using f5 f3 f2 f1 "*"(4) asym
[PROOF STATE]
proof (prove)
using this:
aa a \<noteq> a
count K a = 0
count Zs ?aa = count M0 ?aa + count {#a#} ?aa
K + M0 = Ys
\<forall>a. \<not> count Ys a < count (mset xs) a \<or> gt (aa a) a \<and> count (mset xs) (aa a) < count Ys (aa a)
?b \<in># K \<Longrightarrow> gt a ?b
antisymp gt
goal (1 subgoal):
1. mset xs = Zs \<and> count Zs (aa a) < count K (aa a) + count M0 (aa a) \<longrightarrow> count K (aa a) + count M0 (aa a) < count Zs (aa a)
[PROOF STEP]
by (auto dest!: antisympD)
[PROOF STATE]
proof (state)
this:
mset xs = Zs \<and> count Zs (aa a) < count K (aa a) + count M0 (aa a) \<longrightarrow> count K (aa a) + count M0 (aa a) < count Zs (aa a)
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
aa a \<noteq> a \<Longrightarrow> mset xs = Zs \<and> count Zs (aa a) < count K (aa a) + count M0 (aa a) \<longrightarrow> count K (aa a) + count M0 (aa a) < count Zs (aa a)
goal (1 subgoal):
1. K \<noteq> {#} \<Longrightarrow> mset xs \<noteq> Zs
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
count ?m a = 0 + count ?m a
aa a \<noteq> a \<Longrightarrow> mset xs = Zs \<and> count Zs (aa a) < count K (aa a) + count M0 (aa a) \<longrightarrow> count K (aa a) + count M0 (aa a) < count Zs (aa a)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
count ?m a = 0 + count ?m a
aa a \<noteq> a \<Longrightarrow> mset xs = Zs \<and> count Zs (aa a) < count K (aa a) + count M0 (aa a) \<longrightarrow> count K (aa a) + count M0 (aa a) < count Zs (aa a)
goal (1 subgoal):
1. mset xs \<noteq> Zs
[PROOF STEP]
using f6 f5 f4 f1
[PROOF STATE]
proof (prove)
using this:
count ?m a = 0 + count ?m a
aa a \<noteq> a \<Longrightarrow> mset xs = Zs \<and> count Zs (aa a) < count K (aa a) + count M0 (aa a) \<longrightarrow> count K (aa a) + count M0 (aa a) < count Zs (aa a)
count M0 a < count Zs a
count K a = 0
count Ys ?a = count K ?a + count M0 ?a
\<forall>a. \<not> count Ys a < count (mset xs) a \<or> gt (aa a) a \<and> count (mset xs) (aa a) < count Ys (aa a)
goal (1 subgoal):
1. mset xs \<noteq> Zs
[PROOF STEP]
by (metis less_imp_not_less)
[PROOF STATE]
proof (state)
this:
mset xs \<noteq> Zs
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
mset xs \<noteq> Zs
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
mset xs \<noteq> Zs
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
mset xs \<noteq> Zs
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
mset xs \<noteq> Zs
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
assume "count Zs a \<le> count (mset xs) a"
[PROOF STATE]
proof (state)
this:
count Zs a \<le> count (mset xs) a
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
with \<open>a \<notin># K\<close>
[PROOF STATE]
proof (chain)
picking this:
a \<notin># K
count Zs a \<le> count (mset xs) a
[PROOF STEP]
have "count Ys a < count (mset xs) a"
[PROOF STATE]
proof (prove)
using this:
a \<notin># K
count Zs a \<le> count (mset xs) a
goal (1 subgoal):
1. count Ys a < count (mset xs) a
[PROOF STEP]
unfolding *(1,2)
[PROOF STATE]
proof (prove)
using this:
a \<notin># K
count (M0 + {#a#}) a \<le> count (mset xs) a
goal (1 subgoal):
1. count (M0 + K) a < count (mset xs) a
[PROOF STEP]
by (auto simp add: not_in_iff)
[PROOF STATE]
proof (state)
this:
count Ys a < count (mset xs) a
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
with **
[PROOF STATE]
proof (chain)
picking this:
count Ys ?x < count (mset xs) ?x \<Longrightarrow> \<exists>y. gt y ?x \<and> count (mset xs) y < count Ys y
count Ys a < count (mset xs) a
[PROOF STEP]
obtain z where z: "gt z a" "count (mset xs) z < count Ys z"
[PROOF STATE]
proof (prove)
using this:
count Ys ?x < count (mset xs) ?x \<Longrightarrow> \<exists>y. gt y ?x \<and> count (mset xs) y < count Ys y
count Ys a < count (mset xs) a
goal (1 subgoal):
1. (\<And>z. \<lbrakk>gt z a; count (mset xs) z < count Ys z\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
gt z a
count (mset xs) z < count Ys z
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
with *
[PROOF STATE]
proof (chain)
picking this:
Zs = M0 + {#a#}
Ys = M0 + K
a \<notin># K
?b \<in># K \<Longrightarrow> gt a ?b
gt z a
count (mset xs) z < count Ys z
[PROOF STEP]
have "count Ys z \<le> count Zs z"
[PROOF STATE]
proof (prove)
using this:
Zs = M0 + {#a#}
Ys = M0 + K
a \<notin># K
?b \<in># K \<Longrightarrow> gt a ?b
gt z a
count (mset xs) z < count Ys z
goal (1 subgoal):
1. count Ys z \<le> count Zs z
[PROOF STEP]
using asym
[PROOF STATE]
proof (prove)
using this:
Zs = M0 + {#a#}
Ys = M0 + K
a \<notin># K
?b \<in># K \<Longrightarrow> gt a ?b
gt z a
count (mset xs) z < count Ys z
antisymp gt
goal (1 subgoal):
1. count Ys z \<le> count Zs z
[PROOF STEP]
by (auto simp: intro: count_inI dest: antisympD)
[PROOF STATE]
proof (state)
this:
count Ys z \<le> count Zs z
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
with z
[PROOF STATE]
proof (chain)
picking this:
gt z a
count (mset xs) z < count Ys z
count Ys z \<le> count Zs z
[PROOF STEP]
have "\<exists>z. gt z a \<and> count (mset xs) z < count Zs z"
[PROOF STATE]
proof (prove)
using this:
gt z a
count (mset xs) z < count Ys z
count Ys z \<le> count Zs z
goal (1 subgoal):
1. \<exists>z. gt z a \<and> count (mset xs) z < count Zs z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>z. gt z a \<and> count (mset xs) z < count Zs z
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
count Zs a \<le> count (mset xs) a \<Longrightarrow> \<exists>z. gt z a \<and> count (mset xs) z < count Zs z
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
note count_a = this
[PROOF STATE]
proof (state)
this:
count Zs a \<le> count (mset xs) a \<Longrightarrow> \<exists>z. gt z a \<and> count (mset xs) z < count Zs z
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
count Zs a \<le> count (mset xs) a \<Longrightarrow> \<exists>z. gt z a \<and> count (mset xs) z < count Zs z
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
fix y
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
assume count_y: "count Zs y < count (mset xs) y"
[PROOF STATE]
proof (state)
this:
count Zs y < count (mset xs) y
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
have "\<exists>x. gt x y \<and> count (mset xs) x < count Zs x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
proof (cases "y = a")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. y = a \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
2. y \<noteq> a \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
y = a
goal (2 subgoals):
1. y = a \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
2. y \<noteq> a \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
with count_y count_a
[PROOF STATE]
proof (chain)
picking this:
count Zs y < count (mset xs) y
count Zs a \<le> count (mset xs) a \<Longrightarrow> \<exists>z. gt z a \<and> count (mset xs) z < count Zs z
y = a
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
count Zs y < count (mset xs) y
count Zs a \<le> count (mset xs) a \<Longrightarrow> \<exists>z. gt z a \<and> count (mset xs) z < count Zs z
y = a
goal (1 subgoal):
1. \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>x. gt x y \<and> count (mset xs) x < count Zs x
goal (1 subgoal):
1. y \<noteq> a \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. y \<noteq> a \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
y \<noteq> a
goal (1 subgoal):
1. y \<noteq> a \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
proof (cases "y \<in># K")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. y \<in># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
2. y \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
y \<in># K
goal (2 subgoals):
1. y \<in># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
2. y \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
with *(4)
[PROOF STATE]
proof (chain)
picking this:
?b \<in># K \<Longrightarrow> gt a ?b
y \<in># K
[PROOF STEP]
have "gt a y"
[PROOF STATE]
proof (prove)
using this:
?b \<in># K \<Longrightarrow> gt a ?b
y \<in># K
goal (1 subgoal):
1. gt a y
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
gt a y
goal (2 subgoals):
1. y \<in># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
2. y \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
gt a y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
gt a y
goal (1 subgoal):
1. \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
by (cases "count Zs a \<le> count (mset xs) a",
blast dest: count_a trans[unfolded transp_def, rule_format], auto dest: count_a)
[PROOF STATE]
proof (state)
this:
\<exists>x. gt x y \<and> count (mset xs) x < count Zs x
goal (1 subgoal):
1. y \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. y \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
y \<notin># K
goal (1 subgoal):
1. y \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
with \<open>y \<noteq> a\<close>
[PROOF STATE]
proof (chain)
picking this:
y \<noteq> a
y \<notin># K
[PROOF STEP]
have "count Zs y = count Ys y"
[PROOF STATE]
proof (prove)
using this:
y \<noteq> a
y \<notin># K
goal (1 subgoal):
1. count Zs y = count Ys y
[PROOF STEP]
unfolding *(1,2)
[PROOF STATE]
proof (prove)
using this:
y \<noteq> a
y \<notin># K
goal (1 subgoal):
1. count (M0 + {#a#}) y = count (M0 + K) y
[PROOF STEP]
by (simp add: not_in_iff)
[PROOF STATE]
proof (state)
this:
count Zs y = count Ys y
goal (1 subgoal):
1. y \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
with count_y **
[PROOF STATE]
proof (chain)
picking this:
count Zs y < count (mset xs) y
count Ys ?x < count (mset xs) ?x \<Longrightarrow> \<exists>y. gt y ?x \<and> count (mset xs) y < count Ys y
count Zs y = count Ys y
[PROOF STEP]
obtain z where z: "gt z y" "count (mset xs) z < count Ys z"
[PROOF STATE]
proof (prove)
using this:
count Zs y < count (mset xs) y
count Ys ?x < count (mset xs) ?x \<Longrightarrow> \<exists>y. gt y ?x \<and> count (mset xs) y < count Ys y
count Zs y = count Ys y
goal (1 subgoal):
1. (\<And>z. \<lbrakk>gt z y; count (mset xs) z < count Ys z\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
gt z y
count (mset xs) z < count Ys z
goal (1 subgoal):
1. y \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
proof (cases "z \<in># K")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. z \<in># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
2. z \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
z \<in># K
goal (2 subgoals):
1. z \<in># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
2. z \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
with *(4)
[PROOF STATE]
proof (chain)
picking this:
?b \<in># K \<Longrightarrow> gt a ?b
z \<in># K
[PROOF STEP]
have "gt a z"
[PROOF STATE]
proof (prove)
using this:
?b \<in># K \<Longrightarrow> gt a ?b
z \<in># K
goal (1 subgoal):
1. gt a z
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
gt a z
goal (2 subgoals):
1. z \<in># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
2. z \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
with z(1)
[PROOF STATE]
proof (chain)
picking this:
gt z y
gt a z
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
gt z y
gt a z
goal (1 subgoal):
1. \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
by (cases "count Zs a \<le> count (mset xs) a")
(blast dest: count_a not_le_imp_less trans[unfolded transp_def, rule_format])+
[PROOF STATE]
proof (state)
this:
\<exists>x. gt x y \<and> count (mset xs) x < count Zs x
goal (1 subgoal):
1. z \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. z \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
z \<notin># K
goal (1 subgoal):
1. z \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
with \<open>a \<notin># K\<close>
[PROOF STATE]
proof (chain)
picking this:
a \<notin># K
z \<notin># K
[PROOF STEP]
have "count Ys z \<le> count Zs z"
[PROOF STATE]
proof (prove)
using this:
a \<notin># K
z \<notin># K
goal (1 subgoal):
1. count Ys z \<le> count Zs z
[PROOF STEP]
unfolding *
[PROOF STATE]
proof (prove)
using this:
a \<notin># K
z \<notin># K
goal (1 subgoal):
1. count (M0 + K) z \<le> count (M0 + {#a#}) z
[PROOF STEP]
by (auto simp add: not_in_iff)
[PROOF STATE]
proof (state)
this:
count Ys z \<le> count Zs z
goal (1 subgoal):
1. z \<notin># K \<Longrightarrow> \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
with z
[PROOF STATE]
proof (chain)
picking this:
gt z y
count (mset xs) z < count Ys z
count Ys z \<le> count Zs z
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
gt z y
count (mset xs) z < count Ys z
count Ys z \<le> count Zs z
goal (1 subgoal):
1. \<exists>x. gt x y \<and> count (mset xs) x < count Zs x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>x. gt x y \<and> count (mset xs) x < count Zs x
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<exists>x. gt x y \<and> count (mset xs) x < count Zs x
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<exists>x. gt x y \<and> count (mset xs) x < count Zs x
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<exists>x. gt x y \<and> count (mset xs) x < count Zs x
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
count Zs ?y2 < count (mset xs) ?y2 \<Longrightarrow> \<exists>x. gt x ?y2 \<and> count (mset xs) x < count Zs x
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(mset xs, y) \<in> (mult1 {(x, y). gt y x})\<^sup>+; (y, z) \<in> mult1 {(x, y). gt y x}; mset xs \<noteq> y \<and> (\<forall>x. count y x < count (mset xs) x \<longrightarrow> (\<exists>ya. gt ya x \<and> count (mset xs) ya < count y ya))\<rbrakk> \<Longrightarrow> mset xs \<noteq> z \<and> (\<forall>x. count z x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count z y))
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
mset xs \<noteq> Zs
count Zs ?y2 < count (mset xs) ?y2 \<Longrightarrow> \<exists>x. gt x ?y2 \<and> count (mset xs) x < count Zs x
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
mset xs \<noteq> Zs
count Zs ?y2 < count (mset xs) ?y2 \<Longrightarrow> \<exists>x. gt x ?y2 \<and> count (mset xs) x < count Zs x
goal (1 subgoal):
1. mset xs \<noteq> Zs \<and> (\<forall>x. count Zs x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count Zs y))
[PROOF STEP]
unfolding msetext_huet_def Let_def
[PROOF STATE]
proof (prove)
using this:
mset xs \<noteq> Zs
count Zs ?y2 < count (mset xs) ?y2 \<Longrightarrow> \<exists>x. gt x ?y2 \<and> count (mset xs) x < count Zs x
goal (1 subgoal):
1. mset xs \<noteq> Zs \<and> (\<forall>x. count Zs x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count Zs y))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
mset xs \<noteq> Zs \<and> (\<forall>x. count Zs x < count (mset xs) x \<longrightarrow> (\<exists>y. gt y x \<and> count (mset xs) y < count Zs y))
goal:
No subgoals!
[PROOF STEP]
qed
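Informal summary of the lemma just proved: if $(\mathrm{mset}\ xs, \mathrm{mset}\ ys)$ lies in the standard multiset extension mult of the relation {(x, y). gt y x}, then the Huet--Oppen count condition msetext_huet gt ys xs holds. As unfolded at the start of the proof, writing $\#_x(zs)$ for count (mset zs) x, that condition reads
\[ \mathrm{mset}\ xs \neq \mathrm{mset}\ ys \;\wedge\; \forall x.\ \#_x(ys) < \#_x(xs) \longrightarrow \bigl(\exists y.\ gt\ y\ x \wedge \#_y(xs) < \#_y(ys)\bigr) \]
that is, wherever xs carries strictly more copies of an element x than ys does, ys must carry strictly more copies of some element y with gt y x.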
[STATEMENT]
lemma WTBlock_code:
"\<And>is_lub. \<lbrakk> is_type P T; is_lub,P,E(V \<mapsto> T) \<turnstile> e :: T';
case vo of None \<Rightarrow> True | \<lfloor>v\<rfloor> \<Rightarrow> case typeof v of None \<Rightarrow> False | Some T' \<Rightarrow> P \<turnstile> T' \<le> T \<rbrakk>
\<Longrightarrow> is_lub,P,E \<turnstile> {V:T=vo; e} :: T'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>is_lub. \<lbrakk>is_type P T; is_lub,P,E(V \<mapsto> T) \<turnstile> e :: T'; case vo of None \<Rightarrow> True | \<lfloor>v\<rfloor> \<Rightarrow> case typeof v of None \<Rightarrow> False | \<lfloor>T'\<rfloor> \<Rightarrow> P \<turnstile> T' \<le> T\<rbrakk> \<Longrightarrow> is_lub,P,E \<turnstile> {V:T=vo; e} :: T'
[PROOF STEP]
by(auto)
"""
GCN model for relation extraction.
"""
import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from model.BiLSTM import EncoderLSTM
from model.localEncoder import LocalEncoder
from model.classifier import EntityClassifier
class DocClassifier(nn.Module):
def __init__(self, opt, emb_matrix=None):
super().__init__()
self.opt = opt
# Doc Embedding
self.emb = nn.Embedding(opt['vocab_size'], opt['emb_dim'], padding_idx=0)
self.init_embeddings(emb_matrix)
self.ner_embed = nn.Embedding(opt['ner_num'], opt['ner_dim']) if opt['ner_dim'] > 0 else None
self.coref_embed = nn.Embedding(opt['max_len'], opt['coref_dim']) if opt['coref_dim'] > 0 else None
self.in_drop = nn.Dropout(opt['input_dropout'])
# Global Encoder
in_dim = opt['emb_dim'] + opt['ner_dim'] + opt['coref_dim']
self.global_encoder = EncoderLSTM(in_dim, opt['hidden_dim'] // 2, 1, True, True, opt['rnn_dropout'], False, True)
self.rnn_drop = nn.Dropout(opt['rnn_dropout'])
self.entity_classifier = EntityClassifier(opt['hidden_dim'], opt['num_class'], opt['mlp_dropout'])
# Local Encoder
self.local_encoder = LocalEncoder(in_dim, opt['hidden_dim'], opt['input_dropout'], opt['rnn_dropout'],
self.emb, self.ner_embed, self.coref_embed, opt['max_len'], opt['pos_dim'])
def init_embeddings(self, emb_matrix):
        # Decide which parts of the word embedding get updated; in practice only the UNK row is ever updated
def keep_partial_grad(grad, topk):
"""
Keep only the topk rows of grads.
"""
assert topk < grad.size(0)
grad.data[topk:].zero_()
return grad
if emb_matrix is None:
self.emb.weight.data[1:, :].uniform_(-1.0, 1.0)
else:
emb_matrix = torch.from_numpy(emb_matrix)
self.emb.weight.data.copy_(emb_matrix)
# decide finetuning
if self.opt['topn'] <= 0:
print("Do not finetune word embedding layer.")
self.emb.weight.requires_grad = False
elif self.opt['topn'] < self.opt['vocab_size']:
print("Finetune top {} word embeddings.".format(self.opt['topn']))
self.emb.weight.register_hook(lambda x: keep_partial_grad(x, self.opt['topn']))
else:
print("Finetune all embeddings.")
def forward(self, sample):
"""
words : (bz, doc_len) int64
ner : (bz, doc_len) int64
coref : (bz, doc_len) int64
length : (bz) int64
head_mask: (bz, doc_len) float32
tail_mask: (bz, doc_len) float32
sent_doc_mp: (bz, sent_num, doc_len) float32
sent_num : (bz, ) int64
support_set: (bz, sent_num) #float32
"""
for k, v in sample.items():
sample[k] = v.cuda()
words, ner, coref, length, head_mask, tail_mask = \
sample['words_id'], sample['ners_id'], sample['coref_id'], sample['doc_len'], \
sample['head_mask'], sample['tail_mask']
# Doc Embedding
word_embs = self.emb(words)
embs = [word_embs]
if self.opt['ner_dim'] > 0:
embs += [self.ner_embed(ner)]
if self.opt['coref_dim'] > 0:
embs += [self.coref_embed(coref)]
embs = torch.cat(embs, dim=2)
embs = self.in_drop(embs)
doc_outputs, last_h = self.global_encoder(embs, length)
        doc_outputs = self.rnn_drop(doc_outputs)  # (bz, doc_len, hs)
head_mask.unsqueeze_(1)
tail_mask.unsqueeze_(1)
global_head = torch.matmul(head_mask, doc_outputs).squeeze(1) # (bz, hs)
global_tail = torch.matmul(tail_mask, doc_outputs).squeeze(1) # (bz, hs)
# Document Representation
# Local Encoder
local_head, local_tail = self.local_encoder(sample, doc_outputs, embs, head_mask, tail_mask)
# Classifier
pred = self.entity_classifier(global_head, global_tail, local_head, local_tail, sample['path2ins'])
return pred
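# ---------------------------------------------------------------------------
# Minimal wiring sketch. The hyperparameter values below are hypothetical; the
# opt keys are exactly those read in __init__/forward above. Assumes the
# repo's src/ directory is on sys.path so the `model.*` imports at the top of
# this file resolve, and a CUDA device is available (forward() calls .cuda()
# on every tensor in `sample`).
if __name__ == '__main__':
    opt = {
        'vocab_size': 30000, 'emb_dim': 100, 'ner_dim': 20, 'ner_num': 7,
        'coref_dim': 20, 'max_len': 512, 'pos_dim': 20, 'hidden_dim': 256,
        'input_dropout': 0.5, 'rnn_dropout': 0.3, 'mlp_dropout': 0.2,
        'num_class': 97, 'topn': 100,
    }
    model = DocClassifier(opt)  # emb_matrix=None -> randomly initialised embeddings
    # `sample` is a dict of tensors with the keys documented in forward():
    # words_id, ners_id, coref_id, doc_len, head_mask, tail_mask, path2ins, ...
    #   pred = model(sample)
# ---------------------------------------------------------------------------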
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype PEP-agnostic type hint getter utility unit tests.**
This submodule unit tests the public API of the private
:mod:`beartype._util.hint.utilhintget` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ....................{ TESTS }....................
# Ignore non-fatal NumPy warnings emitted by this unit test under Python < 3.8.
def test_get_hint_reduced() -> None:
'''
Test the :func:`beartype._util.hint.utilhintget.get_hint_reduced` getter.
'''
# Defer heavyweight imports.
from beartype.roar import (
BeartypeDecorHintNonpepNumpyException,
BeartypeDecorHintNonpepNumpyWarning,
)
from beartype.vale import IsEqual
from beartype._cave._cavefast import NoneType
from beartype._data.hint.pep.sign.datapepsigns import (
HintSignAnnotated,
)
from beartype._util.hint.utilhintget import get_hint_reduced
from beartype._util.hint.pep.utilpepget import get_hint_pep_sign
from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_8
from beartype_test.a00_unit.data.hint.pep.proposal.data_pep484 import (
PEP484_GENERICS_IO)
from beartype_test.util.mod.pytmodimport import (
import_module_typing_any_attr_or_none_safe)
from beartype_test.util.mod.pytmodtest import (
is_package_numpy,
is_package_numpy_typing_ndarray_deep,
)
from pytest import raises, warns
# "typing.Annotated" type hint factory imported from either the "typing" or
# "typing_extensions" modules if importable *OR* "None" otherwise.
Annotated = import_module_typing_any_attr_or_none_safe('Annotated')
# Assert this getter preserves a PEP-noncompliant object as is.
assert get_hint_reduced(int) is int
# Assert this getter reduces "None" to "type(None)".
assert get_hint_reduced(None) is NoneType
# If the active Python interpreter targets Python >= 3.8 and thus declares
# the "typing.Protocol" superclass...
if IS_PYTHON_AT_LEAST_3_8:
# Defer version-specific imports.
from typing import Protocol
# For each PEP 484-compliant "typing" IO generic superclass...
for pep484_generic_io in PEP484_GENERICS_IO:
# Equivalent PEP 544-compliant protocol reduced from this generic.
pep544_protocol_io = get_hint_reduced(pep484_generic_io)
# Assert this protocol actually is a protocol.
assert issubclass(pep544_protocol_io, Protocol)
# If this factory is importable, the active Python interpreter supports PEP
# 593. In this case...
if Annotated is not None:
# Assert this getter reduces a beartype-agnostic metahint to the
# lower-level hint it annotates.
assert get_hint_reduced(Annotated[int, 42]) is int
# If the active Python interpreter targets Python >= 3.8 and thus
# supports the __class_getitem__() dunder method required by beartype
# validators...
if IS_PYTHON_AT_LEAST_3_8:
# Assert this getter preserves a beartype-specific metahint as is.
leaves_when_laid = Annotated[
str, IsEqual['In their noonday dreams.']]
assert get_hint_reduced(leaves_when_laid) is leaves_when_laid
# If a recent version of NumPy is importable...
if is_package_numpy():
# Defer third party imports.
from numpy import float64, ndarray
from numpy.typing import NDArray
# If beartype deeply supports "numpy.typing.NDArray" type hints under
# the active Python interpreter...
if is_package_numpy_typing_ndarray_deep():
# Beartype validator reduced from such a hint.
ndarray_reduced = get_hint_reduced(NDArray[float64])
# Assert this validator is a "typing{_extensions}.Annotated" hint.
assert get_hint_pep_sign(ndarray_reduced) is HintSignAnnotated
# Assert that reducing a "numpy.typing.NDArray" type hint
# erroneously subscripted by an object *NOT* reducible to a data
# type raises the expected exception.
with raises(BeartypeDecorHintNonpepNumpyException):
get_hint_reduced(NDArray[
'From my wings are shaken the dews that waken'])
# Else, beartype only shallowly supports "numpy.typing.NDArray" type
# hints under the active Python interpreter. In this case...
else:
# Assert this getter reduces such a hint to the untyped NumPy array
# class "numpy.ndarray" with a non-fatal warning.
with warns(BeartypeDecorHintNonpepNumpyWarning):
assert get_hint_reduced(NDArray[float64]) is ndarray
import numpy as np
import torch
from base import BaseTrainer
from utils.bbox import Toolbox
class Trainer(BaseTrainer):
"""
Trainer class
Note:
Inherited from BaseTrainer.
self.optimizer is by default handled by BaseTrainer based on config.
"""
def __init__(self, model, loss, metrics, resume, config,
data_loader, toolbox: Toolbox, valid_data_loader=None, train_logger=None):
super(Trainer, self).__init__(model, loss, metrics, resume, config, train_logger)
self.config = config
self.batch_size = data_loader.batch_size
self.data_loader = data_loader
self.valid_data_loader = valid_data_loader
        self.valid = self.valid_data_loader is not None
self.log_step = int(np.sqrt(self.batch_size))
self.toolbox = toolbox
def _to_tensor(self, *tensors):
t = []
for __tensors in tensors:
t.append(__tensors.to(self.device))
return t
def _eval_metrics(self, output, target, mask):
acc_metrics = np.zeros(len(self.metrics))
output = output.cpu().data.numpy()
target = target.cpu().data.numpy()
output = np.argmax(output, axis=1)
for i, metric in enumerate(self.metrics):
acc_metrics[i] += metric(output, target)
return acc_metrics
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Current training epoch.
:return: A log that contains all information you want to save.
Note:
If you have additional information to record, for example:
> additional_log = {"x": x, "y": y}
merge it with log before return. i.e.
> log = {**log, **additional_log}
> return log
The metrics in log must have the key 'metrics'.
"""
self.model.train()
total_loss = 0
total_metrics = np.zeros(len(self.metrics))
for batch_idx, gt in enumerate(self.data_loader):
img, score_map, geo_map, training_mask, transcript = gt
img, score_map, geo_map, training_mask = self._to_tensor(img, score_map, geo_map, training_mask)
recog_map = None
self.optimizer.zero_grad()
pred_score_map, pred_geo_map, pred_recog_map = self.model(img)
loss = self.loss(score_map, pred_score_map, geo_map, pred_geo_map, pred_recog_map, recog_map, training_mask)
loss.backward()
self.optimizer.step()
total_loss += loss.item()
#total_metrics += self._eval_metrics(output, target)
total_metrics += 0
if self.verbosity >= 2 and batch_idx % self.log_step == 0:
self.logger.info('Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}'.format(
epoch,
batch_idx * self.data_loader.batch_size,
len(self.data_loader) * self.data_loader.batch_size,
100.0 * batch_idx / len(self.data_loader),
loss.item()))
log = {
'loss': total_loss / len(self.data_loader),
'metrics': (total_metrics / len(self.data_loader)).tolist()
}
if self.valid:
val_log = self._valid_epoch()
log = {**log, **val_log}
return log
def _valid_epoch(self):
"""
Validate after training an epoch
:return: A log that contains information about validation
Note:
The validation metrics in log must have the key 'val_metrics'.
"""
self.model.eval()
total_val_loss = 0
total_val_metrics = np.zeros(len(self.metrics))
with torch.no_grad():
for batch_idx, gt in enumerate(self.valid_data_loader):
img, score_map, geo_map, training_mask, transcript = gt
img, score_map, geo_map, training_mask = self._to_tensor(img, score_map, geo_map, training_mask)
recog_map = None
pred_score_map, pred_geo_map, pred_recog_map = self.model(img)
loss = self.loss(score_map, pred_score_map, geo_map, pred_geo_map, pred_recog_map, recog_map,
training_mask)
total_val_loss += loss.item()
output = (pred_score_map, pred_geo_map, pred_recog_map)
target = (score_map, geo_map, recog_map)
#total_val_metrics += self._eval_metrics(output, target, training_mask) #TODO: should add AP metric
return {
'val_loss': total_val_loss / len(self.valid_data_loader),
'val_metrics': (total_val_metrics / len(self.valid_data_loader)).tolist()
}
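# Illustrative usage sketch (the objects below are assumed to be built by the
# project's config/data-loading code, and BaseTrainer is assumed to drive
# _train_epoch once per epoch from its train() loop):
#
#   trainer = Trainer(model, loss, metrics, resume=False, config=config,
#                     data_loader=train_loader, toolbox=toolbox,
#                     valid_data_loader=val_loader, train_logger=logger)
#   trainer.train()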
|
{"hexsha": "a4f5e59ec269e5851c20f9b92d43377f82976804", "size": 4796, "ext": "py", "lang": "Python", "max_stars_repo_path": "trainer/trainer.py", "max_stars_repo_name": "lijian10086/FOTS.PyTorch", "max_stars_repo_head_hexsha": "3bbebbf08f077bc998b5f0bbc5311e920a5f675d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "trainer/trainer.py", "max_issues_repo_name": "lijian10086/FOTS.PyTorch", "max_issues_repo_head_hexsha": "3bbebbf08f077bc998b5f0bbc5311e920a5f675d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trainer/trainer.py", "max_forks_repo_name": "lijian10086/FOTS.PyTorch", "max_forks_repo_head_hexsha": "3bbebbf08f077bc998b5f0bbc5311e920a5f675d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1782945736, "max_line_length": 120, "alphanum_fraction": 0.595704754, "include": true, "reason": "import numpy", "num_tokens": 1034}
|
\documentclass[a4paper,10pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[margin=1in,includefoot,footskip=30pt]{geometry}
\usepackage{tikz}
\usepackage{bbm}
\usepackage{amsmath}
\usepackage{hyperref}
\newcommand\D{\mathrm{d}}
\newcommand\R{\mathbbm{R}}
\newcommand\Convolution{\ast}
\newcommand\Correlation{\star}
\newcommand\Reversed[1]{\overline{#1}}
\newcommand\Indicator[1]{\mathbbm{1}_{#1}}
\DeclareMathOperator{\nzd}{nzd}
\DeclareMathOperator{\IntervalLeft}{left}
\DeclareMathOperator{\IntervalRight}{right}
\DeclareMathOperator{\IntervalCenter}{center}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\title{Convolution formulas using shapes}
\author{Francois Gindraud}
\date{V3}
\begin{document}
\maketitle
Formulas for convolutions become unwieldy very quickly, especially if the functions being convolved are defined by parts.
This document shows the \emph{shape} approach to convolutions: functions are decomposed into simple geometrical shapes, and the convolution of the functions is defined by combining the convolutions of simple shapes.
Pros:
\begin{itemize}
\item Manipulate only small formulas for each shape.
\item Individual shape convolution formulas are easier to check than huge flat formulas with lots of min/max.
\item More readable implementation of formulas into code.
\item High level simplifications are easier to detect if some shapes cancel with others.
\end{itemize}
Cons:
\begin{itemize}
\item Missed simplification opportunities, if the simplifications only happen between the formulas of different shapes.
\item More verbose than the flat strategy.
\end{itemize}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Notations}
\paragraph{Convolution}
Convolution is noted using the $\Convolution$ operator, with the usual definition:
\[ \left[ f \Convolution g \right](x) = \int_\R f(x - t) g(t) \D t \]
Convolution is commutative and associative.
\paragraph{Time shift / translation}
The time shift combinator with shift $h$ moves the shape $f$ forward along the time axis by $h$.
For a shape $f$ "centered" on $c$, $\tau_h f$ is centered on $c+h$.
\[ \tau_h f(x) = f(x - h) \]
Time shifts combine additively:
\[ \tau_h (\tau_{h'} f) = \tau_{h+h'} f \]
Convolution of shifted functions is the shift of convolutions:
\[
\left[ \tau_h f \Convolution g \right](x) =
\int f((x - t) - h) g(t) \D t =
\left[ f \Convolution g \right](x-h) =
[\tau_h (f \Convolution g)](x)
\]
\paragraph{Scaling}
The scaling combinator scales the shape $f$ by a factor $c$ on the vertical axis.
\[ \left[ c \times f \right] (x) = c \times f(x) \]
Scaling combines multiplicatively with itself, and can be swapped with time shift:
\[ c \times (c' \times f) = (c \times c') \times f \]
\[ c \times (\tau_h f) = \tau_h (c \times f) \]
Convolution of scaled functions is a scaled convolution:
\[ (c \times f) \Convolution g = c \times (f \Convolution g) \]
\paragraph{Time inversion}
A \emph{reversed} function is the shape mirrored about the vertical axis:
\[ \Reversed{f}(x) = f(-x) \]
Simplifications:
\[ \Reversed{\Reversed{f}} = f \]
\[ \Reversed{c \times f} = c \times \Reversed{f} \]
\[ \Reversed{\tau_h f}(x) = [\tau_h f](-x) = f(-x-h) = \Reversed{f}(x+h) = [\tau_{-h} \Reversed{f}] (x) \]
Convolution:
\[
\Reversed{f \Convolution g}(x) = \int f(-x-t) g(t) \D t = \int f(-x+T) g(-T) \D T = \left[ \Reversed{f} \Convolution \Reversed{g} \right] (x)
\]
\paragraph{Sum}
The sum combinator sums the value of multiple shapes.
\[ [f+g](x) = f(x) + g(x) \]
Distributes with most operations:
\[ f \Convolution (g + h) = f \Convolution g + f \Convolution h \]
\[ \tau_s (f + g) = \tau_s f + \tau_s g \]
\[ \Reversed{f + g} = \Reversed{f} + \Reversed{g} \]
\paragraph{Cross-correlation}
Cross correlation is noted using the $\Correlation$ operator, with the usual definition:
\[ \left[ f \Correlation g \right] (x) = \int_\R f(t-x) g(t) \D t \]
It can be expressed using the convolution:
\[ \left[ f \Correlation g \right] (x) = \int \Reversed{f}(x-t) g(t) \D t = \left[ \Reversed{f} \Convolution g \right] (x) \]
Not commutative:
\[
g \Correlation f = \Reversed{g} \Convolution f = \Reversed{\Reversed{f}} \Convolution \Reversed{g} =
\Reversed{\Reversed{f} \Convolution g} = \Reversed{f \Correlation g}
\]
Relation with shifting is simple but not symmetric:
\[
\tau_h f \Correlation g = \Reversed{\tau_h f} \Convolution g = \tau_{-h} \Reversed{f} \Convolution g =
\tau_{-h} (\Reversed{f} \Convolution g) = \tau_{-h} (f \Correlation g)
\]
\[ f \Correlation \tau_h g = \Reversed{f} \Convolution \tau_h g = \tau_h (\Reversed{f} \Convolution g) = \tau_h (f \Correlation g) \]
\paragraph{Interval operations}
For interval $I = [a,b]$:
\[ h \in \R \quad I + h = h + I = [a + h, b + h] \]
\[ -I = [-b, -a] \]
\[ \IntervalLeft(I) = a \qquad \IntervalCenter(I) = \frac{a+b}{2} \qquad \IntervalRight(I) = b \]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Basic shapes}
\paragraph{Indicator function}
The \emph{indicator function} for an interval $I = [a,b]$ is a rectangle of height $1$, horizontally from $a$ to $b$.
The interval can also have open bounds, changing the function value at the bounds, but not the overall shape.
\begin{center}\begin{tikzpicture}
\draw[very thin,color=gray] (-0.5, -0.5) grid (5.5, 1.7);
\draw[->] (-0.5,0) -- (5.5,0) node[right] {$x$};
\draw[->] (0,-0.5) -- (0, 1.7);
\node[below right] at (0, 0) {$0$};
\begin{scope}[color=blue]
\draw (-0.5,0) -- (1.5,0) coordinate(a) -- (1.5,1) -- (3.5,1) -- (3.5,0) coordinate(b) -- (5.5,0);
\node[below] at (a) {$a$};
\node[below] at (b) {$b$};
\end{scope}
\end{tikzpicture}\end{center}
\[
\Indicator{[a,b]}(x) = \begin{cases}
1 & x \in \left[ a, b \right] \\
0 & \text{otherwise}
\end{cases}
\]
\[ \Reversed{\Indicator{[a,b]}}(x) = \Indicator{[a,b]}(-x) = \Indicator{[-b, -a]}(x) \]
\[ \tau_h \Indicator{[a,b]}(x) = \Indicator{[a,b]}(x - h) = \Indicator{[a + h, b + h]}(x) = \Indicator{[a,b] + h}(x) \]
\paragraph{Polynom fragment}
A \emph{polynom fragment} is a function whose value is a polynom on an interval $I$, and $0$ elsewhere.
The polynom itself is defined with an origin at the center of $I$: this choice reduces floating point errors during computation, as the polynom is evaluated as close to its origin as possible.
\begin{center}\begin{tikzpicture}
\draw[very thin,color=gray] (-0.5, -0.5) grid (5.5, 2.7);
\draw[->] (-0.5,0) -- (5.5,0) node[right] {$x$};
\draw[->] (0,-0.5) -- (0, 2.7);
\node[below right] at (0, 0) {$0$}; \begin{scope}[color=blue]
\draw (-0.5,0) -- (1.5,0) coordinate(a) parabola[bend at end] (2.5,2) parabola (3.5,1) -- (3.5,0) coordinate(b) -- (5.5,0);
\node[below] at (a) {$a$};
\node[below] at (b) {$b$};
\end{scope}
\end{tikzpicture}\end{center}
\[
P_{I,a}(x) = \begin{cases}
\sum_{0 \le k \le N_a} a_k (x - \IntervalCenter(I))^k & x \in I \\
0 & \text{otherwise}
\end{cases}
\]
Shifting does not change coefficients:
\[ \begin{split}
\tau_h P_{I,a}(x)
&= \sum_{0 \le k \le N_a} a_k ((x - h) - \IntervalCenter(I))^k \qquad x - h \in I \\
&= \sum_{0 \le k \le N_a} a_k (x - (h + \IntervalCenter(I)))^k \qquad x \in I + h \\
&= \sum_{0 \le k \le N_a} a_k (x - \IntervalCenter(I + h))^k \qquad x \in I + h \\
&= P_{I + h, a}(x)
\end{split} \]
Reversion inverses the interval and odd coefficients:
\[ \begin{split}
\Reversed{P_{I,a}}(x)
&= \sum_{0 \le k \le N_a} a_k (-x - \IntervalCenter(I))^k \qquad -x \in I \\
&= \sum_{0 \le k \le N_a} a_k (-(x - \IntervalCenter(-I)))^k \qquad x \in -I \\
&= \sum_{0 \le k \le N_a} a_k (-1)^k (x - \IntervalCenter(-I))^k \qquad x \in -I \\
&= P_{-I, (a_k (-1)^k)_{0 \le k \le N_a}}(x)
\end{split} \]
An indicator is a special case of polynom fragment:
\[ \Indicator{I} = P_{I, a} \text{ with } (a_k) = \{a_0 = 1\} \]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Polynomial Convolution \& Cross-correlation}
Convolution and cross correlation are defined on polynom fragments, which covers many cases.
In particular it covers indicator functions, which are polynom fragments of degree $0$.
\subsection{Convolution}
The target is to compute
\[
C = P_{I_a,a} \Convolution P_{I_b,b}
\]
for $(a_k)_{0 \le k \le N_a}$ and $(b_j)_{0 \le j \le N_b}$.
The polynoms can be expressed as translations of 0-centered polynoms, and thus we only need to compute the convolution of 0-centered polynoms:
\[
P_{I_a,a} = \tau_{s_a} P_{[-w_a,w_a],a}
\]
\[
C = \tau_{s_a + s_b} (P_{[-w_a,w_a],a} \Convolution P_{[-w_b,w_b],b})
\]
In the following paragraphs the shift $s_a + s_b$ is ignored for simplicity; it is applied afterwards to all 3 polynom fragments resulting from the convolution.
We also suppose that $w_b \le w_a$, using the commutativity of the convolution.
\[ \begin{split}
C(x)
&= \left[ P_{[-w_a,w_a],a} \Convolution P_{[-w_b,w_b],b} \right](x) \\
&= \int P_{[-w_a,w_a],a}(x-t) P_{[-w_b,w_b],b}(t) \D t \\
&= \int\limits_{\substack{x - t \in [-w_a,w_a] \\ t \in [-w_b,w_b]}} \left( \sum_{0 \le k \le N_a} a_k (x-t)^k \right) \left( \sum_{0 \le j \le N_b} b_j t^j \right) \D t \\
&= \int\limits_{t \in [x-w_a,x+w_a] \cap [-w_b,w_b]} \left( \sum_{0 \le i \le k \le N_a} a_k \binom{k}{i} x^{k-i} (-t)^i \right) \left( \sum_{0 \le j \le N_b} b_j t^j \right) \D t \\
&= \int\limits_{t \in I(x)} \sum_{\substack{0 \le i \le k \le N_a \\ 0 \le j \le N_b}} a_k b_j \binom{k}{i} (-1)^i x^{k-i} t^{i+j} \D t \\
&= \sum_{\substack{0 \le i \le k \le N_a \\ 0 \le j \le N_b}} a_k b_j \binom{k}{i} (-1)^i x^{k-i} \int\limits_{I(x)} t^{i+j} \D t \\
&= \sum_{\substack{0 \le i \le k \le N_a \\ 0 \le j \le N_b}} a_k b_j \binom{k}{i} \frac{(-1)^i}{i+j+1} x^{k-i} \left[ t^{i+j+1} \right]^{I(x)}
\end{split} \]
Thus the value of $C(x)$ is given by integrating a polynomial expression over an interval depending on $x$: $I(x) = [x-w_a,x+w_a] \cap [-w_b,w_b]$.
By using $w_b \le w_a$, we can compute the value of $I(x)$, which is defined piecewise in 5 cases:
\begin{center}\begin{tikzpicture}
\coordinate (Origin) at (0,0);
\draw (0,0.1) -- (0,-0.1) node[below] {$0$};
\draw[->] (-6.5,0) -- (6,0) node[right] {$x$};
\begin{scope}[color=blue]
\draw (-1,1.6) coordinate(lwq) -- +(2,0) coordinate(rwq);
\draw[dashed] (lwq) -- (lwq |- Origin) node[below] {$-w_b$};
\draw[dashed] (rwq) -- (rwq |- Origin) node[below] {$w_b$};
\end{scope}
\begin{scope}[color=red]
\draw (-5.5,0.6) coordinate(x1l) node[left] {$1$} -- +(3,0) coordinate(x1r);
\draw[dashed] (x1l) -- (x1l |- Origin) node[below] {$x_1-w_a$};
\draw[dashed] (x1r) -- (x1r |- Origin) node[below] {$x_1+w_a$};
\draw (-3.5,0.8) node[left] {$2$} -- +(3,0);
\draw (-1.5,1) node[left] {$3$} -- +(3,0);
\draw (0.3,1.2) -- +(3,0) node[right] {$4$};
\draw (2,1.4) -- +(3,0) node[right] {$5$} ;
\end{scope}
\end{tikzpicture}\end{center}
\[ I(x) = \begin{cases}
\emptyset & x < -w_a-w_b \\
[-w_b, x+w_a] & x \in [-w_a-w_b, -w_a+w_b] \qquad \text{"left"} \\
[-w_b, w_b] & x \in [-w_a+w_b, w_a-w_b] \qquad \text{"center"} \\
[x-w_a, w_b] & x \in [w_a-w_b, w_a+w_b] \qquad \text{"right"} \\
\emptyset & x > w_a + w_b
\end{cases} \]
From the definition of $C(x)$ and the values of $I(x)$, we can determine that $C$ is defined piecewise with 3 components defined on consecutive intervals.
Moreover, as the bounds of $I(x)$ are polynomial in $x$, the 3 components are polynoms of $x$.
These 3 components are called \emph{left}, \emph{center} and \emph{right} parts.
By choosing component bounds of the form $]a,b]$, components are non-overlapping, and we have $C(x) = C_\text{left}(x) + C_\text{center}(x) + C_\text{right}(x)$.
As an additional constraint, all components are written as valid \emph{polynom fragments} : this lets us reuse the formulas to compute convolutions of convolutions like $P_1 \Convolution P_2 \Convolution P_3$.
Due to the properties of the integral, $C(x)$ is continuous, and the various components must match at the border points.
This gives us some easily testable properties to check the implementations of formulas:
\[ C_\text{left}(-w_a-w_b) = 0 \]
\[ C_\text{left}(-w_a+w_b) = C_\text{center}(-w_a+w_b) \]
\[ C_\text{center}(w_a-w_b) = C_\text{right}(w_a-w_b) \]
\[ C_\text{right}(w_a+w_b) = 0 \]
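As a worked example, take two indicators ($N_a = N_b = 0$, $a_0 = b_0 = 1$): the convolution is simply the length of $I(x)$, giving the classic trapezoid shape.
\[
C_\text{left}(x) = x + w_a + w_b \qquad
C_\text{center}(x) = 2 w_b \qquad
C_\text{right}(x) = w_a + w_b - x
\]
These expressions are recovered from the general formulas below with $i = j = k = l = 0$, and satisfy the four border properties: both outer borders evaluate to $0$, and both inner borders evaluate to $2 w_b$.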
\paragraph{Left part}
The left part must be of the form:
\[
C_\text{left}(x) = P_{]-w_a-w_b, -w_a+w_b], c}(x) = \sum_k c_k (x + w_a)^k
\]
Using the integral version of $C(x)$ and the interval $I(x)$:
\[
C_\text{left}(x) = \sum_{\substack{0 \le i \le k \le N_a \\ 0 \le j \le N_b}}
a_k b_j \binom{k}{i} \frac{(-1)^i}{i+j+1} x^{k-i} \left( (x+w_a)^{i+j+1} - (-w_b)^{i+j+1} \right)
\]
Using binomial decomposition:
\[
x^{k-i} = ((x+w_a) - w_a)^{k-i} = \sum_{0 \le l \le k-i} \binom{k-i}{l} (x+w_a)^{k-i-l} (-w_a)^l
\]
Injecting that into $C_\text{left}(x)$:
\[
C_\text{left}(x) = \sum_{\substack{0 \le i \le k \le N_a \\ 0 \le j \le N_b \\ 0 \le l \le k-i}}
a_k b_j \binom{k}{i} \binom{k-i}{l} \frac{(-1)^{i+l}}{i+j+1} w_a^l \left( (x+w_a)^{k+j+1-l} - (-w_b)^{i+j+1} (x+w_a)^{k-i-l} \right)
\]
This expression is a polynom in $x+w_a$, which was the target.
Due to its complexity, there is probably no nice closed expression for each coefficient.
However each coefficient value can be computed easily, by accumulating the factors in front of $(x+w_a)^{k+j+1-l}$ and $(x+w_a)^{k-i-l}$ for all values of $i,j,k,l$.
The degree of $C_\text{left}(x)$ is $N_a + N_b + 1$ (max of $k+j+1-l$ on the iteration space).
\paragraph{Center part}
The center part must be of the form:
\[
C_\text{center}(x) = P_{]-(w_a-w_b), w_a-w_b], c}(x) = \sum_k c_k x^k
\]
Using the integral version of $C(x)$ and the interval $I(x)$:
\[ \begin{split}
C_\text{center}(x)
&= \sum_{\substack{0 \le i \le k \le N_a \\ 0 \le j \le N_b}} a_k b_j \binom{k}{i} \frac{(-1)^i}{i+j+1} x^{k-i} \left( w_b^{i+j+1} - (-w_b)^{i+j+1} \right) \\
&= \sum_{\substack{0 \le i \le k \le N_a \\ 0 \le j \le N_b}} a_k b_j \binom{k}{i} \frac{(-1)^i}{i+j+1} w_b^{i+j+1} (1 + (-1)^{i+j}) x^{k-i}
\end{split} \]
Coefficients can be computed by accumulations as for the left part.
The degree of $C_\text{center}(x)$ is $N_a$ (max of $k-i$ on the iteration space).
As an optimisation, the computation is not done if $w_a = w_b$ as the component is $0$.
\paragraph{Right part}
The right part must be of the form:
\[
C_\text{right}(x) = P_{]w_a-w_b, w_a+w_b], c}(x) = \sum_k c_k (x - w_a)^k
\]
Using the $C(x)$ and $I(x)$ expressions:
\[
C_\text{right}(x) = \sum_{\substack{0 \le i \le k \le N_a \\ 0 \le j \le N_b}}
a_k b_j \binom{k}{i} \frac{(-1)^i}{i+j+1} x^{k-i} \left( w_b^{i+j+1} - (x-w_a)^{i+j+1} \right)
\]
\]
Using the binomial decomposition below and injecting it into $C_\text{right}(x)$:
\[
x^{k-i} = ((x-w_a) + w_a)^{k-i} = \sum_{0 \le l \le k-i} \binom{k-i}{l} (x-w_a)^{k-i-l} w_a^l
\]
\[
C_\text{right}(x) = \sum_{\substack{0 \le i \le k \le N_a \\ 0 \le j \le N_b \\ 0 \le l \le k-i}}
a_k b_j \binom{k}{i} \binom{k-i}{l} \frac{(-1)^i}{i+j+1} w_a^l \left( w_b^{i+j+1} (x-w_a)^{k-i-l} - (x-w_a)^{k+j+1-l} \right)
\]
This is a polynom of $x-w_a$ of degree $N_a + N_b + 1$.
As for the other parts, the coefficients can be computed by accumulation.
\subsection{Cross correlation}
The cross correlation is computed using the reverse rule:
\[ P \Correlation Q = \Reversed{P} \Convolution Q \]
For polynoms, the cost of the extra reverse operation is negligible compared to the convolution itself.
\subsection{Piecewise polynoms}
This is done using the distributivity of convolution (and cross correlation) over addition:
\[ \left[\sum_i P_i \right] \Convolution \left[\sum_j Q_j \right] = \sum_{i,j} P_i \Convolution Q_j \]
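If $f$ is made of $n$ polynom fragments and $g$ of $m$ fragments, the result thus contains at most $3 n m$ polynom fragments, as each pair of fragments contributes a left, center and right part.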
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Application to the Hawkes computations}
Refer to the BRP18 paper and code for details.
This section shows that various computed values are in the form :
\[ \sum_{X_l \in N_l, X_{l'} \in N_{l'}} F(X_l-X_{l'}) \]
We define the \emph{non zero domain} of a shape $F$ as an interval of $x$ containing all non zero values of $F$: $F(x) \ne 0 \Rightarrow x \in \nzd(F)$.
This interval can be easily computed for the family of shapes and combinations introduced previously.
The sum above can then be simplified by ignoring all $X_l - X_{l'} \notin \nzd(F)$, as $F$ would be $0$.
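Concretely, the non zero domain propagates through the combinators with simple interval rules (writing $[a,b] + [c,d] = [a+c, b+d]$):
\[ \nzd(\tau_h f) = \nzd(f) + h \qquad \nzd(\Reversed{f}) = -\nzd(f) \qquad \nzd(c \times f) \subseteq \nzd(f) \]
\[ \nzd(f \Convolution g) \subseteq \nzd(f) + \nzd(g) \qquad \nzd(f \Correlation g) \subseteq \nzd(g) - \nzd(f) \]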
\subsection{Base shapes used}
Basic functions considered are all piecewise polynoms.
\paragraph{Histogram base} L2-normalized
\[ \varphi_k = \frac{1}{\sqrt{\delta}} \Indicator{\left] k\delta, (k+1)\delta \right]} \qquad 0 \le k < K \]
\paragraph{Haar wavelets} L2-normalized
\[ \varphi_{s,p} = \frac{\sqrt{2}^s}{\sqrt{\delta}} \left(
\Indicator{ \left] \frac{2p \delta}{2^{s+1}}, \frac{(2p+1) \delta}{2^{s+1}} \right] } -
\Indicator{ \left] \frac{(2p +1) \delta}{2^{s+1}}, \frac{(2p + 2) \delta}{2^{s+1}} \right] }
\right) \qquad \substack{0 \le s < \text{Nscale} \\ 0 \le p < 2^s} \]
\paragraph{Interval kernel} L1-normalized
\[ W_l = \frac{1}{\eta_l} \Indicator{\left[ 0, \eta_l \right]} \]
\subsection{Non-kernel case}
\paragraph{B}
\[ b_{k,l}^m = \sum_{X_l \in N_l, X_m \in N_m} \varphi_k (X_m - X_l) \]
Border: \[ b_0^m = |N_m| \]
\paragraph{G}
G can be expressed as a cross correlation of $\varphi$ functions:
\[ \begin{split}
\mathsf{G}_{l,l',k,k'}
& = \int \left( \sum_{X_l \in N_l} \varphi_k(x-X_l) \right) \left( \sum_{X_{l'} \in N_{l'}} \varphi_{k'}(x-X_{l'}) \right) \D x \\
& = \sum_{X_l \in N_l, X_{l'} \in N_{l'}} \int \varphi_k(x-X_l) \varphi_{k'}(x-X_{l'}) \D x \\
& = & \sum_{X_l \in N_l, X_{l'} \in N_{l'}} \int \varphi_k(x-(X_l-X_{l'})) \varphi_{k'}(x) \D x \\
& = & \sum_{X_l \in N_l, X_{l'} \in N_{l'}} [\varphi_k \Correlation \varphi_{k'}] (X_l-X_{l'})
\end{split} \]
Borders:
\[ \mathbf{g}_0 = T_{max} \]
\[ \mathbf{g}_{lk}
= \int \left( \sum_{X_l \in N_l} \varphi_k(x - X_l) \right) \D x
= \sum_{X_l \in N_l} \int \varphi_k(x - X_l) \D x
= |N_l| \int \varphi_k
\]
\paragraph{Penalty components}
$\widehat{V}_{k,l}^m$ is computed using a variation on the $\sum_{X_l \in N_l, X_{l'} \in N_{l'}} F(X_l-X_{l'})$ algorithm described above.
\[ \widehat{V}_{spont}^m = \int \D N_m = |N_m| \]
\[
\widehat{V}_{k,l}^m =
\int \left( \sum_{X_l \in N_l} \varphi_k(x-X_l) \right)^2 \D N_m =
\sum_{X_m \in N_m} \left( \sum_{X_l \in N_l} \varphi_k(X_m-X_l) \right)^2
\]
$\widehat{B}_{k,l}^m$ is computed exactly by enumerating the values of $\sum_{X_l \in N_l} \varphi_k(x - X_l)$ for an indicator.
If $\varphi_k$ is not an indicator, we approximate it by an indicator function and use the indicator sup computation.
\[ \widehat{B}_{spont}^m = 1 \]
\[ \widehat{B}_{k,l}^m = \sup |\sum_{X_l \in N_l} \varphi_k(x - X_l)| \]
\subsection{Kernel case}
\paragraph{B}
\[ b_{W,k,l}^m = \sum_{X_l \in N_l, X_m \in N_m} [W_l \Convolution W_m \Convolution \varphi_k] (X_m - X_l) \]
Border:
\[
b_{W,0}^m
= \int \left( \sum_{X_m \in N_m} W_m(x - X_m) \right) \D x
= \sum_{X_m \in N_m} \int W_m(x - X_m) \D x
= |N_m| \int W_m
\]
\paragraph{G}
\[ \begin{split}
\mathsf{G}_{W,l,l',k,k'}
& = \int \left( \sum_{X_l \in N_l} [W_l \Convolution \varphi_k](x-X_l) \right) \left( \sum_{X_{l'} \in N_{l'}} [W_{l'} \Convolution \varphi_{k'}](x-X_{l'}) \right) \D x \\
& = \sum_{X_l \in N_l, X_{l'} \in N_{l'}} \int [W_l \Convolution \varphi_k](x-X_l) [W_{l'} \Convolution \varphi_{k'}](x-X_{l'}) \D x \\
& = & \sum_{X_l \in N_l, X_{l'} \in N_{l'}} \int [W_l \Convolution \varphi_k](x-(X_l-X_{l'})) [W_{l'} \Convolution \varphi_{k'}](x) \D x \\
& = & \sum_{X_l \in N_l, X_{l'} \in N_{l'}} [(W_l \Convolution \varphi_k) \Correlation (W_{l'} \Convolution \varphi_{k'})] (X_l-X_{l'})
\end{split} \]
Borders:
\[ \mathbf{g}_{W,0} = T_{max} \]
\[ \begin{split} \mathbf{g}_{W,lk}
& = \int \left( \sum_{X_l \in N_l} [W_l \Convolution \varphi_k](x - X_l) \right) \D x \\
& = \sum_{X_l \in N_l} \int [W_l \Convolution \varphi_k](x - X_l) \D x \\
& = |N_l| \int [W_l \Convolution \varphi_k] \\
& = |N_l| \int \varphi_k \int W_l \quad \text{with Fubini's theorem}
\end{split} \]
\paragraph{Penalty components}
TODO check expression for spontaneous cases !
\[ \widehat{V}_{W,spont}^m = \int \D N_m = |N_m| \]
\[ \widehat{V}_{W,k,l}^m = \sum_{X_m \in N_m} \left( \sum_{X_l \in N_l} [W_l \Convolution W_m \Convolution \varphi_k](X_m-X_l) \right)^2 \]
\[ \widehat{B}_{W,spont}^m = 1 \]
\[ \widehat{B}_{W,k,l}^m = \sup |\sum_{X_l \in N_l} [W_l \Convolution W_m \Convolution \varphi_k](x - X_l)| \]
\subsection{Lasso parameters}
All previous values are defined for one single region.
For multiple regions they must be combined to obtain single B, G and d values.
B and G are summed over all regions:
\[ B = \sum_r B_r \]
\[ G = \sum_r G_r \]
$\widehat{V}$ is summed with a factor depending on the number of regions $R$.
\[ \widehat{V} = \frac{1}{R^2} \sum_r \widehat{V}_r \]
$\widehat{B}$ is summed over regions. FIXME in BRP18 the final $\widehat{B}$ is a single sup over the sum of all regions.
\[ \widehat{B} = \sum_r \widehat{B}_r \]
These two values are combined to compute $d$, coefficient-wise:
\[
d =
\sqrt{2 \gamma \widehat{V} \log(M + M^2 K) } +
\frac{\gamma}{3} \log(M + M^2 K) \widehat{B}
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Old specifications and proofs}
At first, the shape strategy was used by explicitly computing each base case.
This was sufficient for Hawkes computations using histograms (scaled indicator functions).
However this required a lot of manual computation and testing of each base case in code.
From these base cases it was possible to identify key properties:
\begin{itemize}
\item Convolution resulted in 3 components.
\item Each component was polynomial for polynomial input functions.
\item Parameters of the component could be derived from the input function parameters.
\end{itemize}
These properties were then used to build the generalized polynomial shape strategy.
The old proofs were used to generate the test cases for the polynomial system.
They have been removed from this document but are still available in the git history.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\end{document}
|
{"hexsha": "33d4bc2617e802d351390424fda0bc85d6e8e856", "size": 22524, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/shapes/shapes.tex", "max_stars_repo_name": "lereldarion/hawkes", "max_stars_repo_head_hexsha": "bdcb7284e6baa11e915c818b099a076084b8c3f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/shapes/shapes.tex", "max_issues_repo_name": "lereldarion/hawkes", "max_issues_repo_head_hexsha": "bdcb7284e6baa11e915c818b099a076084b8c3f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/shapes/shapes.tex", "max_forks_repo_name": "lereldarion/hawkes", "max_forks_repo_head_hexsha": "bdcb7284e6baa11e915c818b099a076084b8c3f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-10-21T11:52:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-20T09:34:17.000Z", "avg_line_length": 46.5371900826, "max_line_length": 212, "alphanum_fraction": 0.613079382, "num_tokens": 8178}
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
This example demonstrates isocurve for triangular mesh with vertex data and a
qt interface.
"""
import sys
import numpy as np
from vispy import scene
from vispy.geometry.generation import create_sphere
from vispy.color.colormap import get_colormaps
try:
from sip import setapi
setapi("QVariant", 2)
setapi("QString", 2)
except ImportError:
pass
from PyQt4 import QtGui, QtCore
class ObjectWidget(QtGui.QWidget):
"""
Widget for editing OBJECT parameters
"""
signal_objet_changed = QtCore.pyqtSignal(name='objectChanged')
def __init__(self, parent=None):
super(ObjectWidget, self).__init__(parent)
l_nbr_steps = QtGui.QLabel("Nbr Steps ")
self.nbr_steps = QtGui.QSpinBox()
self.nbr_steps.setMinimum(3)
self.nbr_steps.setMaximum(100)
self.nbr_steps.setValue(6)
self.nbr_steps.valueChanged.connect(self.update_param)
l_cmap = QtGui.QLabel("Cmap ")
self.cmap = list(get_colormaps().keys())
self.combo = QtGui.QComboBox(self)
self.combo.addItems(self.cmap)
self.combo.currentIndexChanged.connect(self.update_param)
gbox = QtGui.QGridLayout()
gbox.addWidget(l_cmap, 0, 0)
gbox.addWidget(self.combo, 0, 1)
gbox.addWidget(l_nbr_steps, 1, 0)
gbox.addWidget(self.nbr_steps, 1, 1)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(gbox)
vbox.addStretch(1.0)
self.setLayout(vbox)
def update_param(self, option):
self.signal_objet_changed.emit()
class MainWindow(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.resize(700, 500)
self.setWindowTitle('vispy example ...')
splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
self.canvas = Canvas()
self.canvas.create_native()
self.canvas.native.setParent(self)
self.props = ObjectWidget()
splitter.addWidget(self.props)
splitter.addWidget(self.canvas.native)
self.setCentralWidget(splitter)
self.props.signal_objet_changed.connect(self.update_view)
self.update_view()
def update_view(self):
        # nbr_steps, cmap
self.canvas.set_data(self.props.nbr_steps.value(),
self.props.combo.currentText())
class Canvas(scene.SceneCanvas):
def __init__(self):
scene.SceneCanvas.__init__(self, keys='interactive')
self.size = 800, 600
self.view = self.central_widget.add_view()
self.view.camera = scene.TurntableCamera()
self.radius = 2.0
mesh = create_sphere(20, 20, radius=self.radius)
vertices = mesh.get_vertices()
tris = mesh.get_faces()
cl = np.linspace(-self.radius, self.radius, 6 + 2)[1:-1]
self.iso = scene.visuals.Isoline(vertices=vertices, tris=tris,
data=vertices[:, 2],
levels=cl, color_lev='autumn',
parent=self.view.scene)
# Add a 3D axis to keep us oriented
scene.visuals.XYZAxis(parent=self.view.scene)
def set_data(self, n_levels, cmap):
self.iso.set_color(cmap)
cl = np.linspace(-self.radius, self.radius, n_levels + 2)[1:-1]
self.iso.set_levels(cl)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
appQt = QtGui.QApplication(sys.argv)
win = MainWindow()
win.show()
appQt.exec_()
|
{"hexsha": "717bec954aff2392f2b95c6ff2d9de0d53300661", "size": 3904, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/demo/scene/isocurve_for_trisurface_qt.py", "max_stars_repo_name": "izaid/vispy", "max_stars_repo_head_hexsha": "402cf95bfef88d70c9c45bb27c532ed72944e14a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/demo/scene/isocurve_for_trisurface_qt.py", "max_issues_repo_name": "izaid/vispy", "max_issues_repo_head_hexsha": "402cf95bfef88d70c9c45bb27c532ed72944e14a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/demo/scene/isocurve_for_trisurface_qt.py", "max_forks_repo_name": "izaid/vispy", "max_forks_repo_head_hexsha": "402cf95bfef88d70c9c45bb27c532ed72944e14a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0307692308, "max_line_length": 79, "alphanum_fraction": 0.5945184426, "include": true, "reason": "import numpy", "num_tokens": 847}
|
\documentclass{article}
\usepackage{fullpage}
\usepackage{amsmath}
\newcommand{\F}{\mathbf{F}}
\newcommand{\N}{\mathbf{N}}
\renewcommand{\S}{\mathbf{S}}
\newcommand{\D}{\mathbf{D}}
\newcommand{\X}{\mathbf{X}}
\newcommand{\f}{\mathbf{f}}
\renewcommand{\P}{\mathbf{P}}
\newcommand{\Ds}{\D_s}
\newcommand{\Dm}{\D_m}
\renewcommand{\o}{\mathbf{1}}
\newcommand{\I}{\mathbf{I}}
\newcommand{\tr}{\mbox{tr}}
\newcommand{\m}[1]{\begin{pmatrix}#1\end{pmatrix}}
\newcommand{\pp}[2]{\frac{\partial #1}{\partial #2}}
\begin{document}
\section{Finite Elements}
\begin{tabular}{|c|c|cc|l|}
\hline
Symbol & Definition & Dimensions & Units & Meaning \\
\hline
$\o$ & $\o = \m{1 & \cdots & 1}^T$ & $d$ & $1$ & All ones vector \\
$\S$ & $\S = \m{\I & -\o}$ & $d \times (d+1)$ & $1$ & Scatter matrix \\
\hline
$\X_m$ & $\m{X_1 & X_2 & X_3 & X_4}_m$ & $d \times (d+1)$ & $m$ & Material space element node positions \\
$\X$ & $\m{X_1 & X_2 & X_3 & X_4}$ & $d \times (d+1)$ & $m$ & World space element node positions \\
$\Dm$ & $\Dm = \X_m \S^T$ & $d \times d$ & $m$ & Material space relative node positions \\
$\Ds$ & $\Ds = \X \S^T$ & $d \times d$ & $m$ & World space relative node positions \\
$\F$ & $\F = \Ds \Dm^{-1}$ & $d \times d$ & $1$ & Deformation gradient \\
\hline
$J$ & $J = \det(\F)$ & scalar & $1$ & Jacobian \\
$I_1$ & $I_1 = \tr(\F \F^T)$ & scalar & $1$ & First invariant \\
\hline
$V$ & $V = \frac{1}{d!} \det(\Dm)$ & scalar & $m^d$ & Material space element volume \\
$\N$ & $\N = V \Dm^{-T}$ & $d \times d$ & $m^{d-1}$ & Material space area weighted normals \\
\hline
$\psi$ & - & scalar & $kg \, m^{2-d} s^{-2}$ & Energy density \\
$\phi$ & $\phi = V \psi$ & scalar & $kg \, m^2 s^{-2}$ & Potential energy of element \\
$\f$ & $\f = -\pp{\phi}{\X}$ & $d \times (d+1)$ & $kg \, m s^{-2}$ & Force on element nodes \\
$\P$ & $\f = -\P \N \S^T$ & $d \times d$ & $kg \, m^{2-d} s^{-2}$ & First Piola-Kirchhoff stress \\
\hline
\end{tabular}
\vspace{.2in}
Consider that a small position change $\delta \X$ is made. Then
\begin{eqnarray*}
0 & = & \f : \delta \X + \delta \phi \\
& = & \tr(\f \delta \X^T) + \delta \phi \\
& = & \tr(-\P \N \S^T \delta \X^T) + V \delta \psi \\
& = & -V \tr(\P \Dm^{-T} \S^T \delta \X^T) + V \delta \psi \\
\delta \psi & = & \tr(\P \Dm^{-T} \S^T \delta \X^T) \\
& = & \tr(\P \delta (\Dm^{-T} \S^T \X^T)) \\
& = & \tr(\P \delta (\Dm^{-T} \Ds^T)) \\
& = & \tr(\P \delta \F^T) \\
& = & \P : \delta \F \\
\P & = & \pp{\psi}{\F}
\end{eqnarray*}
\section{Potential Energy for Neo Hookean}
Let $\psi = \frac{\mu}{2} (I_1 - d) - \mu \ln J + \frac{\lambda}{2} \ln^2 J$.
\begin{eqnarray*}
\pp{J}{\F} & = & \pp{}{\F} \det(\F) = \det(\F) \F^{-T} = J \F^{-T} \\
\pp{I_1}{\F} & = & \pp{}{\F} \tr(\F \F^T) = 2 \F \\
\pp{\ln J}{\F} & = & \frac{1}{J} \pp{J}{\F} = \F^{-T} \\
\P = \pp{\psi}{\F} & = & \pp{}{\F} \left( \frac{\mu}{2} (I_1 - d) - \mu \ln J + \frac{\lambda}{2} \ln^2 J \right) \\
& = & \frac{\mu}{2} \pp{I_1}{\F} - \mu \pp{\ln J}{\F} + \lambda \ln J \pp{\ln J}{\F} \\
& = & \mu (\F - \F^{-T}) + \lambda \ln J \F^{-T}
\end{eqnarray*}
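As a sanity check, at the rest configuration $\F = \I$ we have $I_1 = d$, $J = 1$ and $\ln J = 0$, so $\psi = 0$ and
\[ \P = \mu (\I - \I) + \lambda \cdot 0 \cdot \I = 0, \]
i.e. the undeformed element carries no stress and no nodal forces.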
\end{document}
|
{"hexsha": "1b144ab169b484e4a713dad5c52b1888a0d64e39", "size": 3064, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "applications/physbam/physbam-lib/Public_Library/PhysBAM_Solids/PhysBAM_Deformables/Constitutive_Models/neo_hookean_energy.tex", "max_stars_repo_name": "schinmayee/nimbus", "max_stars_repo_head_hexsha": "170cd15e24a7a88243a6ea80aabadc0fc0e6e177", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2017-07-03T19:09:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-10T02:53:56.000Z", "max_issues_repo_path": "applications/physbam/physbam-lib/Public_Library/PhysBAM_Solids/PhysBAM_Deformables/Constitutive_Models/neo_hookean_energy.tex", "max_issues_repo_name": "schinmayee/nimbus", "max_issues_repo_head_hexsha": "170cd15e24a7a88243a6ea80aabadc0fc0e6e177", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "applications/physbam/physbam-lib/Public_Library/PhysBAM_Solids/PhysBAM_Deformables/Constitutive_Models/neo_hookean_energy.tex", "max_forks_repo_name": "schinmayee/nimbus", "max_forks_repo_head_hexsha": "170cd15e24a7a88243a6ea80aabadc0fc0e6e177", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-09-17T02:05:06.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-31T00:12:01.000Z", "avg_line_length": 38.3, "max_line_length": 116, "alphanum_fraction": 0.5127284595, "num_tokens": 1356}
|
import subprocess
preface_script = '''#!/bin/bash
#SBATCH -N 1
#SBATCH --partition=batch
#SBATCH -J {name}
#SBATCH -o ./SLURM_jobs/{name}.%J.out
#SBATCH -e ./SLURM_jobs/{name}.%J.err
#SBATCH --time={days}-00:00:00
#SBATCH --gres=gpu:v100:1
#SBATCH --mem=140G
#SBATCH --constraint=[gpu]
#SBATCH --cpus-per-gpu=8
#run the application:
module load anaconda3/4.4.0
source /home/hinnertr/.bashrc
conda activate ~/.conda/envs/IIC-Clustering/
module load cuda/10.0.130
'''
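# The {name} and {days} placeholders in the preface above are filled per job
# via str.format() in the helper functions below.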
def run_MNIST_Sinkhorn_job(radius=0.01,
sinkhorn_batch_size=512,
num_sinkhorn_dataloaders=5,
epochs=50,
identifier=686,
days=3):
# "CUDA_VISIBLE_DEVICES=0 " \
command = "PYTHONPATH='.' python3 src/scripts/cluster/cluster_greyscale_twohead_sinkhorn.py --model_ind {identifier} --arch ClusterNet6cTwoHead --mode IID --dataset MNIST --dataset_root datasets/MNIST_twohead --gt_k 10 --output_k_A 50 --output_k_B 10 --lamb_A 1.0 --lamb_B 1.0 --lr 0.0001 --num_epochs {num_epochs} --batch_sz 8000 --num_dataloaders 5 --num_sub_heads 5 --num_sinkhorn_dataloaders {num_sinkhorn_dataloaders} --sinkhorn_batch_size {sinkhorn_batch_size} --sinkhorn_WS_radius {WS_radius} --crop_orig --crop_other --tf1_crop centre_half --tf2_crop random --tf1_crop_sz 20 --tf2_crop_szs 16 20 24 --input_sz 24 --rot_val 25 --no_flip --head_B_epochs 2 --out_root out/MNIST_twohead_Sinkhorn".format(identifier=str(identifier),
num_epochs=str(epochs),
num_sinkhorn_dataloaders=num_sinkhorn_dataloaders,
sinkhorn_batch_size=sinkhorn_batch_size,
WS_radius = radius)
slurm_path = './SLURM_jobs/'
filename = slurm_path + "Sinkhorn_jobscript.sh"
with open(file=filename, mode='w') as f:
f.write(preface_script.format(name='Sinkhorn', days=str(days)))
f.write(command)
subprocess.call('sbatch '+filename, shell=True)
def run_MNIST_normal_job(identifier=685,
epochs=50,
days=3):
command = "CUDA_VISIBLE_DEVICES=0 " \
"PYTHONPATH='.' " \
"python3 src/scripts/cluster/cluster_greyscale_twohead.py " \
"--model_ind {identifier} " \
"--arch ClusterNet6cTwoHead " \
"--mode IID " \
"--dataset MNIST " \
"--dataset_root datasets/MNIST_twohead " \
"--gt_k 10 " \
"--output_k_A 50 " \
"--output_k_B 10 " \
"--lamb_A 1.0 " \
"--lamb_B 1.0 " \
"--lr 0.0001 " \
"--num_epochs {num_epochs} " \
"--batch_sz 8000 " \
"--num_dataloaders 5 " \
"--num_sub_heads 5 " \
"--crop_orig " \
"--crop_other " \
"--tf1_crop centre_half " \
"--tf2_crop random " \
"--tf1_crop_sz 20 " \
"--tf2_crop_szs 16 20 24 " \
"--input_sz 24 " \
"--rot_val 25 " \
"--no_flip " \
"--head_B_epochs 2 " \
"--out_root out/MNIST_twohead".format(identifier=str(identifier),
num_epochs=epochs)
slurm_path = './SLURM_jobs/'
filename = slurm_path + "NormalMNIST_jobscript.sh"
with open(file=filename, mode='w') as f:
f.write(preface_script.format(name='NormalMNIST', days=str(days)))
f.write(command)
subprocess.call('sbatch '+filename, shell=True)
if __name__ == '__main__':
run_MNIST_Sinkhorn_job(radius=0.01, sinkhorn_batch_size=16384, num_sinkhorn_dataloaders=5, days=1)
run_MNIST_Sinkhorn_job(radius=0.1, sinkhorn_batch_size=16384, num_sinkhorn_dataloaders=5, days=1)
run_MNIST_Sinkhorn_job(radius=0.001, sinkhorn_batch_size=16384, num_sinkhorn_dataloaders=5, days=1)
run_MNIST_Sinkhorn_job(radius=0.2, sinkhorn_batch_size=16384, num_sinkhorn_dataloaders=5, days=1)
run_MNIST_Sinkhorn_job(radius=1, sinkhorn_batch_size=16384, num_sinkhorn_dataloaders=5, days=1)
# run_MNIST_normal_job(days=2)
|
{"hexsha": "e273d8a658ca72096648dd7dbae0cd1a77e0b744", "size": 6989, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_SLURM_jobs.py", "max_stars_repo_name": "THinnerichs/MiS-Information-Clustering", "max_stars_repo_head_hexsha": "597c70e1283222e0e841e24f6805b967aaf3c9e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run_SLURM_jobs.py", "max_issues_repo_name": "THinnerichs/MiS-Information-Clustering", "max_issues_repo_head_hexsha": "597c70e1283222e0e841e24f6805b967aaf3c9e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_SLURM_jobs.py", "max_forks_repo_name": "THinnerichs/MiS-Information-Clustering", "max_forks_repo_head_hexsha": "597c70e1283222e0e841e24f6805b967aaf3c9e0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 69.198019802, "max_line_length": 762, "alphanum_fraction": 0.3660037201, "include": true, "reason": "import numpy", "num_tokens": 1167}
|
import numpy as np
from ..base import Model, ParamMixin, CollectionMixin
from ..feed import Feed
from ..loss import SoftmaxCrossEntropy
class NeuralNetwork(Model, CollectionMixin):
def __init__(self, layers, loss):
self.layers = layers
self.loss = loss
self.bprop_until = next((idx for idx, l in enumerate(self.layers)
if isinstance(l, ParamMixin)), 0)
self.layers[self.bprop_until].bprop_to_x = False
self.collection = self.layers
self._initialized = False
def setup(self, x_shape, y_shape=None):
# Setup layers sequentially
if self._initialized:
return
for layer in self.layers:
layer.setup(x_shape)
x_shape = layer.y_shape(x_shape)
self.loss.setup(x_shape, y_shape)
self._initialized = True
def update(self, x, y):
self.phase = 'train'
# Forward propagation
y_pred = self.fprop(x)
# Backward propagation
grad = self.loss.grad(y_pred, y)
for layer in reversed(self.layers[self.bprop_until:]):
grad = layer.bprop(grad)
return self.loss.loss(y_pred, y)
def fprop(self, x):
for layer in self.layers:
x = layer.fprop(x)
return x
def y_shape(self, x_shape):
for layer in self.layers:
x_shape = layer.y_shape(x_shape)
return x_shape
def predict(self, feed):
""" Calculate the output for the given input x. """
feed = Feed.from_any(feed)
self.phase = 'test'
if isinstance(self.loss, SoftmaxCrossEntropy):
# Add softmax from SoftmaxCrossEntropy
self.layers += [self.loss]
y = []
for x_batch, in feed.batches():
y.append(np.array(self.fprop(x_batch)))
y = np.concatenate(y)[:feed.n_samples]
if isinstance(self.loss, SoftmaxCrossEntropy):
self.layers = self.layers[:-1]
return y
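# Illustrative usage (layer instances are assumed to come from deeppy's layer
# classes; the names below are placeholders, not exact API):
#
#   net = NeuralNetwork(layers=[...], loss=SoftmaxCrossEntropy())
#   net.setup(x_shape=(128, 1, 28, 28))
#   batch_loss = net.update(x_batch, y_batch)  # forward + backward pass
#   y_prob = net.predict(test_feed)            # per-sample class probabilities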
|
{"hexsha": "974dc10865f4551169bdcc58a23764d1412d1d79", "size": 2008, "ext": "py", "lang": "Python", "max_stars_repo_path": "deeppy/feedforward/neural_network.py", "max_stars_repo_name": "purushothamgowthu/deeppy", "max_stars_repo_head_hexsha": "8cb658b33b91a0a91dea089a843941baf3f73481", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1170, "max_stars_repo_stars_event_min_datetime": "2015-01-02T17:34:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T06:22:29.000Z", "max_issues_repo_path": "deeppy/feedforward/neural_network.py", "max_issues_repo_name": "purushothamgowthu/deeppy", "max_issues_repo_head_hexsha": "8cb658b33b91a0a91dea089a843941baf3f73481", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2015-02-19T14:40:14.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-29T09:06:29.000Z", "max_forks_repo_path": "deeppy/feedforward/neural_network.py", "max_forks_repo_name": "purushothamgowthu/deeppy", "max_forks_repo_head_hexsha": "8cb658b33b91a0a91dea089a843941baf3f73481", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 390, "max_forks_repo_forks_event_min_datetime": "2015-01-02T15:24:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-07T08:49:28.000Z", "avg_line_length": 30.4242424242, "max_line_length": 73, "alphanum_fraction": 0.5951195219, "include": true, "reason": "import numpy", "num_tokens": 447}
|
import cv2 as cv
import numpy as np
import os
import sys
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path, ".."))
sys.path.append(os.path.join(path, "../../../../common/"))
sys.path.append(os.path.join(path, "../../../../common/atlas_utils"))
from constants import IMG_EXT
from acl_model import Model
from acl_image import AclImage
from acl_resource import AclResource
MODEL_WIDTH = 224
MODEL_HEIGHT = 224
out_w = 56
out_h = 56
INPUT_DIR = '../data/'
OUTPUT_DIR = '../out/'
model_path = '../model/colorization_yuv.om'
def preprocess(picPath):
bgr_img = cv.imread(picPath).astype(np.float32)
orig_shape = bgr_img.shape[:2]
bgr_img = bgr_img / 255.0
lab_img = cv.cvtColor(bgr_img, cv.COLOR_BGR2Lab)
orig_l = lab_img[:,:,0]
if not orig_l.flags['C_CONTIGUOUS']:
orig_l = np.ascontiguousarray(orig_l)
lab_img = cv.resize(lab_img, (MODEL_WIDTH, MODEL_HEIGHT)).astype(np.float32)
l_data = lab_img[:,:,0]
if not l_data.flags['C_CONTIGUOUS']:
l_data = np.ascontiguousarray(l_data)
l_data = l_data - 50
return orig_shape, orig_l, l_data
def postprocess(result_list, pic, orig_shape, orig_l):
    result_list[0] = result_list[0].reshape(1, 2, out_h, out_w).transpose(0, 2, 3, 1)
result_array = result_list[0][0]
ab_data = cv.resize(result_array, orig_shape[::-1])
result_lab = np.concatenate((orig_l[:, :, np.newaxis], ab_data), axis=2)
result_bgr = (255 * np.clip(cv.cvtColor(result_lab, cv.COLOR_Lab2BGR), 0, 1)).astype('uint8')
file_name = os.path.join(OUTPUT_DIR, "out_" + os.path.basename(pic))
cv.imwrite(file_name, result_bgr)
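# Note on postprocess(): the network predicts only the two ab chrominance
# channels on a 56x56 grid; they are reshaped to (1, 2, 56, 56), resized back
# to the input resolution, merged with the original L channel, and converted
# Lab -> BGR to produce the colorized image.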
def main():
"""
    Colorize images: initialize ACL resources, load the model, run inference.
"""
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
#ACL resource initialization
acl_resource = AclResource()
acl_resource.init()
model = Model(model_path)
images_list = [os.path.join(INPUT_DIR, img)
for img in os.listdir(INPUT_DIR)
if os.path.splitext(img)[1] in IMG_EXT]
for pic in images_list:
orig_shape, orig_l, l_data = preprocess(pic)
result_list = model.execute([l_data,])
postprocess(result_list, pic, orig_shape, orig_l)
        break  # only the first image is processed
print("Execute end")
if __name__ == '__main__':
main()
|
{"hexsha": "d8ee726ca72f5c7d09f2af12e9836c8fffee52c5", "size": 2450, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/level2_simple_inference/6_other/colorization_picture/src/colorize.py", "max_stars_repo_name": "Dedederek/samples", "max_stars_repo_head_hexsha": "31d99de20af2f7046556e0f48c4b789b99e422f8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-02-26T17:58:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T06:21:28.000Z", "max_issues_repo_path": "python/level2_simple_inference/6_other/colorization_picture/src/colorize.py", "max_issues_repo_name": "Dedederek/samples", "max_issues_repo_head_hexsha": "31d99de20af2f7046556e0f48c4b789b99e422f8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/level2_simple_inference/6_other/colorization_picture/src/colorize.py", "max_forks_repo_name": "Dedederek/samples", "max_forks_repo_head_hexsha": "31d99de20af2f7046556e0f48c4b789b99e422f8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-03-22T21:13:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-24T06:52:33.000Z", "avg_line_length": 25.7894736842, "max_line_length": 97, "alphanum_fraction": 0.6367346939, "include": true, "reason": "import numpy", "num_tokens": 642}
|
# -*- coding: utf-8 -*-
"""Compute medoids from time series."""
__author__ = ["chrisholder", "TonyBagnall"]
import numpy as np
from sktime.distances import pairwise_distance
def medoids(
X: np.ndarray,
precomputed_pairwise_distance: np.ndarray = None,
distance_metric: str = "dtw",
):
"""Compute the medoids from a panel of time series.
Parameters
----------
X : np.ndarray (3d array of shape (n_instances, n_dimensions, series_length))
Time series to compute medoids from.
precomputed_pairwise_distance: np.ndarray (2d array of shape
(n_instances, n_instances)), defaults = None
Precomputed pairwise distance between each time series in X.
distance_metric: str, defaults = 'dtw'
String of distance metric to compute.
Returns
-------
    np.ndarray (2d array of shape (n_dimensions, series_length))
        The time series that is the medoid.
"""
if precomputed_pairwise_distance is None:
precomputed_pairwise_distance = pairwise_distance(X, metric=distance_metric)
    # The medoid is the series minimizing the total distance to all others.
    return X[np.argmin(precomputed_pairwise_distance.sum(axis=0))]
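# Illustrative example (assumes sktime's dtw distance is available):
#   X = np.random.default_rng(0).normal(size=(10, 1, 50))
#   center = medoids(X, distance_metric="dtw")  # array of shape (1, 50)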
|
{"hexsha": "db320b4d167b316d9f1eb06e3fdbc81143af8aeb", "size": 1316, "ext": "py", "lang": "Python", "max_stars_repo_path": "sktime/clustering/metrics/medoids.py", "max_stars_repo_name": "mikofski/sktime", "max_stars_repo_head_hexsha": "87bdf36dbc0990f29942eb6f7fa56a8e6c5fa7b7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sktime/clustering/metrics/medoids.py", "max_issues_repo_name": "mikofski/sktime", "max_issues_repo_head_hexsha": "87bdf36dbc0990f29942eb6f7fa56a8e6c5fa7b7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sktime/clustering/metrics/medoids.py", "max_forks_repo_name": "mikofski/sktime", "max_forks_repo_head_hexsha": "87bdf36dbc0990f29942eb6f7fa56a8e6c5fa7b7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0975609756, "max_line_length": 84, "alphanum_fraction": 0.6816109422, "include": true, "reason": "import numpy", "num_tokens": 310}
|
import os, sys
import glob
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
def generate_results_table(results_dir, key, savepath=None, append=False):
flist = glob.glob(os.path.join(results_dir, '*_sender_RTT.csv'))
table = [] # key, tracename, duration, blksize, qsize, mmdelay, avg cap, avg tput, avg util, delay statistics
for fpath in tqdm(flist, desc='Parsing all output files in \'{}\''.format(results_dir)):
fname = os.path.basename(fpath)
trace, rest = fname.split('_T')
if '_Q' in rest:
duration, blksize, qsize, mmdelay, _ = rest.split('_', 4)
qsize = int(float(qsize[1:]))
else:
duration, blksize, mmdelay, _ = rest.split('_', 3)
qsize = None
duration = int(duration[1:])
mmdelay = int(mmdelay[5:])
df_rtt = pd.read_csv(fpath, header=None, names=['timestamp', 'rtt'])
df_rtt.set_index('timestamp', inplace=True)
df_rtt['seconds'] = df_rtt.index.values.round()
df_rtt = df_rtt.groupby('seconds').mean()
df_rtt.loc[:, 'rtt'] = df_rtt.rtt.values * 1000 # convert RTT to milliseconds
df_tput = pd.read_csv(fpath.replace('sender_RTT', 'uplink_mmtput'), index_col=[0])
df_tput = (df_tput * 8 / 1e6)
table.append((key, trace, duration, blksize, qsize, mmdelay, df_tput.capacity_bytes.mean(), df_tput.egress_bytes.mean(), (df_tput.egress_bytes*100 / df_tput.capacity_bytes).mean(), df_rtt.rtt.min(), df_rtt.rtt.max(), df_rtt.rtt.mean(), df_rtt.rtt.std(), df_rtt.rtt.quantile(0.25), df_rtt.rtt.quantile(0.5), df_rtt.rtt.quantile(0.75)))
df = pd.DataFrame(table, columns=['key', 'trace', 'duration', 'blksize', 'qsize', 'mmdelay', 'capacity', 'throughput', 'utilization', 'delay_min', 'delay_max', 'delay_avg', 'delay_std', 'delay_25', 'delay_50', 'delay_75'])
df = df.set_index(['key', 'trace', 'duration', 'blksize', 'qsize', 'mmdelay'])
df.sort_index(level=[0,1,2,3,4,5], inplace=True)
if savepath is None:
savepath = os.path.join('results', 'results_{}.csv'.format(key))
else:
if os.path.exists(savepath) and append:
df_existing = pd.read_csv(savepath, index_col=[0,1,2,3,4,5])
df = pd.concat([df_existing, df], axis=0)
df.to_csv(savepath, float_format='%.4f')
return df
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('results_dir', help='Directory where the sim run outputs are present')
parser.add_argument('save_key', help='Name for this group of results (usually name of the CCA)')
parser.add_argument('--savepath', '-o', help='Path to save the results table')
parser.add_argument('--append', '-a', action='store_true', help='Append to an existing table (ignored if --savepath is not provided)')
args = parser.parse_args()
if not os.path.exists('results'):
os.makedirs('results')
generate_results_table(args.results_dir, args.save_key, savepath=args.savepath, append=args.append)
|
{"hexsha": "4e1bccbd34a292866d646d656c09b4f3aa476c3f", "size": 3067, "ext": "py", "lang": "Python", "max_stars_repo_path": "make_table.py", "max_stars_repo_name": "shivariyer/mmwave_cc", "max_stars_repo_head_hexsha": "5bb2ebbff1f2bc080e5fe81a6b1ca00aed441400", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-09T15:42:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-06T12:30:11.000Z", "max_issues_repo_path": "make_table.py", "max_issues_repo_name": "shivariyer/mmwave_cc", "max_issues_repo_head_hexsha": "5bb2ebbff1f2bc080e5fe81a6b1ca00aed441400", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "make_table.py", "max_forks_repo_name": "shivariyer/mmwave_cc", "max_forks_repo_head_hexsha": "5bb2ebbff1f2bc080e5fe81a6b1ca00aed441400", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.1029411765, "max_line_length": 342, "alphanum_fraction": 0.6507988262, "include": true, "reason": "import numpy", "num_tokens": 859}
|
function h = tc_sessionPlots(tcs, plotType, titleType);
% h = tc_sessionPlots(tcs, plotType, [titleType = 1]);
%
% produce a figure showing individual subjects time course or amplitude data
% each subject's data will be a subplot of the figure
% plotType (string):
% 'wholetc' raw timecourse
% 'meanampsplustcs' left panel will show mean amps according to tc.params.ampType; right panel will show event-triggered average
% 'meantcs' event trigerred averaging mean time course
% 'deconvolvedTcs' deconvolved time courses
% 'amps' Means amplitudes of each condition according to the
% value in the tc.params.ampType filed which can be
% one of the following: 'difference' 'betas' 'relamps' or 'deconvolved';
% 'meanamps' estimated from the event-triggered average based on the
% baseline and peak periods
% 'betas' one beta per condition (no beta for baseline)estimated
% from a glm on the tc
% 'relamps' David Ress's method for amplitude estimatation
% 'deconvolvedamps' Etimate of the mean amps from the deconvolved time
% course based on the peak and baseline period
%
% titleType: 1 (default) uses tcs.sessionCode for chart titles (usually
% corresponds to the directory of the session); 2 uses tcs.description.
%
%
% ras, 02/2007.
% ras, 04/2007: was part of tc_acrossSessions, but broke off to be its own
% function, since it's pretty useful. Also allows cells of tc structs as
% inputs.
% remus 10/2007: added titleType flag.
% kgs 3/2008: added deconvolvedtc and deconvolvedamps options
%
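% Example calls (illustrative; tcs is assumed to be a struct array or cell
% array of tc structs built elsewhere):
%   h = tc_sessionPlots(tcs, 'wholetc');  % raw time courses, session codes as titles
%   h = tc_sessionPlots(tcs, 'amps', 2);  % mean amplitudes, descriptions as titles
%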
h = [];
% set titleType parameters
if exist('titleType', 'var') && titleType == 2
titleField = 'description';
else
titleField = 'sessionCode';
end
% allow input to be a cell array
if iscell(tcs)
% remove empty entries
tcs = tcs( cellfind(tcs) );
C = tcs;
tcs = tcs{1};
for i = 2:length(C)
tcs(i) = mergeStructures(tcs(i-1), C{i});
end
end
nSessions = length(tcs);
maxSubplotsPerFig = 16; % if length(tcs)>this, make multiple figures
nFigs = ceil(nSessions / maxSubplotsPerFig);
subplotsPerFig = min(nSessions, maxSubplotsPerFig);
if nFigs > 1
nRows = 5;
nCols = 4;
else
nRows = ceil( sqrt(subplotsPerFig) );
nCols = ceil( subplotsPerFig / nRows );
end
% adjustment for plot types which display >1 axis per session
if isequal(lower(plotType), 'meanampsplustcs')
nFigs = ceil(2 * nSessions / maxSubplotsPerFig); % 2 subplots per session
subplotsPerFig = maxSubplotsPerFig / 2;
nRows = nRows*2;
nCols = nCols*2;
end
for s = 1:nSessions
if mod(s, subplotsPerFig)==1
h(end+1) = figure('Color', 'w', 'Position', [100 100 600 800], ...
'Name', sprintf('%s Session Plots', tcs(1).roi.name));
end
% get text for the title for this subplot
titleText = tcs(s).params.(titleField);
titleText( titleText=='_' ) = '-'; % TeX markup--avoid subscripts
switch lower(plotType)
case {'wholetc' 'wholetcs'}
subplot(nRows, nCols, s);
tc_plotWholeTc(tcs(s), gca);
title(titleText, 'FontSize', 10);
case 'meanampsplustcs'
subplot(ceil(nRows/2), nCols, (2*s-1));
tc_barMeanAmplitudes(tcs(s), gca, 0); % this will plot the event triggered average plots
title(titleText, 'FontSize', 10);
subplot(ceil(nRows/2), nCols, (2*s));
tc_plotMeanTrials(tcs(s), gca);
            if isfield(tcs(s), 'SNR') && ~isempty(tcs(s).SNR)
snrNums = tcs(s).condNums(tcs(s).params.snrConds);
snrConds = sprintf('%i ', snrNums);
title(sprintf('SNR: %3.2f (conds [%s])', tcs(s).SNR, snrConds));
end
case {'meantc' 'meantcs'}
subplot(nRows, nCols, s);
tc_plotMeanTrials(tcs(s), gca); % this will plot what is in the meantc field
title(titleText, 'FontSize', 10);
        case {'deconvolved' 'deconvolvedtcs'} % will plot the deconvolved time courses
currTc=tcs(s);
            currTc.params.glmHRF = 0; % set glm flag for deconvolution
% deconvolve
currTc = tc_applyGlm(currTc);
% add a blank condition at the beginning betas are zero
% because this is the baseline to which the glm is
% estimated
currTc.meanTcs = [zeros(currTc.glm.nh, 1) currTc.glm.betas];
subplot(nRows, nCols, s);
tc_plotMeanTrials(currTc, gca);
title(titleText, 'FontSize', 10);
case 'amps'
subplot(nRows, nCols, s);
% 0 - Use the value in the tc.params.ampType field.
tc_barMeanAmplitudes(tcs(s), gca, 0);
title(titleText, 'FontSize', 10);
case 'meanamps'
subplot(nRows, nCols, s);
% 1 - Plot Peak/Baseline difference of event-triggered average
tc_barMeanAmplitudes(tcs(s), gca, 1);
title(titleText, 'FontSize', 10);
case 'betas'
subplot(nRows, nCols, s);
% 2 - Plot GLM Beta values
tc_barMeanAmplitudes(tcs(s), gca, 2);
title(titleText, 'FontSize', 10);
case {'relamps' 'dotproductamps' 'projamps'}
subplot(nRows, nCols, s);
% 3 - Plot Dot-Product Projection Amplitudes
tc_barMeanAmplitudes(tcs(s), gca, 3);
title(titleText, 'FontSize', 10);
        case 'deconvolvedamps'
subplot(nRows, nCols, s);
% 4 - Plot Peak/Baseline from deconvolved time courses
tc_barMeanAmplitudes(tcs(s), gca, 4);
title(titleText, 'FontSize', 10);
otherwise, warning( sprintf('Unknown plot type %s', plotType) )
end
end
return
|
{"author": "vistalab", "repo": "vistasoft", "sha": "7f0102c696c091c858233340cc7e1ab02f064d4c", "save_path": "github-repos/MATLAB/vistalab-vistasoft", "path": "github-repos/MATLAB/vistalab-vistasoft/vistasoft-7f0102c696c091c858233340cc7e1ab02f064d4c/mrBOLD/EventRelated/TimeCourseUI/tc_sessionPlots.m"}
|
from __future__ import absolute_import
from __future__ import print_function
import pandas as pd
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.pyplot import grid
from matplotlib import figure
from matplotlib.ticker import MultipleLocator,FuncFormatter,NullFormatter
from . import rowingphysics
from six.moves import map
from six.moves import range
def format_time_tick(x,pos=None):
hour=int(x/3600)
min=int((x-hour*3600.)/60)
min_str=str(min).zfill(2)
template='%d:%s'
return template % (hour,min_str)
def format_time(x,pos=None):
min = int(x/60.)
sec = x-min*60.
str1 = "{min:0>2}:{sec:0>4.1f}".format(
min=min,
sec=sec,
)
return str1
def format_pace_tick(x,pos=None):
min=int(x/60.)
sec=int(x-min*60.)
sec_str=str(sec).zfill(2)
template='%d:%s'
return template % (min,sec_str)
def format_pace(x,pos=None):
if np.isinf(x) or np.isnan(x):
x=0
min=int(x/60)
sec=(x-min*60.)
str1 = "{min:0>2}:{sec:0>4.1f}".format(
min = min,
sec = sec
)
return str1
def tempofromergsplit(ergsplit):
tempo1 = 25.
tempo2 = 35.
split1 = 120.
split2 = 85.
ratio = (ergsplit-split2)/(split1-split2)
tempo = tempo2+ratio*(tempo1-tempo2)
return tempo
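# Sanity-check sketch for the interpolation above: the endpoints map the slow
# split (120 s) to tempo 25 and the fast split (85 s) to tempo 35, and the
# mapping is linear in between.
def _check_tempofromergsplit():
    assert tempofromergsplit(120.) == 25.
    assert tempofromergsplit(85.) == 35.
    assert abs(tempofromergsplit(102.5) - 30.) < 1e-9  # midpoint of the range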
def tempofromotwsplit(otwsplit,otwsplits,mc=70.):
tempo1 = 16.
tempo2 = 35.
split1 = max(rawtoseconds(otwsplits))/4.
split2 = min(rawtoseconds(otwsplits))/4.
ratio = (otwsplit-split2)/(split1-split2)
tempo = tempo2+ratio*(tempo1-tempo2)
# if tempo<18:
# tempo = 18.0
return tempo
def splitvalues(s):
min, sec = list(map(float, s.split(':')))
return [min,sec]
def rawtoseconds(raw):
r = np.array(list(map(splitvalues,raw)))
thetime = 60.*r[:,0]+r[:,1]
return thetime
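# Usage sketch: rawtoseconds turns 'mm:ss.s' strings into seconds, e.g.
# rawtoseconds(['6:40.0', '7:00.0']) -> array([400., 420.])
def _check_rawtoseconds():
    assert np.allclose(rawtoseconds(['6:40.0', '7:00.0']), [400., 420.])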
def plotdata(filename,r,rg,erg):
expdata_raw = pd.read_csv(filename)
ergtime = rawtoseconds(expdata_raw['erg score'])/4.
fifty = rawtoseconds(expdata_raw['50kg'])/4.
sixty = rawtoseconds(expdata_raw['60kg'])/4.
seventy = rawtoseconds(expdata_raw['70kg'])/4.
eighty = rawtoseconds(expdata_raw['80kg'])/4.
ninety = rawtoseconds(expdata_raw['90kg'])/4.
hundred = rawtoseconds(expdata_raw['100kg'])/4.
hundredten = rawtoseconds(expdata_raw['110kg'])/4.
ratio = 0.5
print('50kg')
r.mc = 50.0
r.tempo = 30.
otwsplit50kg = []
for ergscore in expdata_raw['erg score']:
print(ergscore)
mins, secs = splitvalues(ergscore)
ergsplit = (60.0*mins+secs)/4.
r.tempo = tempofromergsplit(ergsplit)
        ergsplitmin = int(ergsplit) // 60
ergsplitsec = ergsplit-60*ergsplitmin
res = rowingphysics.ergtopower(ergsplitmin,ergsplitsec,ratio,r,erg)
totalpower = res[0]
ergpower = res[1]
res = rowingphysics.constantwatt(totalpower,r,rg)
otwsplit = 500./res[1]
otwsplit50kg.append(otwsplit)
print('70kg')
r.mc = 70.0
otwsplit70kg = []
for ergscore in expdata_raw['erg score']:
print(ergscore)
mins, secs = splitvalues(ergscore)
ergsplit = (60.0*mins+secs)/4.
r.tempo = tempofromergsplit(ergsplit)
        ergsplitmin = int(ergsplit) // 60
ergsplitsec = ergsplit-60*ergsplitmin
res = rowingphysics.ergtopower(ergsplitmin,ergsplitsec,ratio,r,erg)
totalpower = res[0]
ergpower = res[1]
res = rowingphysics.constantwatt(totalpower,r,rg)
otwsplit = 500./res[1]
otwsplit70kg.append(otwsplit)
print('90kg')
r.mc = 90.0
otwsplit90kg = []
for ergscore in expdata_raw['erg score']:
print(ergscore)
mins, secs = splitvalues(ergscore)
ergsplit = (60.0*mins+secs)/4.
r.tempo = tempofromergsplit(ergsplit)
        ergsplitmin = int(ergsplit) // 60
ergsplitsec = ergsplit-60*ergsplitmin
res = rowingphysics.ergtopower(ergsplitmin,ergsplitsec,ratio,r,erg)
totalpower = res[0]
ergpower = res[1]
res = rowingphysics.constantwatt(totalpower,r,rg)
otwsplit = 500./res[1]
otwsplit90kg.append(otwsplit)
print('110kg')
r.mc = 110.0
otwsplit110kg = []
for ergscore in expdata_raw['erg score']:
print(ergscore)
mins, secs = splitvalues(ergscore)
ergsplit = (60.0*mins+secs)/4.
r.tempo = tempofromergsplit(ergsplit)
        ergsplitmin = int(ergsplit) // 60
ergsplitsec = ergsplit-60*ergsplitmin
res = rowingphysics.ergtopower(ergsplitmin,ergsplitsec,ratio,r,erg)
totalpower = res[0]
ergpower = res[1]
res = rowingphysics.constantwatt(totalpower,r,rg)
otwsplit = 500./res[1]
otwsplit110kg.append(otwsplit)
# making the plot
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(1,1,1)
ax.plot(ergtime,fifty,color='k')
ax.plot(ergtime,otwsplit50kg,color='k')
# ax.plot(ergtime,sixty,color='r')
ax.plot(ergtime,seventy,color='g')
ax.plot(ergtime,otwsplit70kg,color='g')
# ax.plot(ergtime,eighty,color='b')
ax.plot(ergtime,ninety,color='c')
ax.plot(ergtime,otwsplit90kg,color='c')
# ax.plot(ergtime,hundred,color='m')
ax.plot(ergtime,hundredten,color='y')
ax.plot(ergtime,otwsplit110kg,color='y')
ax.axis([125,75,150,75])
ax.set_xticks(list(range(85,125,10)))
ax.set_xlabel('Erg split')
ax.set_ylabel('OTW split')
ax.set_title(filename)
ax.set_yticks(list(range(85,150,10)))
timeTickFormatter = NullFormatter()
majorTimeFormatter = FuncFormatter(format_time_tick)
majorLocator = (15*60)
ax.xaxis.set_major_formatter(majorTimeFormatter)
majorFormatter = FuncFormatter(format_pace_tick)
majorLocator = (5)
ax.xaxis.set_major_formatter(majorFormatter)
ax.yaxis.set_major_formatter(majorFormatter)
ax.legend(['50kg','50kg OTW','70kg','70kg OTW',
'90kg','90kg OTW',
'110kg','110kg OTW'],
prop={'size':10},loc=0)
grid(True)
fig.show()
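# A factored sketch of the four nearly identical weight-class loops in
# plotdata above; it uses only calls already present in this module and is
# parameterized by crew mass (mc).
def _erg_to_otw_splits(ergscores, mc, r, rg, erg, ratio=0.5):
    r.mc = mc
    splits = []
    for ergscore in ergscores:
        mins, secs = splitvalues(ergscore)
        ergsplit = (60.0 * mins + secs) / 4.
        r.tempo = tempofromergsplit(ergsplit)
        ergsplitmin = int(ergsplit) // 60
        ergsplitsec = ergsplit - 60 * ergsplitmin
        res = rowingphysics.ergtopower(ergsplitmin, ergsplitsec, ratio, r, erg)
        res2 = rowingphysics.constantwatt(res[0], r, rg)
        splits.append(500. / res2[1])
    return splits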
def plotinversedata(filename,r,rg,erg):
expdata_raw = pd.read_csv(filename)
ergtime = rawtoseconds(expdata_raw['erg score'])/4.
fifty = rawtoseconds(expdata_raw['50kg'])/4.
sixty = rawtoseconds(expdata_raw['60kg'])/4.
seventy = rawtoseconds(expdata_raw['70kg'])/4.
eighty = rawtoseconds(expdata_raw['80kg'])/4.
ninety = rawtoseconds(expdata_raw['90kg'])/4.
hundred = rawtoseconds(expdata_raw['100kg'])/4.
hundredten = rawtoseconds(expdata_raw['110kg'])/4.
ratio = 0.5
print('50kg')
r.mc = 50.0
r.tempo = 30.
ergsplit50kg = []
for otwscore in expdata_raw['50kg']:
print(otwscore)
mins, secs = splitvalues(otwscore)
otwsplit = (60.0*mins+secs)/4.
r.tempo = tempofromotwsplit(otwsplit,expdata_raw['50kg'],50)
        otwsplitmin = int(otwsplit) // 60
otwsplitsec = otwsplit-60*otwsplitmin
velo = 500./float(otwsplit)
res = rowingphysics.constantvelofast(velo,r,rg,Fmax=600,windv=0)
force = res[0]
power = res[3]
ratio = res[2]
try:
reserg = rowingphysics.constantwatt_erg(power,r,erg,theconst=1.0,
aantal=20,aantal2=20,
ratiomin=0.4,ratiomax=0.6)
except:
ergpower = 50.
reserg = [0,1,2,3,50]
ergpower = reserg[4]
ergvelo = (ergpower/2.8)**(1./3.)
ergpace = 500./ergvelo
print((format_pace(otwsplit),format_pace(ergpace)))
ergsplit50kg.append(ergpace)
print('70kg')
r.mc = 70.0
r.tempo = 30.
ergsplit70kg = []
for otwscore in expdata_raw['70kg']:
print(otwscore)
mins, secs = splitvalues(otwscore)
otwsplit = (60.0*mins+secs)/4.
r.tempo = tempofromotwsplit(otwsplit,expdata_raw['70kg'],70)
        otwsplitmin = int(otwsplit) // 60
otwsplitsec = otwsplit-60*otwsplitmin
velo = 500./float(otwsplit)
res = rowingphysics.constantvelofast(velo,r,rg,Fmax=600,windv=0)
force = res[0]
power = res[3]
ratio = res[2]
try:
reserg = rowingphysics.constantwatt_erg(power,r,erg,theconst=1.0,
aantal=20,aantal2=20,
ratiomin=0.4,ratiomax=0.6)
        except:
            reserg = [0,1,2,3,50]  # fallback so reserg is defined below
        ergpower = reserg[4]
ergvelo = (ergpower/2.8)**(1./3.)
ergpace = 500./ergvelo
print((format_pace(otwsplit),format_pace(ergpace)))
ergsplit70kg.append(ergpace)
print('90kg')
r.mc = 90.0
r.tempo = 30.
ergsplit90kg = []
for otwscore in expdata_raw['90kg']:
print(otwscore)
mins, secs = splitvalues(otwscore)
otwsplit = (60.0*mins+secs)/4.
r.tempo = tempofromotwsplit(otwsplit,expdata_raw['90kg'],90)
        otwsplitmin = int(otwsplit) // 60
otwsplitsec = otwsplit-60*otwsplitmin
velo = 500./float(otwsplit)
res = rowingphysics.constantvelofast(velo,r,rg,Fmax=600,windv=0)
force = res[0]
power = res[3]
ratio = res[2]
try:
reserg = rowingphysics.constantwatt_erg(power,r,erg,theconst=1.0,
aantal=20,aantal2=20,
ratiomin=0.4,ratiomax=0.6)
        except:
            reserg = [0,1,2,3,50]  # fallback so reserg is defined below
        ergpower = reserg[4]
ergvelo = (ergpower/2.8)**(1./3.)
ergpace = 500./ergvelo
print((format_pace(otwsplit),format_pace(ergpace)))
ergsplit90kg.append(ergpace)
print('110kg')
r.mc = 110.0
r.tempo = 30.
ergsplit110kg = []
for otwscore in expdata_raw['110kg']:
print(otwscore)
mins, secs = splitvalues(otwscore)
otwsplit = (60.0*mins+secs)/4.
r.tempo = tempofromotwsplit(otwsplit,expdata_raw['110kg'],110)
        otwsplitmin = int(otwsplit) // 60
otwsplitsec = otwsplit-60*otwsplitmin
velo = 500./float(otwsplit)
res = rowingphysics.constantvelofast(velo,r,rg,Fmax=600,windv=0)
force = res[0]
power = res[3]
ratio = res[2]
try:
reserg = rowingphysics.constantwatt_erg(power,r,erg,theconst=1.0,
aantal=20,aantal2=20,
ratiomin=0.4,ratiomax=0.6)
        except:
            reserg = [0,1,2,3,50]  # fallback so reserg is defined below
        ergpower = reserg[4]
ergvelo = (ergpower/2.8)**(1./3.)
ergpace = 500./ergvelo
print((format_pace(otwsplit),format_pace(ergpace)))
ergsplit110kg.append(ergpace)
# making the plot
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(1,1,1)
ax.plot(ergtime,fifty,color='k')
ax.scatter(ergsplit50kg,fifty,color='k')
ax.plot(ergtime,seventy,color='r')
ax.scatter(ergsplit70kg,seventy,color='r')
ax.plot(ergtime,ninety,color='g')
ax.scatter(ergsplit90kg,ninety,color='g')
ax.plot(ergtime,hundredten,color='b')
ax.scatter(ergsplit110kg,hundredten,color='b')
ax.axis([155,75,200,80])
ax.set_xticks(list(range(80,155,10)))
ax.set_xlabel('Erg split')
ax.set_ylabel('OTW split')
ax.set_title(filename)
ax.set_yticks(list(range(85,195,10)))
timeTickFormatter = NullFormatter()
majorTimeFormatter = FuncFormatter(format_time_tick)
majorLocator = (15*60)
ax.xaxis.set_major_formatter(majorTimeFormatter)
majorFormatter = FuncFormatter(format_pace_tick)
majorLocator = (5)
ax.xaxis.set_major_formatter(majorFormatter)
ax.yaxis.set_major_formatter(majorFormatter)
ax.legend(['50kg kleshnev','70kg kleshnev','90kg kleshnev','110kg kleshnev',
'50kg me','70kg me','90kg me','110kg me'],
prop={'size':10},loc=0)
grid(True)
fig.show()
def plotinversedata2(filename,r,rg,erg):
expdata_raw = pd.read_csv(filename)
ergtime = rawtoseconds(expdata_raw['erg score'])/4.
fifty = rawtoseconds(expdata_raw['50kg'])/4.
sixty = rawtoseconds(expdata_raw['60kg'])/4.
seventy = rawtoseconds(expdata_raw['70kg'])/4.
eighty = rawtoseconds(expdata_raw['80kg'])/4.
ninety = rawtoseconds(expdata_raw['90kg'])/4.
hundred = rawtoseconds(expdata_raw['100kg'])/4.
hundredten = rawtoseconds(expdata_raw['110kg'])/4.
ratio = 0.5
print('50kg')
r.mc = 50.0
r.tempo = 30.
ergsplit50kg = []
for otwscore in expdata_raw['50kg']:
print(otwscore)
mins, secs = splitvalues(otwscore)
otwsplit = (60.0*mins+secs)/4.
r.tempo = tempofromotwsplit(otwsplit,expdata_raw['50kg'],50)
        otwsplitmin = int(otwsplit) // 60
otwsplitsec = otwsplit-60*otwsplitmin
velo = 500./float(otwsplit)
res = rowingphysics.constantvelofast(velo,r,rg,Fmax=600,windv=0)
force = res[0]
power = res[3]
ratio = res[2]
try:
ergpower = rowingphysics.find_ergpower(power,r,erg,aantal=5,aantal2=5)
print((power,ergpower))
except:
ergpower = 50.
ergvelo = (ergpower/2.8)**(1./3.)
ergpace = 500./ergvelo
print((format_pace(otwsplit),format_pace(ergpace)))
ergsplit50kg.append(ergpace)
print('70kg')
r.mc = 70.0
r.tempo = 30.
ergsplit70kg = []
for otwscore in expdata_raw['70kg']:
print(otwscore)
mins, secs = splitvalues(otwscore)
otwsplit = (60.0*mins+secs)/4.
r.tempo = tempofromotwsplit(otwsplit,expdata_raw['70kg'],70)
        otwsplitmin = int(otwsplit) // 60
otwsplitsec = otwsplit-60*otwsplitmin
velo = 500./float(otwsplit)
res = rowingphysics.constantvelofast(velo,r,rg,Fmax=600,windv=0)
force = res[0]
power = res[3]
ratio = res[2]
try:
ergpower = rowingphysics.find_ergpower(power,r,erg,aantal=5,aantal2=5)
print((power,ergpower))
except:
ergpower = 50.
ergvelo = (ergpower/2.8)**(1./3.)
ergpace = 500./ergvelo
print((format_pace(otwsplit),format_pace(ergpace)))
ergsplit70kg.append(ergpace)
print('90kg')
r.mc = 90.0
r.tempo = 30.
ergsplit90kg = []
for otwscore in expdata_raw['90kg']:
print(otwscore)
mins, secs = splitvalues(otwscore)
otwsplit = (60.0*mins+secs)/4.
r.tempo = tempofromotwsplit(otwsplit,expdata_raw['90kg'],90)
        otwsplitmin = int(otwsplit) // 60
otwsplitsec = otwsplit-60*otwsplitmin
velo = 500./float(otwsplit)
res = rowingphysics.constantvelofast(velo,r,rg,Fmax=600,windv=0)
force = res[0]
power = res[3]
ratio = res[2]
try:
ergpower = rowingphysics.find_ergpower(power,r,erg,aantal=5,aantal2=5)
except:
ergpower = 50.
ergvelo = (ergpower/2.8)**(1./3.)
ergpace = 500./ergvelo
print((format_pace(otwsplit),format_pace(ergpace)))
ergsplit90kg.append(ergpace)
print('110kg')
r.mc = 110.0
r.tempo = 30.
ergsplit110kg = []
for otwscore in expdata_raw['110kg']:
print(otwscore)
mins, secs = splitvalues(otwscore)
otwsplit = (60.0*mins+secs)/4.
r.tempo = tempofromotwsplit(otwsplit,expdata_raw['110kg'],110)
        otwsplitmin = int(otwsplit) // 60
otwsplitsec = otwsplit-60*otwsplitmin
velo = 500./float(otwsplit)
res = rowingphysics.constantvelofast(velo,r,rg,Fmax=600,windv=0)
force = res[0]
power = res[3]
ratio = res[2]
try:
ergpower = rowingphysics.find_ergpower(power,r,erg,aantal=5,aantal2=5)
except:
ergpower = 50.
ergvelo = (ergpower/2.8)**(1./3.)
ergpace = 500./ergvelo
print((format_pace(otwsplit),format_pace(ergpace)))
ergsplit110kg.append(ergpace)
# making the plot
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(1,1,1)
ax.plot(ergtime,fifty,color='k')
ax.plot(ergsplit50kg,fifty,color='k')
ax.plot(ergtime,seventy,color='r')
ax.plot(ergsplit70kg,seventy,color='r')
ax.plot(ergtime,ninety,color='g')
ax.plot(ergsplit90kg,ninety,color='g')
ax.plot(ergtime,hundredten,color='b')
ax.plot(ergsplit110kg,hundredten,color='b')
ax.axis([155,75,200,85])
ax.set_xticks(list(range(85,155,10)))
ax.set_xlabel('Erg split')
ax.set_ylabel('OTW split')
ax.set_title(filename)
ax.set_yticks(list(range(85,200,10)))
timeTickFormatter = NullFormatter()
majorTimeFormatter = FuncFormatter(format_time_tick)
majorLocator = (15*60)
ax.xaxis.set_major_formatter(majorTimeFormatter)
majorFormatter = FuncFormatter(format_pace_tick)
majorLocator = (5)
ax.xaxis.set_major_formatter(majorFormatter)
ax.yaxis.set_major_formatter(majorFormatter)
ax.legend(['50kg','50kg OTW',
'70kg','70kg OTW',
'90kg','90kg OTW',
'110kg','110kg OTW' ],
prop={'size':10},loc=0)
grid(True)
fig.show()
def test_one(r,rg,pace):
min = pace[0]
sec = pace[1]
p = 60*min+sec
velo = 500./p
res = rowingphysics.constantvelofast(velo,r,rg,Fmax=600,windv=0)
force = res[0]
power = res[3]
ratio = res[2]
print(power)
res2 = rowingphysics.constantwatt(power,r,rg)
otwsplit = 500./res2[1]
return otwsplit,otwsplit/p
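# Usage sketch (r and rg as in the plotting helpers above): round-trip a
# 1:45 /500m pace through power and back; the returned factor should be ~1.
# otw, factor = test_one(r, rg, (1, 45.))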
def test_two(r,e,pace):
ergsplitmin = pace[0]
sec = pace[1]
p = 60*ergsplitmin+sec
velo = 500./p
ratio = 0.5
res = rowingphysics.ergtopower(ergsplitmin,sec,ratio,r,e)
totalpower = res[0]
ergpower = res[1]
print((totalpower,ergpower))
reserg = rowingphysics.constantwatt_erg(totalpower,r,e,theconst=1.0,
ratiomin=0.4,ratiomax=0.6,aantal=30,aantal2=30)
powerergdisplay = reserg[4]
v = (powerergdisplay/2.8)**(1./3.)
ergsplit = 500./v
return ergsplit, ergsplit/p
def test_three(r,rg,e,pace):
min = pace[0]
sec = pace[1]
p = 60*min+sec
print((format_pace(p)))
velo = 500./p
res = rowingphysics.constantvelofast(velo,r,rg,Fmax=600,windv=0)
force = res[0]
power = res[3]
ratio = res[2]
print((power,ratio))
reserg = rowingphysics.constantwatt_erg(power,r,e,theconst=1.0,ratiomin=0.4,ratiomax=0.6)
powerergdisplay = reserg[4]
print(powerergdisplay)
v = (powerergdisplay/2.8)**(1./3.)
ergpace = 500./v
print((format_pace(ergpace)))
    ergsplitmin = int(ergpace) // 60
sec = ergpace - 60.*ergsplitmin
res = rowingphysics.ergtopower(ergsplitmin,sec,ratio,r,e)
totalpower = res[0]
ergpower = res[1]
print((totalpower, ergpower))
res2 = rowingphysics.constantwatt(totalpower,r,rg)
otwsplit = 500./res2[1]
print((format_pace(otwsplit)))
def test_four(r,rg,e,pace,ratio=0.5):
min = pace[0]
sec = pace[1]
p = 60*min+sec
print((format_pace(p)))
velo = 500./p
res = rowingphysics.ergtopower(min,sec,ratio,r,e)
totalpower = res[0]
ergpower = res[1]
print((totalpower, ergpower))
res2 = rowingphysics.constantwatt(totalpower,r,rg)
otwsplit = 500./res2[1]
velo = res2[1]
print((format_pace(otwsplit)))
res = rowingphysics.constantvelofast(velo,r,rg,Fmax=600,windv=0)
force = res[0]
power = res[3]
ratio = res[2]
print((power,ratio))
reserg = rowingphysics.constantwatt_erg(power,r,e,theconst=1.0,ratiomin=0.4,ratiomax=0.6)
powerergdisplay = reserg[4]
print(powerergdisplay)
v = (powerergdisplay/2.8)**(1./3.)
ergpace = 500./v
print((format_pace(ergpace)))
def test_five(r,rg,e,ergpace,otwpace,ratio=0.5):
ergmin = ergpace[0]
ergsec = ergpace[1]
ergp = 60*ergmin+ergsec
otwmin = otwpace[0]
otwsec = otwpace[1]
otwp = 60*otwmin+otwsec
tempos = np.array([20.,21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,31.,32.,33.,34.,35.])
ergpace2 = 0.0*tempos
otwpace2 = 0.0*tempos
print((ergp,otwp))
for i in range(len(tempos)):
r.tempo = tempos[i]
try:
res = rowingphysics.ergtopower(ergmin,ergsec,ratio,r,e)
except:
res = [0,0]
totalpower = res[0]
ergpower = res[1]
print((tempos[i],ergpower,totalpower))
try:
res2 = rowingphysics.constantwatt(totalpower,r,rg)
except:
res2 = [0,0]
print((res2[3]))
otwpace2[i] = 500./res2[1]
try:
res3 = rowingphysics.constantvelofast(500./otwp,r,rg,Fmax=600,windv=0)
except:
res3 = [0,0,0,0,0]
force = res3[0]
power = res3[3]
ratio = res3[2]
try:
reserg = rowingphysics.constantwatt_erg(power,r,e,theconst=1.0,timestep=0.01,ratiomin=0.4,ratiomax=0.6)
# reserg = rowingphysics.constantwatt_erg(power,r,e,theconst=1.0,aantal=20,aantal2=20,ratiomin=0.2,ratiomax=0.8)
except:
reserg = [0,0,0,0,0]
powerergdisplay = reserg[4]
v = (powerergdisplay/2.8)**(1./3.)
if v==0:
v=1.0
print((power, ratio, 500./v))
ergpace2[i] = 500./v
# making the plot
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(1,1,1)
ax.plot(tempos,otwpace2,color='k')
ax.plot(tempos,ergpace2,color='r')
ax.plot(tempos,ergp+0.0*ergpace2,color='r')
ax.plot(tempos,otwp+0.0*ergpace2,color='k')
ax.axis([18,40,150,85])
ax.set_xlabel('SPM')
ax.set_ylabel('split')
ax.set_yticks(list(range(85,150,10)))
majorFormatter = FuncFormatter(format_pace_tick)
majorLocator = (5)
ax.yaxis.set_major_formatter(majorFormatter)
    # label the four plotted series in the order they were drawn above
    ax.legend(['OTW','erg','erg target','OTW target'],
              prop={'size':10},loc=0)
grid(True)
fig.show()
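# Usage sketch: sweep stroke rates 20-35 spm for an erg pace of 1:45 and an
# OTW pace of 1:55 (illustrative values; r, rg, e as in the other helpers):
# test_five(r, rg, e, (1, 45.), (1, 55.))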
def test_six(r,rg,e,ergpace,otwpace,ratio=0.5):
ergmin = ergpace[0]
ergsec = ergpace[1]
ergp = 60*ergmin+ergsec
otwmin = otwpace[0]
otwsec = otwpace[1]
otwp = 60*otwmin+otwsec
tempos = np.array([20.,21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,31.,32.,33.,34.,35.])
tempoerg = 0.0*tempos
ergpace2 = 0.0*tempos
otwpace2 = 0.0*tempos
print((ergp,otwp))
for i in range(len(tempos)):
r.tempo = tempos[i]
try:
res = rowingphysics.ergtopower(ergmin,ergsec,ratio,r,e)
except:
res = [0,0]
totalpower = res[0]
ergpower = res[1]
print((tempos[i],ergpower,totalpower))
try:
res2 = rowingphysics.constantwatt(totalpower,r,rg)
except:
res2 = [0,0]
print((res2[3]))
otwpace2[i] = 500./res2[1]
try:
res3 = rowingphysics.constantvelofast(500./otwp,r,rg,Fmax=600,windv=0)
except:
res3 = [0,0,0,0,0]
force = res3[0]
power = res3[3]
ratio = res3[2]
try:
reserg = rowingphysics.constantwatt_ergtempo(power,r,e,theconst=1.0,timestep=0.01,ratio=ratio)
except:
reserg = [0,0,0,0,0]
powerergdisplay = reserg[4]
tempoerg[i] = reserg[0]
v = (powerergdisplay/2.8)**(1./3.)
if v==0:
v=1.0
print((power, ratio, 500./v))
ergpace2[i] = 500./v
# making the plot
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(1,1,1)
ax.plot(tempos,otwpace2,color='k')
ax.plot(tempoerg,ergpace2,color='r')
ax.plot(tempos,ergp+0.0*ergpace2,color='r')
ax.plot(tempos,otwp+0.0*ergpace2,color='k')
ax.axis([18,40,150,80])
ax.set_xlabel('SPM')
ax.set_ylabel('split')
ax.set_yticks(list(range(80,150,10)))
majorFormatter = FuncFormatter(format_pace_tick)
majorLocator = (5)
ax.yaxis.set_major_formatter(majorFormatter)
    # label the four plotted series in the order they were drawn above
    ax.legend(['OTW','erg','erg target','OTW target'],
              prop={'size':10},loc=0)
grid(True)
fig.show()
|
{"hexsha": "7a5f2887d73cfa2998f75be9c4e97bb471d543d0", "size": 21970, "ext": "py", "lang": "Python", "max_stars_repo_path": "rowingphysics/expdata.py", "max_stars_repo_name": "sanderroosendaal/rowingphysics", "max_stars_repo_head_hexsha": "9b809c80708e96d13761861218ff7744829cc4c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-04-24T15:20:40.000Z", "max_stars_repo_stars_event_max_datetime": "2017-04-24T15:20:40.000Z", "max_issues_repo_path": "rowingphysics/expdata.py", "max_issues_repo_name": "sanderroosendaal/rowingphysics", "max_issues_repo_head_hexsha": "9b809c80708e96d13761861218ff7744829cc4c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-11-03T21:38:34.000Z", "max_issues_repo_issues_event_max_datetime": "2016-11-29T09:50:57.000Z", "max_forks_repo_path": "rowingphysics/expdata.py", "max_forks_repo_name": "sanderroosendaal/rowingphysics", "max_forks_repo_head_hexsha": "9b809c80708e96d13761861218ff7744829cc4c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.2733050847, "max_line_length": 116, "alphanum_fraction": 0.6605826127, "include": true, "reason": "import numpy", "num_tokens": 7741}
|
MODULE btree
!
! Purpose:
! To define the derived data type used as a node in the
!    binary tree, and to define the operations >, <, and ==
! for this data type. This module also contains the
! subroutines to add a node to the tree, write out the
! values in the tree, and find a value in the tree.
!
! Record of revisions:
! Date Programmer Description of change
! ==== ========== =====================
! 12/24/06 S. J. Chapman Original code
!
IMPLICIT NONE
! Restrict access to module contents.
PRIVATE
PUBLIC :: node, OPERATOR(>), OPERATOR(<), OPERATOR(==)
PUBLIC :: add_node, write_node, find_node
! Declare type for a node of the binary tree.
TYPE :: node
CHARACTER(len=10) :: last
CHARACTER(len=10) :: first
CHARACTER :: mi
CHARACTER(len=16) :: phone
TYPE (node), POINTER :: before
TYPE (node), POINTER :: after
END TYPE
INTERFACE OPERATOR (>)
MODULE PROCEDURE greater_than
END INTERFACE
INTERFACE OPERATOR (<)
MODULE PROCEDURE less_than
END INTERFACE
INTERFACE OPERATOR (==)
MODULE PROCEDURE equal_to
END INTERFACE
CONTAINS
RECURSIVE SUBROUTINE add_node (ptr, new_node)
!
! Purpose:
! To add a new node to the binary tree structure.
!
TYPE (node), POINTER :: ptr ! Pointer to current pos. in tree
TYPE (node), POINTER :: new_node ! Pointer to new node
IF ( .NOT. ASSOCIATED(ptr) ) THEN
! There is no tree yet. Add the node right here.
ptr => new_node
ELSE IF ( new_node < ptr ) THEN
IF ( ASSOCIATED(ptr%before) ) THEN
CALL add_node ( ptr%before, new_node )
ELSE
ptr%before => new_node
END IF
ELSE
IF ( ASSOCIATED(ptr%after) ) THEN
CALL add_node ( ptr%after, new_node )
ELSE
ptr%after => new_node
END IF
END IF
END SUBROUTINE add_node
RECURSIVE SUBROUTINE write_node (ptr)
!
! Purpose:
! To write out the contents of the binary tree
! structure in order.
!
TYPE (node), POINTER :: ptr ! Pointer to current pos. in tree
! Write contents of previous node.
IF ( ASSOCIATED(ptr%before) ) THEN
CALL write_node ( ptr%before )
END IF
! Write contents of current node.
WRITE (*,"(1X,A,', ',A,1X,A)") ptr%last, ptr%first, ptr%mi
! Write contents of next node.
IF ( ASSOCIATED(ptr%after) ) THEN
CALL write_node ( ptr%after )
END IF
END SUBROUTINE write_node
RECURSIVE SUBROUTINE find_node (ptr, search, error)
!
! Purpose:
! To find a particular node in the binary tree structure.
! "Search" is a pointer to the name to find, and will
! also contain the results when the subroutine finishes
! if the node is found.
!
TYPE (node), POINTER :: ptr ! Pointer to curr pos. in tree
TYPE (node), POINTER :: search ! Pointer to value to find.
INTEGER :: error ! Error: 0 = ok, 1 = not found
IF ( search < ptr ) THEN
IF ( ASSOCIATED(ptr%before) ) THEN
CALL find_node (ptr%before, search, error)
ELSE
error = 1
END IF
ELSE IF ( search == ptr ) THEN
search = ptr
error = 0
ELSE
IF ( ASSOCIATED(ptr%after) ) THEN
CALL find_node (ptr%after, search, error)
ELSE
error = 1
END IF
END IF
END SUBROUTINE find_node
LOGICAL FUNCTION greater_than (op1, op2)
!
! Purpose:
! To test to see if operand 1 is > operand 2
! in alphabetical order.
!
TYPE (node), INTENT(IN) :: op1, op2
CHARACTER(len=10) :: last1, last2, first1, first2
CHARACTER :: mi1, mi2
CALL ushift (op1, last1, first1, mi1 )
CALL ushift (op2, last2, first2, mi2 )
IF (last1 > last2) THEN
greater_than = .TRUE.
ELSE IF (last1 < last2) THEN
greater_than = .FALSE.
ELSE ! Last names match
IF (first1 > first2) THEN
greater_than = .TRUE.
ELSE IF (first1 < first2) THEN
greater_than = .FALSE.
ELSE ! First names match
IF (mi1 > mi2) THEN
greater_than = .TRUE.
ELSE
greater_than = .FALSE.
END IF
END IF
END IF
END FUNCTION greater_than
LOGICAL FUNCTION less_than (op1, op2)
!
! Purpose:
! To test to see if operand 1 is < operand 2
! in alphabetical order.
!
TYPE (node), INTENT(IN) :: op1, op2
CHARACTER(len=10) :: last1, last2, first1, first2
CHARACTER :: mi1, mi2
CALL ushift (op1, last1, first1, mi1 )
CALL ushift (op2, last2, first2, mi2 )
IF (last1 < last2) THEN
less_than = .TRUE.
ELSE IF (last1 > last2) THEN
less_than = .FALSE.
ELSE ! Last names match
IF (first1 < first2) THEN
less_than = .TRUE.
ELSE IF (first1 > first2) THEN
less_than = .FALSE.
ELSE ! First names match
IF (mi1 < mi2) THEN
less_than = .TRUE.
ELSE
less_than = .FALSE.
END IF
END IF
END IF
END FUNCTION less_than
LOGICAL FUNCTION equal_to (op1, op2)
!
! Purpose:
! To test to see if operand 1 is equal to operand 2
! alphabetically.
!
TYPE (node), INTENT(IN) :: op1, op2
CHARACTER(len=10) :: last1, last2, first1, first2
CHARACTER :: mi1, mi2
CALL ushift (op1, last1, first1, mi1 )
CALL ushift (op2, last2, first2, mi2 )
IF ( (last1 == last2) .AND. (first1 == first2) .AND. &
(mi1 == mi2 ) ) THEN
equal_to = .TRUE.
ELSE
equal_to = .FALSE.
END IF
END FUNCTION equal_to
SUBROUTINE ushift( op, last, first, mi )
!
! Purpose:
! To create upshifted versions of all strings for
! comparison.
!
TYPE (node), INTENT(IN) :: op
CHARACTER(len=10), INTENT(INOUT) :: last, first
CHARACTER, INTENT(INOUT) :: mi
last = op%last
first = op%first
mi = op%mi
CALL ucase (last)
CALL ucase (first)
CALL ucase (mi)
END SUBROUTINE ushift
SUBROUTINE ucase ( string )
!
! Purpose:
! To shift a character string to upper case on any processor,
! regardless of collating sequence.
!
! Record of revisions:
! Date Programmer Description of change
! ==== ========== =====================
! 11/25/06 S. J. Chapman Original code
!
IMPLICIT NONE
! Declare calling parameters:
CHARACTER(len=*), INTENT(INOUT) :: string
! Declare local variables:
INTEGER :: i ! Loop index
INTEGER :: length ! Length of input string
! Get length of string
length = LEN ( string )
! Now shift lower case letters to upper case.
DO i = 1, length
IF ( LGE(string(i:i),'a') .AND. LLE(string(i:i),'z') ) THEN
string(i:i) = ACHAR ( IACHAR ( string(i:i) ) - 32 )
END IF
END DO
END SUBROUTINE ucase
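! Example (IACHAR/ACHAR use the ASCII collating sequence by definition, so
! the -32 shift is portable): ucase on 'Chapman' yields 'CHAPMAN', and
! characters outside 'a'..'z' are left unchanged.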
END MODULE btree
PROGRAM binary_tree
!
! Purpose:
! To read in a series of random names and phone numbers
! and store them in a binary tree. After the values are
! stored, they are written out in sorted order. Then the
! user is prompted for a name to retrieve, and the program
! recovers the data associated with that name.
!
! Record of revisions:
! Date Programmer Description of change
! ==== ========== =====================
! 12/24/06 S. J. Chapman Original code
!
USE btree
IMPLICIT NONE
! Data dictionary: declare variable types & definitions
INTEGER :: error ! Error flag: 0=success
CHARACTER(len=20) :: filename ! Input data file name
INTEGER :: istat ! Status: 0 for success
TYPE (node), POINTER :: root ! Pointer to root node
TYPE (node), POINTER :: temp ! Temp pointer to node
! Nullify new pointers
NULLIFY ( root, temp )
! Get the name of the file containing the input data.
WRITE (*,*) 'Enter the file name with the input data: '
READ (*,'(A20)') filename
! Open input data file. Status is OLD because the input data must
! already exist.
OPEN ( UNIT=9, FILE=filename, STATUS='OLD', ACTION='READ', &
IOSTAT=istat )
! Was the OPEN successful?
fileopen: IF ( istat == 0 ) THEN ! Open successful
! The file was opened successfully, allocate space for each
! node, read the data into that node, and insert it into the
! binary tree.
input: DO
ALLOCATE (temp,STAT=istat) ! Allocate node
NULLIFY ( temp%before, temp%after) ! Nullify pointers
READ (9, 100, IOSTAT=istat) temp%last, temp%first, &
temp%mi, temp%phone ! Read data
100 FORMAT (A10,1X,A10,1X,A1,1X,A16)
IF ( istat /= 0 ) EXIT input ! Exit on end of data
CALL add_node(root, temp) ! Add to binary tree
END DO input
! Now, write out the sorted data.
WRITE (*,'(/,1X,A)') 'The sorted data list is: '
CALL write_node(root)
! Prompt for a name to search for in the tree.
WRITE (*,'(/,1X,A)') 'Enter name to recover from tree:'
WRITE (*,'(1X,A)',ADVANCE='NO') 'Last Name: '
READ (*,'(A)') temp%last
WRITE (*,'(1X,A)',ADVANCE='NO') 'First Name: '
READ (*,'(A)') temp%first
WRITE (*,'(1X,A)',ADVANCE='NO') 'Middle Initial: '
READ (*,'(A)') temp%mi
! Locate record
CALL find_node ( root, temp, error )
check: IF ( error == 0 ) THEN
WRITE (*,'(/,1X,A)') 'The record is:'
WRITE (*,'(1X,7A)') temp%last, ', ', temp%first, ' ', &
temp%mi, ' ', temp%phone
ELSE
WRITE (*,'(/,1X,A)') 'Specified node not found!'
END IF check
ELSE fileopen
! Else file open failed. Tell user.
WRITE (*,'(1X,A,I6)') 'File open failed--status = ', istat
END IF fileopen
END PROGRAM binary_tree
|
{"hexsha": "86352aefb2d5b96869fad60bdcaf881a8ac050be", "size": 10196, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Fortran952003ForScientistsandEngineers3rdStephenJChapman/chap15/btree.f90", "max_stars_repo_name": "yangyang14641/FortranLearning", "max_stars_repo_head_hexsha": "3d4a91aacd957361aff5873054edf35c586e8a55", "max_stars_repo_licenses": ["AFL-3.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-03-12T02:18:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-05T07:58:56.000Z", "max_issues_repo_path": "Fortran952003ForScientistsandEngineers3rdStephenJChapman/chap15/btree.f90", "max_issues_repo_name": "yangyang14641/FortranLearning", "max_issues_repo_head_hexsha": "3d4a91aacd957361aff5873054edf35c586e8a55", "max_issues_repo_licenses": ["AFL-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Fortran952003ForScientistsandEngineers3rdStephenJChapman/chap15/btree.f90", "max_forks_repo_name": "yangyang14641/FortranLearning", "max_forks_repo_head_hexsha": "3d4a91aacd957361aff5873054edf35c586e8a55", "max_forks_repo_licenses": ["AFL-3.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-05-11T02:36:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-05T06:36:55.000Z", "avg_line_length": 29.4682080925, "max_line_length": 68, "alphanum_fraction": 0.566006277, "num_tokens": 2872}
|
import numpy as np
import argparse
parser = argparse.ArgumentParser(description="A silly script to convert the angular bin of a light curve in the simulation data to its angle in degrees")
parser.add_argument("--angular-bin", type=int, help="The angular bin")
args = parser.parse_args()
# Average the upper and lower bounds of the angular bin
theta = np.degrees(0.5 * (np.arccos(1. - (2. * args.angular_bin / 54.)) + np.arccos(1. - (2. * (args.angular_bin - 1) / 54.))))
print(theta)
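# Worked example: the 54 bins tile cos(theta) uniformly on [-1, 1], so
# --angular-bin 1 gives ~7.8 deg (nearest the pole), bin 27 gives ~88.9 deg
# (roughly edge-on), and bin 54 gives ~172.2 deg.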
|
{"hexsha": "5f8f14b60872e46215f39b52a78df94931613d42", "size": 486, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/convert_angular_bin_to_theta.py", "max_stars_repo_name": "liz-champion/lc_fit", "max_stars_repo_head_hexsha": "f86d28781252783240a33a4b8854e9ecefeab27c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bin/convert_angular_bin_to_theta.py", "max_issues_repo_name": "liz-champion/lc_fit", "max_issues_repo_head_hexsha": "f86d28781252783240a33a4b8854e9ecefeab27c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/convert_angular_bin_to_theta.py", "max_forks_repo_name": "liz-champion/lc_fit", "max_forks_repo_head_hexsha": "f86d28781252783240a33a4b8854e9ecefeab27c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5, "max_line_length": 153, "alphanum_fraction": 0.7181069959, "include": true, "reason": "import numpy", "num_tokens": 132}
|
# This file is auto-generated by AWSMetadata.jl
using AWS
using AWS.AWSServices: iotanalytics
using AWS.Compat
using AWS.UUIDs
"""
BatchPutMessage()
Sends messages to a channel.
# Required Parameters
- `channelName`: The name of the channel where the messages are sent.
- `messages`: The list of messages to be sent. Each message has the format: { \"messageId\": \"string\", \"payload\": \"string\"}. The field names of message payloads (data) that you send to AWS IoT Analytics: Must contain only alphanumeric characters and underscores (_). No other special characters are allowed. Must begin with an alphabetic character or single underscore (_). Cannot contain hyphens (-). In regular expression terms: \"^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)\". Cannot be more than 255 characters. Are case insensitive. (Fields named foo and FOO in the same payload are considered duplicates.) For example, {\"temp_01\": 29} or {\"_temp_01\": 29} are valid, but {\"temp-01\": 29}, {\"01_temp\": 29} or {\"__temp_01\": 29} are invalid in message payloads.
"""
batch_put_message(channelName, messages; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/messages/batch", Dict{String, Any}("channelName"=>channelName, "messages"=>messages); aws_config=aws_config)
batch_put_message(channelName, messages, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/messages/batch", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("channelName"=>channelName, "messages"=>messages), args)); aws_config=aws_config)
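# Usage sketch (channel name and payload are illustrative; per the AWS API the
# payload field carries the message data, base64-encoded on the wire — here
# the string decodes to {"temp_01": 29}, matching the docstring's example):
# batch_put_message("mychannel",
#     [Dict("messageId" => "msg-001", "payload" => "eyJ0ZW1wXzAxIjogMjl9")])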
"""
CancelPipelineReprocessing()
Cancels the reprocessing of data through the pipeline.
# Required Parameters
- `pipelineName`: The name of pipeline for which data reprocessing is canceled.
- `reprocessingId`: The ID of the reprocessing task (returned by StartPipelineReprocessing).
"""
cancel_pipeline_reprocessing(pipelineName, reprocessingId; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/pipelines/$(pipelineName)/reprocessing/$(reprocessingId)"; aws_config=aws_config)
cancel_pipeline_reprocessing(pipelineName, reprocessingId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/pipelines/$(pipelineName)/reprocessing/$(reprocessingId)", args; aws_config=aws_config)
"""
CreateChannel()
Creates a channel. A channel collects data from an MQTT topic and archives the raw, unprocessed messages before publishing the data to a pipeline.
# Required Parameters
- `channelName`: The name of the channel.
# Optional Parameters
- `channelStorage`: Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the channel is created.
- `retentionPeriod`: How long, in days, message data is kept for the channel. When customerManagedS3 storage is selected, this parameter is ignored.
- `tags`: Metadata which can be used to manage the channel.
"""
create_channel(channelName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/channels", Dict{String, Any}("channelName"=>channelName); aws_config=aws_config)
create_channel(channelName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/channels", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("channelName"=>channelName), args)); aws_config=aws_config)
"""
CreateDataset()
Creates a dataset. A dataset stores data retrieved from a data store by applying a queryAction (a SQL query) or a containerAction (executing a containerized application). This operation creates the skeleton of a dataset. The dataset can be populated manually by calling CreateDatasetContent or automatically according to a trigger you specify.
# Required Parameters
- `actions`: A list of actions that create the data set contents.
- `datasetName`: The name of the data set.
# Optional Parameters
- `contentDeliveryRules`: When dataset contents are created, they are delivered to destinations specified here.
- `lateDataRules`: A list of data rules that send notifications to Amazon CloudWatch when data arrives late. To specify lateDataRules, the dataset must use a DeltaTime filter.
- `retentionPeriod`: Optional. How long, in days, versions of dataset contents are kept for the dataset. If not specified or set to null, versions of dataset contents are retained for at most 90 days. The number of versions of dataset contents retained is determined by the versioningConfiguration parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.
- `tags`: Metadata which can be used to manage the data set.
- `triggers`: A list of triggers. A trigger causes data set contents to be populated at a specified time interval or when another data set's contents are created. The list of triggers can be empty or contain up to five DataSetTrigger objects.
- `versioningConfiguration`: Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.
"""
create_dataset(actions, datasetName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/datasets", Dict{String, Any}("actions"=>actions, "datasetName"=>datasetName); aws_config=aws_config)
create_dataset(actions, datasetName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/datasets", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("actions"=>actions, "datasetName"=>datasetName), args)); aws_config=aws_config)
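# Sketch of a SQL-backed dataset (action and dataset names are illustrative;
# a DatasetAction pairs an actionName with a queryAction holding the SQL):
# create_dataset(
#     [Dict("actionName" => "sqlaction",
#           "queryAction" => Dict("sqlQuery" => "SELECT * FROM mydatastore"))],
#     "mydataset")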
"""
CreateDatasetContent()
Creates the content of a data set by applying a queryAction (a SQL query) or a containerAction (executing a containerized application).
# Required Parameters
- `datasetName`: The name of the dataset.
# Optional Parameters
- `versionId`: The version ID of the dataset content. To specify versionId for a dataset content, the dataset must use a DeltaTime filter.
"""
create_dataset_content(datasetName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/datasets/$(datasetName)/content"; aws_config=aws_config)
create_dataset_content(datasetName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/datasets/$(datasetName)/content", args; aws_config=aws_config)
"""
CreateDatastore()
Creates a data store, which is a repository for messages.
# Required Parameters
- `datastoreName`: The name of the data store.
# Optional Parameters
- `datastoreStorage`: Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.
- `retentionPeriod`: How long, in days, message data is kept for the data store. When customerManagedS3 storage is selected, this parameter is ignored.
- `tags`: Metadata which can be used to manage the data store.
"""
create_datastore(datastoreName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/datastores", Dict{String, Any}("datastoreName"=>datastoreName); aws_config=aws_config)
create_datastore(datastoreName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/datastores", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("datastoreName"=>datastoreName), args)); aws_config=aws_config)
"""
CreatePipeline()
Creates a pipeline. A pipeline consumes messages from a channel and allows you to process the messages before storing them in a data store. You must specify both a channel and a datastore activity and, optionally, as many as 23 additional activities in the pipelineActivities array.
# Required Parameters
- `pipelineActivities`: A list of PipelineActivity objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data. The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity. For example: pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]
- `pipelineName`: The name of the pipeline.
# Optional Parameters
- `tags`: Metadata which can be used to manage the pipeline.
"""
create_pipeline(pipelineActivities, pipelineName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/pipelines", Dict{String, Any}("pipelineActivities"=>pipelineActivities, "pipelineName"=>pipelineName); aws_config=aws_config)
create_pipeline(pipelineActivities, pipelineName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/pipelines", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("pipelineActivities"=>pipelineActivities, "pipelineName"=>pipelineName), args)); aws_config=aws_config)
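# Sketch of the minimal channel -> datastore pipeline the docstring requires
# (activity and resource names are illustrative):
# create_pipeline(
#     [Dict("channel" => Dict("name" => "chanact",
#                             "channelName" => "mychannel",
#                             "next" => "storeact")),
#      Dict("datastore" => Dict("name" => "storeact",
#                               "datastoreName" => "mydatastore"))],
#     "mypipeline")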
"""
DeleteChannel()
Deletes the specified channel.
# Required Parameters
- `channelName`: The name of the channel to delete.
"""
delete_channel(channelName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/channels/$(channelName)"; aws_config=aws_config)
delete_channel(channelName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/channels/$(channelName)", args; aws_config=aws_config)
"""
DeleteDataset()
Deletes the specified dataset. You do not have to delete the content of the dataset before you perform this operation.
# Required Parameters
- `datasetName`: The name of the data set to delete.
"""
delete_dataset(datasetName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/datasets/$(datasetName)"; aws_config=aws_config)
delete_dataset(datasetName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/datasets/$(datasetName)", args; aws_config=aws_config)
"""
DeleteDatasetContent()
Deletes the content of the specified dataset.
# Required Parameters
- `datasetName`: The name of the dataset whose content is deleted.
# Optional Parameters
- `versionId`: The version of the dataset whose content is deleted. You can also use the strings \"LATEST\" or \"LATEST_SUCCEEDED\" to delete the latest or latest successfully completed data set. If not specified, \"LATEST_SUCCEEDED\" is the default.
"""
delete_dataset_content(datasetName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/datasets/$(datasetName)/content"; aws_config=aws_config)
delete_dataset_content(datasetName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/datasets/$(datasetName)/content", args; aws_config=aws_config)
"""
DeleteDatastore()
Deletes the specified data store.
# Required Parameters
- `datastoreName`: The name of the data store to delete.
"""
delete_datastore(datastoreName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/datastores/$(datastoreName)"; aws_config=aws_config)
delete_datastore(datastoreName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/datastores/$(datastoreName)", args; aws_config=aws_config)
"""
DeletePipeline()
Deletes the specified pipeline.
# Required Parameters
- `pipelineName`: The name of the pipeline to delete.
"""
delete_pipeline(pipelineName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/pipelines/$(pipelineName)"; aws_config=aws_config)
delete_pipeline(pipelineName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/pipelines/$(pipelineName)", args; aws_config=aws_config)
"""
DescribeChannel()
Retrieves information about a channel.
# Required Parameters
- `channelName`: The name of the channel whose information is retrieved.
# Optional Parameters
- `includeStatistics`: If true, additional statistical information about the channel is included in the response. This feature cannot be used with a channel whose S3 storage is customer-managed.
"""
describe_channel(channelName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/channels/$(channelName)"; aws_config=aws_config)
describe_channel(channelName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/channels/$(channelName)", args; aws_config=aws_config)
"""
DescribeDataset()
Retrieves information about a dataset.
# Required Parameters
- `datasetName`: The name of the data set whose information is retrieved.
"""
describe_dataset(datasetName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/datasets/$(datasetName)"; aws_config=aws_config)
describe_dataset(datasetName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/datasets/$(datasetName)", args; aws_config=aws_config)
"""
DescribeDatastore()
Retrieves information about a data store.
# Required Parameters
- `datastoreName`: The name of the data store
# Optional Parameters
- `includeStatistics`: If true, additional statistical information about the data store is included in the response. This feature cannot be used with a data store whose S3 storage is customer-managed.
"""
describe_datastore(datastoreName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/datastores/$(datastoreName)"; aws_config=aws_config)
describe_datastore(datastoreName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/datastores/$(datastoreName)", args; aws_config=aws_config)
"""
DescribeLoggingOptions()
Retrieves the current settings of the AWS IoT Analytics logging options.
"""
describe_logging_options(; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/logging"; aws_config=aws_config)
describe_logging_options(args::AbstractDict{String, Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/logging", args; aws_config=aws_config)
"""
DescribePipeline()
Retrieves information about a pipeline.
# Required Parameters
- `pipelineName`: The name of the pipeline whose information is retrieved.
"""
describe_pipeline(pipelineName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/pipelines/$(pipelineName)"; aws_config=aws_config)
describe_pipeline(pipelineName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/pipelines/$(pipelineName)", args; aws_config=aws_config)
"""
GetDatasetContent()
Retrieves the contents of a data set as presigned URIs.
# Required Parameters
- `datasetName`: The name of the data set whose contents are retrieved.
# Optional Parameters
- `versionId`: The version of the data set whose contents are retrieved. You can also use the strings \"LATEST\" or \"LATEST_SUCCEEDED\" to retrieve the contents of the latest or latest successfully completed data set. If not specified, \"LATEST_SUCCEEDED\" is the default.
"""
get_dataset_content(datasetName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/datasets/$(datasetName)/content"; aws_config=aws_config)
get_dataset_content(datasetName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/datasets/$(datasetName)/content", args; aws_config=aws_config)
"""
ListChannels()
Retrieves a list of channels.
# Optional Parameters
- `maxResults`: The maximum number of results to return in this request. The default value is 100.
- `nextToken`: The token for the next set of results.
"""
list_channels(; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/channels"; aws_config=aws_config)
list_channels(args::AbstractDict{String, Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/channels", args; aws_config=aws_config)
"""
ListDatasetContents()
Lists information about data set contents that have been created.
# Required Parameters
- `datasetName`: The name of the data set whose contents information you want to list.
# Optional Parameters
- `maxResults`: The maximum number of results to return in this request.
- `nextToken`: The token for the next set of results.
- `scheduledBefore`: A filter to limit results to those data set contents whose creation is scheduled before the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)
- `scheduledOnOrAfter`: A filter to limit results to those data set contents whose creation is scheduled on or after the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)
"""
list_dataset_contents(datasetName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/datasets/$(datasetName)/contents"; aws_config=aws_config)
list_dataset_contents(datasetName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/datasets/$(datasetName)/contents", args; aws_config=aws_config)
"""
ListDatasets()
Retrieves information about data sets.
# Optional Parameters
- `maxResults`: The maximum number of results to return in this request. The default value is 100.
- `nextToken`: The token for the next set of results.
"""
list_datasets(; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/datasets"; aws_config=aws_config)
list_datasets(args::AbstractDict{String, Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/datasets", args; aws_config=aws_config)
"""
ListDatastores()
Retrieves a list of data stores.
# Optional Parameters
- `maxResults`: The maximum number of results to return in this request. The default value is 100.
- `nextToken`: The token for the next set of results.
"""
list_datastores(; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/datastores"; aws_config=aws_config)
list_datastores(args::AbstractDict{String, Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/datastores", args; aws_config=aws_config)
"""
ListPipelines()
Retrieves a list of pipelines.
# Optional Parameters
- `maxResults`: The maximum number of results to return in this request. The default value is 100.
- `nextToken`: The token for the next set of results.
"""
list_pipelines(; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/pipelines"; aws_config=aws_config)
list_pipelines(args::AbstractDict{String, Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/pipelines", args; aws_config=aws_config)
"""
ListTagsForResource()
Lists the tags (metadata) that you have assigned to the resource.
# Required Parameters
- `resourceArn`: The ARN of the resource whose tags you want to list.
"""
list_tags_for_resource(resourceArn; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/tags", Dict{String, Any}("resourceArn"=>resourceArn); aws_config=aws_config)
list_tags_for_resource(resourceArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/tags", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("resourceArn"=>resourceArn), args)); aws_config=aws_config)
"""
PutLoggingOptions()
Sets or updates the AWS IoT Analytics logging options. If you update the value of any loggingOptions field, it takes up to one minute for the change to take effect. Also, if you change the policy attached to the role you specified in the roleArn field (for example, to correct an invalid policy), it takes up to five minutes for that change to take effect.
# Required Parameters
- `loggingOptions`: The new values of the AWS IoT Analytics logging options.
"""
put_logging_options(loggingOptions; aws_config::AWSConfig=global_aws_config()) = iotanalytics("PUT", "/logging", Dict{String, Any}("loggingOptions"=>loggingOptions); aws_config=aws_config)
put_logging_options(loggingOptions, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("PUT", "/logging", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("loggingOptions"=>loggingOptions), args)); aws_config=aws_config)
"""
RunPipelineActivity()
Simulates the results of running a pipeline activity on a message payload.
# Required Parameters
- `payloads`: The sample message payloads on which the pipeline activity is run.
- `pipelineActivity`: The pipeline activity that is run. This must not be a channel activity or a datastore activity because these activities are used in a pipeline only to load the original message and to store the (possibly) transformed message. If a lambda activity is specified, only short-running Lambda functions (those with a timeout of 30 seconds or less) can be used.
"""
run_pipeline_activity(payloads, pipelineActivity; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/pipelineactivities/run", Dict{String, Any}("payloads"=>payloads, "pipelineActivity"=>pipelineActivity); aws_config=aws_config)
run_pipeline_activity(payloads, pipelineActivity, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/pipelineactivities/run", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("payloads"=>payloads, "pipelineActivity"=>pipelineActivity), args)); aws_config=aws_config)
"""
SampleChannelData()
Retrieves a sample of messages from the specified channel ingested during the specified timeframe. Up to 10 messages can be retrieved.
# Required Parameters
- `channelName`: The name of the channel whose message samples are retrieved.
# Optional Parameters
- `endTime`: The end of the time window from which sample messages are retrieved.
- `maxMessages`: The number of sample messages to be retrieved. The limit is 10. The default is also 10.
- `startTime`: The start of the time window from which sample messages are retrieved.
"""
sample_channel_data(channelName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/channels/$(channelName)/sample"; aws_config=aws_config)
sample_channel_data(channelName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("GET", "/channels/$(channelName)/sample", args; aws_config=aws_config)
"""
StartPipelineReprocessing()
Starts the reprocessing of raw message data through the pipeline.
# Required Parameters
- `pipelineName`: The name of the pipeline on which to start reprocessing.
# Optional Parameters
- `endTime`: The end time (exclusive) of raw message data that is reprocessed.
- `startTime`: The start time (inclusive) of raw message data that is reprocessed.
"""
start_pipeline_reprocessing(pipelineName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/pipelines/$(pipelineName)/reprocessing"; aws_config=aws_config)
start_pipeline_reprocessing(pipelineName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/pipelines/$(pipelineName)/reprocessing", args; aws_config=aws_config)
"""
TagResource()
Adds to or modifies the tags of the given resource. Tags are metadata that can be used to manage a resource.
# Required Parameters
- `resourceArn`: The ARN of the resource whose tags you want to modify.
- `tags`: The new or modified tags for the resource.
"""
tag_resource(resourceArn, tags; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/tags", Dict{String, Any}("resourceArn"=>resourceArn, "tags"=>tags); aws_config=aws_config)
tag_resource(resourceArn, tags, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("POST", "/tags", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("resourceArn"=>resourceArn, "tags"=>tags), args)); aws_config=aws_config)
"""
UntagResource()
Removes the given tags (metadata) from the resource.
# Required Parameters
- `resourceArn`: The ARN of the resource whose tags you want to remove.
- `tagKeys`: The keys of those tags which you want to remove.
"""
untag_resource(resourceArn, tagKeys; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/tags", Dict{String, Any}("resourceArn"=>resourceArn, "tagKeys"=>tagKeys); aws_config=aws_config)
untag_resource(resourceArn, tagKeys, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("DELETE", "/tags", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("resourceArn"=>resourceArn, "tagKeys"=>tagKeys), args)); aws_config=aws_config)
"""
UpdateChannel()
Updates the settings of a channel.
# Required Parameters
- `channelName`: The name of the channel to be updated.
# Optional Parameters
- `channelStorage`: Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the channel is created.
- `retentionPeriod`: How long, in days, message data is kept for the channel. The retention period cannot be updated if the channel's S3 storage is customer-managed.
"""
update_channel(channelName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("PUT", "/channels/$(channelName)"; aws_config=aws_config)
update_channel(channelName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("PUT", "/channels/$(channelName)", args; aws_config=aws_config)
"""
UpdateDataset()
Updates the settings of a data set.
# Required Parameters
- `actions`: A list of DatasetAction objects.
- `datasetName`: The name of the data set to update.
# Optional Parameters
- `contentDeliveryRules`: When dataset contents are created, they are delivered to destinations specified here.
- `lateDataRules`: A list of data rules that send notifications to Amazon CloudWatch when data arrives late. To specify lateDataRules, the dataset must use a DeltaTime filter.
- `retentionPeriod`: How long, in days, dataset contents are kept for the dataset.
- `triggers`: A list of DatasetTrigger objects. The list can be empty or can contain up to five DatasetTrigger objects.
- `versioningConfiguration`: Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.
"""
update_dataset(actions, datasetName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("PUT", "/datasets/$(datasetName)", Dict{String, Any}("actions"=>actions); aws_config=aws_config)
update_dataset(actions, datasetName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("PUT", "/datasets/$(datasetName)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("actions"=>actions), args)); aws_config=aws_config)
"""
UpdateDatastore()
Updates the settings of a data store.
# Required Parameters
- `datastoreName`: The name of the data store to be updated.
# Optional Parameters
- `datastoreStorage`: Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.
- `retentionPeriod`: How long, in days, message data is kept for the data store. The retention period cannot be updated if the data store's S3 storage is customer-managed.
"""
update_datastore(datastoreName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("PUT", "/datastores/$(datastoreName)"; aws_config=aws_config)
update_datastore(datastoreName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("PUT", "/datastores/$(datastoreName)", args; aws_config=aws_config)
"""
UpdatePipeline()
Updates the settings of a pipeline. You must specify both a channel and a datastore activity and, optionally, as many as 23 additional activities in the pipelineActivities array.
# Required Parameters
- `pipelineActivities`: A list of PipelineActivity objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data. The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity. For example: pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]
- `pipelineName`: The name of the pipeline to update.
"""
update_pipeline(pipelineActivities, pipelineName; aws_config::AWSConfig=global_aws_config()) = iotanalytics("PUT", "/pipelines/$(pipelineName)", Dict{String, Any}("pipelineActivities"=>pipelineActivities); aws_config=aws_config)
update_pipeline(pipelineActivities, pipelineName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = iotanalytics("PUT", "/pipelines/$(pipelineName)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("pipelineActivities"=>pipelineActivities), args)); aws_config=aws_config)
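# A minimal usage sketch (illustrative; all resource names are placeholders that
# must exist in the target account):
#
#     activities = [
#         Dict("channel" => Dict("name" => "in", "channelName" => "mychannel", "next" => "xform")),
#         Dict("lambda" => Dict("name" => "xform", "lambdaName" => "my-function", "batchSize" => 10, "next" => "out")),
#         Dict("datastore" => Dict("name" => "out", "datastoreName" => "mydatastore")),
#     ]
#     update_pipeline(activities, "mypipeline")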
|
{"hexsha": "00459cdf705e355ed50aacfcfdd42ba132cef6a7", "size": 29904, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/services/iotanalytics.jl", "max_stars_repo_name": "BradLyman/AWS.jl", "max_stars_repo_head_hexsha": "30a46ae732050585e62def0c4955f5e1363c4d86", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/services/iotanalytics.jl", "max_issues_repo_name": "BradLyman/AWS.jl", "max_issues_repo_head_hexsha": "30a46ae732050585e62def0c4955f5e1363c4d86", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/services/iotanalytics.jl", "max_forks_repo_name": "BradLyman/AWS.jl", "max_forks_repo_head_hexsha": "30a46ae732050585e62def0c4955f5e1363c4d86", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 63.4904458599, "max_line_length": 799, "alphanum_fraction": 0.7755484216, "num_tokens": 6964}
|
[STATEMENT]
lemma msteps0_msteps0_stateless: "fst (msteps0 w st) = msteps0_stateless w st"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fst (msteps0 w st) = msteps0_stateless w st
[PROOF STEP]
by (induct w arbitrary: st) (auto simp: split_beta)
|
{"llama_tokens": 105, "file": "MFODL_Monitor_Optimized_Monitor", "length": 1}
|
from enum import Enum
import numpy as np
# np.float was an alias of the builtin float and has been removed from NumPy
NUMERICAL_TYPES = (float, int, np.float64)
class Comparator(Enum):
"""
Enum for less equals, equals and greater equals.
"""
LESS_EQUALS = 0
"""Less equals (<=)"""
EQUALS = 1
"""Equals (=)"""
GREATER_EQUALS = 2
"""Greater equals (>=)"""
class VarTypes(Enum):
"""
Enum for variable types, i.e. continuous, binary or integer.
"""
CONTINUOUS = 1
"""Continuous"""
BINARY = 2
"""Binary"""
INTEGER = 3
"""Integer"""
class LPStatus(Enum):
"""
Enum for status of linear program, i.e. unbounded, infeasible, optimal, unknown or error
"""
UNBOUNDED = 0
"""Problem is unbounded"""
INFEASIBLE = 1
"""Problem is infeasible"""
OPTIMAL = 2
"""Problem has been solved and an optimal solution has been found"""
UNKNOWN = 3
"""The status of the problem is unknown"""
ERROR = 4
"""An error occurred"""
class ProblemSense(Enum):
"""
Enum for sense of the problem, i.e. max or min
"""
MAX = -1
"""Maximization"""
MIN = 1
"Minimization"
class VariableBound(Enum):
"""
Enum for variable bound, i.e. lower bound or upper bound.
"""
LB = 0
"""Lower bound"""
UB = 1
"""Upper bound"""
class HeuristicStatus(Enum):
"""
Enum for status of heuristic
"""
SOL_FOUND = 0
"""Solution found"""
NO_SOL_FOUND = 1
"""No solution found"""
ERROR = 2
"""Error"""
class BoundDirection(Enum):
"""
Enum for the direction to bound in HeuristicBounds.
Use this exclusively for HeuristicBounds!
"""
LOWER = 0
"""Bound variables to their lower bound"""
UPPER = 1
"""Bound variables to their upper bound"""
CLOSEST = 2
"""Bound variables to their closest bound w.r.t the relaxation solution"""
|
{"hexsha": "3d9b6ac52256c430553c201f13eeb214a787c48e", "size": 1867, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/hips/constants.py", "max_stars_repo_name": "cxlvinchau/hips", "max_stars_repo_head_hexsha": "696942e76fb3b131d5215726191ae0f751229c39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-06-23T12:08:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-11T14:49:48.000Z", "max_issues_repo_path": "src/hips/constants.py", "max_issues_repo_name": "cxlvinchau/hips", "max_issues_repo_head_hexsha": "696942e76fb3b131d5215726191ae0f751229c39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/hips/constants.py", "max_forks_repo_name": "cxlvinchau/hips", "max_forks_repo_head_hexsha": "696942e76fb3b131d5215726191ae0f751229c39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.7444444444, "max_line_length": 92, "alphanum_fraction": 0.5897161221, "include": true, "reason": "import numpy", "num_tokens": 489}
|
(************************************************************************)
(* v * The Coq Proof Assistant / The Coq Development Team *)
(* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2017 *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(************************************************************************)
(*********************************************************************)
(** * List permutations as a composition of adjacent transpositions *)
(*********************************************************************)
(* Adapted in May 2006 by Jean-Marc Notin from initial contents by
Laurent Théry (Huffman contribution, October 2003) *)
Require Import Setoid Compare_dec Morphisms FinFun.
From UnivalentParametricity Require Import FP.
Require Import list.
Notation NoDup := list.NoDup.
Set Implicit Arguments.
(************************************************************************)
(* We need to copy this file in order to get universe polymorphism *)
(************************************************************************)
Set Universe Polymorphism.
Section Permutation.
Variable A:Type.
Inductive Permutation : list A -> list A -> Prop :=
| perm_nil: Permutation [] []
| perm_skip x l l' : Permutation l l' -> Permutation (x::l) (x::l')
| perm_swap x y l : Permutation (y::x::l) (x::y::l)
| perm_trans l l' l'' :
Permutation l l' -> Permutation l' l'' -> Permutation l l''.
Local Hint Constructors Permutation.
(** Some facts about [Permutation] *)
Theorem Permutation_nil : forall (l : list A), Permutation [] l -> l = [].
Proof.
intros l HF.
remember (@nil A) as m in HF.
induction HF; discriminate || auto.
Qed.
Theorem Permutation_nil_cons : forall (l : list A) (x : A),
~ Permutation nil (x::l).
Proof.
intros l x HF.
apply Permutation_nil in HF; discriminate.
Qed.
(** Permutation over lists is an equivalence relation *)
Theorem Permutation_refl : forall l : list A, Permutation l l.
Proof.
induction l; constructor. exact IHl.
Qed.
Theorem Permutation_sym : forall l l' : list A,
Permutation l l' -> Permutation l' l.
Proof.
intros l l' Hperm; induction Hperm; auto.
apply perm_trans with (l':=l'); assumption.
Qed.
Theorem Permutation_trans : forall l l' l'' : list A,
Permutation l l' -> Permutation l' l'' -> Permutation l l''.
Proof.
exact perm_trans.
Qed.
End Permutation.
Hint Resolve Permutation_refl perm_nil perm_skip.
(* These hints do not reduce the size of the problem to solve and they
must be used with care to avoid combinatoric explosions *)
Local Hint Resolve perm_swap perm_trans.
Local Hint Resolve Permutation_sym Permutation_trans.
(* This provides reflexivity, symmetry and transitivity and rewriting
on morphims to come *)
Instance Permutation_Equivalence A : Equivalence (@Permutation A) | 10 := {
Equivalence_Reflexive := @Permutation_refl A ;
Equivalence_Symmetric := @Permutation_sym A ;
Equivalence_Transitive := @Permutation_trans A }.
Instance Permutation_cons A :
Proper (Logic.eq ==> @Permutation A ==> @Permutation A) (@cons A) | 10.
Proof.
repeat intro; subst; auto using perm_skip.
Qed.
Section Permutation_properties.
Variable A:Type.
Implicit Types a b : A.
Implicit Types l m : list A.
(** Compatibility with others operations on lists *)
Theorem Permutation_in : forall (l l' : list A) (x : A),
Permutation l l' -> In x l -> In x l'.
Proof.
intros l l' x Hperm; induction Hperm; simpl; tauto.
Qed.
Global Instance Permutation_in' :
Proper (Logic.eq ==> @Permutation A ==> iff) (@In A) | 10.
Proof.
repeat red; intros; subst; eauto using Permutation_in.
Qed.
Lemma Permutation_app_tail : forall (l l' tl : list A),
Permutation l l' -> Permutation (l++tl) (l'++tl).
Proof.
intros l l' tl Hperm; induction Hperm as [|x l l'|x y l|l l' l'']; simpl; auto.
eapply Permutation_trans with (l':=l'++tl); trivial.
Qed.
Lemma Permutation_app_head : forall (l tl tl' : list A),
Permutation tl tl' -> Permutation (l++tl) (l++tl').
Proof.
intros l tl tl' Hperm; induction l;
[trivial | repeat rewrite <- app_comm_cons; constructor; assumption].
Qed.
Theorem Permutation_app : forall (l m l' m' : list A),
Permutation l l' -> Permutation m m' -> Permutation (l++m) (l'++m').
Proof.
intros l m l' m' Hpermll' Hpermmm';
induction Hpermll' as [|x l l'|x y l|l l' l''];
repeat rewrite <- app_comm_cons; auto.
apply Permutation_trans with (l' := (x :: y :: l ++ m));
[idtac | repeat rewrite app_comm_cons; apply Permutation_app_head]; trivial.
apply Permutation_trans with (l' := (l' ++ m')); try assumption.
apply Permutation_app_tail; assumption.
Qed.
Global Instance Permutation_app' :
Proper (@Permutation A ==> @Permutation A ==> @Permutation A) (@app A) | 10.
Proof.
repeat intro; now apply Permutation_app.
Qed.
Lemma Permutation_add_inside : forall a (l l' tl tl' : list A),
Permutation l l' -> Permutation tl tl' ->
Permutation (l ++ a :: tl) (l' ++ a :: tl').
Proof.
intros; apply Permutation_app; auto.
Qed.
Lemma Permutation_cons_append : forall (l : list A) x,
Permutation (x :: l) (l ++ x :: nil).
Proof. induction l; intros; auto. simpl. rewrite <- IHl; auto. Qed.
Local Hint Resolve Permutation_cons_append.
Theorem Permutation_app_comm : forall (l l' : list A),
Permutation (l ++ l') (l' ++ l).
Proof.
induction l as [|x l]; simpl; intro l'.
rewrite app_nil_r; trivial. rewrite IHl.
rewrite app_comm_cons, Permutation_cons_append.
now rewrite <- app_assoc.
Qed.
Local Hint Resolve Permutation_app_comm.
Theorem Permutation_cons_app : forall (l l1 l2:list A) a,
Permutation l (l1 ++ l2) -> Permutation (a :: l) (l1 ++ a :: l2).
Proof.
intros l l1 l2 a H. rewrite H.
rewrite app_comm_cons, Permutation_cons_append.
now rewrite <- app_assoc.
Qed.
Local Hint Resolve Permutation_cons_app.
Lemma Permutation_Add a l l' : Add a l l' -> Permutation (a::l) l'.
Proof.
induction 1; simpl; trivial.
rewrite perm_swap. now apply perm_skip.
Qed.
Theorem Permutation_middle : forall (l1 l2:list A) a,
Permutation (a :: l1 ++ l2) (l1 ++ a :: l2).
Proof.
auto.
Qed.
Local Hint Resolve Permutation_middle.
Theorem Permutation_rev : forall (l : list A), Permutation l (rev l).
Proof.
induction l as [| x l]; simpl; trivial. now rewrite IHl at 1.
Qed.
Global Instance Permutation_rev' :
Proper (@Permutation A ==> @Permutation A) (@rev A) | 10.
Proof.
repeat intro; now rewrite <- 2 Permutation_rev.
Qed.
Theorem Permutation_length : forall (l l' : list A),
Permutation l l' -> length l = length l'.
Proof.
intros l l' Hperm; induction Hperm; simpl; auto. now transitivity (length l').
Qed.
Global Instance Permutation_length' :
Proper (@Permutation A ==> Logic.eq) (@length A) | 10.
Proof.
exact Permutation_length.
Qed.
Theorem Permutation_ind_bis :
forall P : list A -> list A -> Prop,
P [] [] ->
(forall x l l', Permutation l l' -> P l l' -> P (x :: l) (x :: l')) ->
(forall x y l l', Permutation l l' -> P l l' -> P (y :: x :: l) (x :: y :: l')) ->
(forall l l' l'', Permutation l l' -> P l l' -> Permutation l' l'' -> P l' l'' -> P l l'') ->
forall l l', Permutation l l' -> P l l'.
Proof.
intros P Hnil Hskip Hswap Htrans.
induction 1; auto.
apply Htrans with (x::y::l); auto.
apply Hswap; auto.
induction l; auto.
apply Hskip; auto.
apply Hskip; auto.
induction l; auto.
eauto.
Qed.
Theorem Permutation_nil_app_cons : forall (l l' : list A) (x : A),
~ Permutation nil (l++x::l').
Proof.
intros l l' x HF.
apply Permutation_nil in HF. destruct l; discriminate.
Qed.
Ltac InvAdd := repeat (match goal with
| H: Add ?x _ (_ :: _) |- _ => inversion H; clear H; subst
end).
Ltac finish_basic_perms H :=
try constructor; try rewrite perm_swap; try constructor; trivial;
(rewrite <- H; now apply Permutation_Add) ||
(rewrite H; symmetry; now apply Permutation_Add).
Theorem Permutation_Add_inv a l1 l2 :
Permutation l1 l2 -> forall l1' l2', Add a l1' l1 -> Add a l2' l2 ->
Permutation l1' l2'.
Proof.
revert l1 l2. refine (Permutation_ind_bis _ _ _ _ _).
- (* nil *)
inversion_clear 1.
- (* skip *)
intros x l1 l2 PE IH. intros. InvAdd; try finish_basic_perms PE.
constructor. now apply IH.
- (* swap *)
intros x y l1 l2 PE IH. intros. InvAdd; try finish_basic_perms PE.
rewrite perm_swap; do 2 constructor. now apply IH.
- (* trans *)
intros l1 l l2 PE IH PE' IH' l1' l2' AD1 AD2.
assert (Ha : In a l). { rewrite <- PE. rewrite (Add_in AD1). simpl; auto. }
destruct (Add_inv _ _ Ha) as (l',AD).
transitivity l'; auto.
Qed.
Theorem Permutation_app_inv (l1 l2 l3 l4:list A) a :
Permutation (l1++a::l2) (l3++a::l4) -> Permutation (l1++l2) (l3 ++ l4).
Proof.
intros. eapply Permutation_Add_inv; eauto using Add_app.
Qed.
Theorem Permutation_cons_inv l l' a :
Permutation (a::l) (a::l') -> Permutation l l'.
Proof.
intro. eapply Permutation_Add_inv; eauto using Add_head.
Qed.
Theorem Permutation_cons_app_inv l l1 l2 a :
Permutation (a :: l) (l1 ++ a :: l2) -> Permutation l (l1 ++ l2).
Proof.
intro. eapply Permutation_Add_inv; eauto using Add_head, Add_app.
Qed.
Theorem Permutation_app_inv_l : forall l l1 l2,
Permutation (l ++ l1) (l ++ l2) -> Permutation l1 l2.
Proof.
induction l; simpl; auto.
intros.
apply IHl.
apply Permutation_cons_inv with a; auto.
Qed.
Theorem Permutation_app_inv_r l l1 l2 :
Permutation (l1 ++ l) (l2 ++ l) -> Permutation l1 l2.
Proof.
rewrite 2 (Permutation_app_comm _ l). apply Permutation_app_inv_l.
Qed.
Lemma Permutation_length_1_inv: forall a l, Permutation [a] l -> l = [a].
Proof.
intros a l H; remember [a] as m in H.
induction H; try (injection Heqm as -> ->);
discriminate || auto.
apply Permutation_nil in H as ->; trivial.
Qed.
Lemma Permutation_length_1: forall a b, Permutation [a] [b] -> a = b.
Proof.
intros a b H.
apply Permutation_length_1_inv in H; injection H as ->; trivial.
Qed.
Lemma Permutation_length_2_inv :
forall a1 a2 l, Permutation [a1;a2] l -> l = [a1;a2] \/ l = [a2;a1].
Proof.
intros a1 a2 l H; remember [a1;a2] as m in H.
revert a1 a2 Heqm.
induction H; intros; try (injection Heqm as ? ?; subst);
discriminate || (try tauto).
apply Permutation_length_1_inv in H as ->; left; auto.
apply IHPermutation1 in Heqm as [H1|H1]; apply IHPermutation2 in H1 as [];
auto.
Qed.
Lemma Permutation_length_2 :
forall a1 a2 b1 b2, Permutation [a1;a2] [b1;b2] ->
a1 = b1 /\ a2 = b2 \/ a1 = b2 /\ a2 = b1.
Proof.
intros a1 b1 a2 b2 H.
apply Permutation_length_2_inv in H as [H|H]; injection H as -> ->; auto.
Qed.
Lemma NoDup_Permutation l l' : NoDup l -> NoDup l' ->
(forall x:A, In x l <-> In x l') -> Permutation l l'.
Proof.
intros N. revert l'. induction N as [|a l Hal Hl IH].
- destruct l'; simpl; auto.
intros Hl' H. exfalso. rewrite (H a); auto.
- intros l' Hl' H.
assert (Ha : In a l') by (apply H; simpl; auto).
destruct (Add_inv _ _ Ha) as (l'' & AD).
rewrite <- (Permutation_Add AD).
apply perm_skip.
apply IH; clear IH.
* now apply (NoDup_Add AD).
* split.
+ apply incl_Add_inv with a l'; trivial. intro. apply H.
+ intro Hx.
assert (Hx' : In x (a::l)).
{ apply H. rewrite (Add_in AD). now right. }
destruct Hx'; simpl; trivial. subst.
rewrite (NoDup_Add AD) in Hl'. tauto.
Qed.
Lemma NoDup_Permutation_bis l l' : NoDup l -> NoDup l' ->
length l' <= length l -> incl l l' -> Permutation l l'.
Proof.
intros. apply NoDup_Permutation; auto.
split; auto. apply NoDup_length_incl; trivial.
Qed.
Lemma Permutation_NoDup l l' : Permutation l l' -> NoDup l -> NoDup l'.
Proof.
induction 1; auto.
* inversion_clear 1; constructor; eauto using Permutation_in.
* inversion_clear 1 as [|? ? H1 H2]. inversion_clear H2; simpl in *.
constructor. simpl; intuition. constructor; intuition.
Qed.
Global Instance Permutation_NoDup' :
Proper (@Permutation A ==> iff) (@NoDup A) | 10.
Proof.
repeat red; eauto using Permutation_NoDup.
Qed.
End Permutation_properties.
Section Permutation_map.
Variable A B : Type.
Variable f : A -> B.
Lemma Permutation_map l l' :
Permutation l l' -> Permutation (map f l) (map f l').
Proof.
induction 1; simpl; eauto.
Qed.
Global Instance Permutation_map' :
Proper (@Permutation A ==> @Permutation B) (map f) | 10.
Proof.
exact Permutation_map.
Qed.
End Permutation_map.
(* Lemma nat_bijection_Permutation n f : *)
(* bFun n f -> *)
(* Injective f -> *)
(* let l := seq 0 n in Permutation (map f l) l. *)
(* Proof. *)
(* intros Hf BD. *)
(* apply NoDup_Permutation_bis; auto using Injective_map_NoDup, seq_NoDup. *)
(* * rewrite map_length. *)
(* * intros x. rewrite in_map_iff. intros (y & <- & Hy'). *)
(* rewrite in_seq in *. simpl in *. *)
(* destruct Hy' as (_,Hy'). auto with arith. *)
(* Qed. *)
(* Section Permutation_alt. *)
(* Variable A:Type. *)
(* Implicit Type a : A. *)
(* Implicit Type l : list A. *)
(* (** Alternative characterization of permutation *)
(* via [nth_error] and [nth] *) *)
(* Let adapt f n := *)
(* let m := f (S n) in if le_lt_dec m (f 0) then m else pred m. *)
(* Let adapt_injective f : Injective f -> Injective (adapt f). *)
(* Proof. *)
(* unfold adapt. intros Hf x y EQ. *)
(* destruct le_lt_dec as [LE|LT]; destruct le_lt_dec as [LE'|LT']. *)
(* - now apply eq_add_S, Hf. *)
(* - apply Lt.le_lt_or_eq in LE. *)
(* destruct LE as [LT|EQ']; [|now apply Hf in EQ']. *)
(* unfold lt in LT. rewrite EQ in LT. *)
(* rewrite <- (Lt.S_pred _ _ LT') in LT. *)
(* elim (Lt.lt_not_le _ _ LT' LT). *)
(* - apply Lt.le_lt_or_eq in LE'. *)
(* destruct LE' as [LT'|EQ']; [|now apply Hf in EQ']. *)
(* unfold lt in LT'. rewrite <- EQ in LT'. *)
(* rewrite <- (Lt.S_pred _ _ LT) in LT'. *)
(* elim (Lt.lt_not_le _ _ LT LT'). *)
(* - apply eq_add_S, Hf. *)
(* now rewrite (Lt.S_pred _ _ LT), (Lt.S_pred _ _ LT'), EQ. *)
(* Qed. *)
(* Let adapt_ok a l1 l2 f : Injective f -> length l1 = f 0 -> *)
(* forall n, nth_error (l1++a::l2) (f (S n)) = nth_error (l1++l2) (adapt f n). *)
(* Proof. *)
(* unfold adapt. intros Hf E n. *)
(* destruct le_lt_dec as [LE|LT]. *)
(* - apply Lt.le_lt_or_eq in LE. *)
(* destruct LE as [LT|EQ]; [|now apply Hf in EQ]. *)
(* rewrite <- E in LT. *)
(* rewrite 2 nth_error_app1; auto. *)
(* - rewrite (Lt.S_pred _ _ LT) at 1. *)
(* rewrite <- E, (Lt.S_pred _ _ LT) in LT. *)
(* rewrite 2 nth_error_app2; auto with arith. *)
(* rewrite <- Minus.minus_Sn_m; auto with arith. *)
(* Qed. *)
(* Lemma Permutation_nth_error l l' : *)
(* Permutation l l' <-> *)
(* (length l = length l' /\ *)
(* exists f:nat->nat, *)
(* Injective f /\ forall n, nth_error l' n = nth_error l (f n)). *)
(* Proof. *)
(* split. *)
(* { intros P. *)
(* split; [now apply Permutation_length|]. *)
(* induction P. *)
(* - exists (fun n => n). *)
(* split; try red; auto. *)
(* - destruct IHP as (f & Hf & Hf'). *)
(* exists (fun n => match n with O => O | S n => S (f n) end). *)
(* split; try red. *)
(* * intros [|y] [|z]; simpl; now auto. *)
(* * intros [|n]; simpl; auto. *)
(* - exists (fun n => match n with 0 => 1 | 1 => 0 | n => n end). *)
(* split; try red. *)
(* * intros [|[|z]] [|[|t]]; simpl; now auto. *)
(* * intros [|[|n]]; simpl; auto. *)
(* - destruct IHP1 as (f & Hf & Hf'). *)
(* destruct IHP2 as (g & Hg & Hg'). *)
(* exists (fun n => f (g n)). *)
(* split; try red. *)
(* * auto. *)
(* * intros n. rewrite <- Hf'; auto. } *)
(* { revert l. induction l'. *)
(* - intros [|l] (E & _); now auto. *)
(* - intros l (E & f & Hf & Hf'). *)
(* simpl in E. *)
(* assert (Ha : nth_error l (f 0) = Some a) *)
(* by (symmetry; apply (Hf' 0)). *)
(* destruct (nth_error_split l (f 0) Ha) as (l1 & l2 & L12 & L1). *)
(* rewrite L12. rewrite <- Permutation_middle. constructor. *)
(* apply IHl'; split; [|exists (adapt f); split]. *)
(* * revert E. rewrite L12, !app_length. simpl. *)
(* rewrite <- plus_n_Sm. now injection 1. *)
(* * now apply adapt_injective. *)
(* * intro n. rewrite <- (adapt_ok a), <- L12; auto. *)
(* apply (Hf' (S n)). } *)
(* Qed. *)
(* Lemma Permutation_nth_error_bis l l' : *)
(* Permutation l l' <-> *)
(* exists f:nat->nat, *)
(* Injective f /\ *)
(* bFun (length l) f /\ *)
(* (forall n, nth_error l' n = nth_error l (f n)). *)
(* Proof. *)
(* rewrite Permutation_nth_error; split. *)
(* - intros (E & f & Hf & Hf'). *)
(* exists f. do 2 (split; trivial). *)
(* intros n Hn. *)
(* destruct (Lt.le_or_lt (length l) (f n)) as [LE|LT]; trivial. *)
(* rewrite <- nth_error_None, <- Hf', nth_error_None, <- E in LE. *)
(* elim (Lt.lt_not_le _ _ Hn LE). *)
(* - intros (f & Hf & Hf2 & Hf3); split; [|exists f; auto]. *)
(* assert (H : length l' <= length l') by auto with arith. *)
(* rewrite <- nth_error_None, Hf3, nth_error_None in H. *)
(* destruct (Lt.le_or_lt (length l) (length l')) as [LE|LT]; *)
(* [|apply Hf2 in LT; elim (Lt.lt_not_le _ _ LT H)]. *)
(* apply Lt.le_lt_or_eq in LE. destruct LE as [LT|EQ]; trivial. *)
(* rewrite <- nth_error_Some, Hf3, nth_error_Some in LT. *)
(* assert (Hf' : bInjective (length l) f). *)
(* { intros x y _ _ E. now apply Hf. } *)
(* rewrite (bInjective_bSurjective Hf2) in Hf'. *)
(* destruct (Hf' _ LT) as (y & Hy & Hy'). *)
(* apply Hf in Hy'. subst y. elim (Lt.lt_irrefl _ Hy). *)
(* Qed. *)
(* Lemma Permutation_nth l l' d : *)
(* Permutation l l' <-> *)
(* (let n := length l in *)
(* length l' = n /\ *)
(* exists f:nat->nat, *)
(* bFun n f /\ *)
(* bInjective n f /\ *)
(* (forall x, x < n -> nth x l' d = nth (f x) l d)). *)
(* Proof. *)
(* split. *)
(* - intros H. *)
(* assert (E := Permutation_length H). *)
(* split; auto. *)
(* apply Permutation_nth_error_bis in H. *)
(* destruct H as (f & Hf & Hf2 & Hf3). *)
(* exists f. split; [|split]; auto. *)
(* intros x y _ _ Hxy. now apply Hf. *)
(* intros n Hn. rewrite <- 2 nth_default_eq. unfold nth_default. *)
(* now rewrite Hf3. *)
(* - intros (E & f & Hf1 & Hf2 & Hf3). *)
(* rewrite Permutation_nth_error. *)
(* split; auto. *)
(* exists (fun n => if le_lt_dec (length l) n then n else f n). *)
(* split. *)
(* * intros x y. *)
(* destruct le_lt_dec as [LE|LT]; *)
(* destruct le_lt_dec as [LE'|LT']; auto. *)
(* + apply Hf1 in LT'. intros ->. *)
(* elim (Lt.lt_irrefl (f y)). eapply Lt.lt_le_trans; eauto. *)
(* + apply Hf1 in LT. intros <-. *)
(* elim (Lt.lt_irrefl (f x)). eapply Lt.lt_le_trans; eauto. *)
(* * intros n. *)
(* destruct le_lt_dec as [LE|LT]. *)
(* + assert (LE' : length l' <= n) by (now rewrite E). *)
(* rewrite <- nth_error_None in LE, LE'. congruence. *)
(* + assert (LT' : n < length l') by (now rewrite E). *)
(* specialize (Hf3 n LT). rewrite <- 2 nth_default_eq in Hf3. *)
(* unfold nth_default in Hf3. *)
(* apply Hf1 in LT. *)
(* rewrite <- nth_error_Some in LT, LT'. *)
(* do 2 destruct nth_error; congruence. *)
(* Qed. *)
(* End Permutation_alt. *)
(* begin hide *)
Notation Permutation_app_swap := Permutation_app_comm (only parsing).
(* end hide *)
|
{"author": "uwplse", "repo": "pumpkin-pi", "sha": "a7743d4665d8c7a0ebfd9d39e1247ee7804d402b", "save_path": "github-repos/coq/uwplse-pumpkin-pi", "path": "github-repos/coq/uwplse-pumpkin-pi/pumpkin-pi-a7743d4665d8c7a0ebfd9d39e1247ee7804d402b/plugin/eval/equiv4free/perm.v"}
|
#include <stdexcept>
#include <boost/test/unit_test.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
using namespace std;
BOOST_AUTO_TEST_SUITE( TestFloatingPoint )
BOOST_AUTO_TEST_CASE( ShouldNotEqualWhen1Over3 )
{
float a1 = (float)( 1.0/3.0 );
double a2 = 1.0/3.0;
BOOST_CHECK( (double)a1 != a2 );
}
BOOST_AUTO_TEST_CASE( TestPointer )
{
float a = 1.0f;
float* b = &a;
BOOST_CHECK_EQUAL( a, *b);
}
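// Illustrative extra case: comparing an accumulated floating-point sum with
// operator== is fragile; BOOST_CHECK_CLOSE uses a percentage tolerance instead.
BOOST_AUTO_TEST_CASE( ShouldBeCloseWhenSummingTenths )
{
float sum = 0.0f;
for ( int i = 0; i < 10; ++i )
sum += 0.1f;
BOOST_CHECK_CLOSE( sum, 1.0f, 0.001f ); // tolerance is given in percent
}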
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "717f37e37aaa4d05d33744c10e9b24aed56e069f", "size": 459, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Sandbox/CPPTest/TestFloatingPoint.cpp", "max_stars_repo_name": "SungwooNam/ProgrammingStudy", "max_stars_repo_head_hexsha": "3c2fe6096fea29547f05ff29bbde14a48c4afa9b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-07-22T04:58:15.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-22T04:58:15.000Z", "max_issues_repo_path": "src/Sandbox/CPPTest/TestFloatingPoint.cpp", "max_issues_repo_name": "SungwooNam/ProgrammingStudy", "max_issues_repo_head_hexsha": "3c2fe6096fea29547f05ff29bbde14a48c4afa9b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2017-02-21T16:02:41.000Z", "max_issues_repo_issues_event_max_datetime": "2017-02-21T16:02:41.000Z", "max_forks_repo_path": "src/Sandbox/CPPTest/TestFloatingPoint.cpp", "max_forks_repo_name": "SungwooNam/ProgrammingStudy", "max_forks_repo_head_hexsha": "3c2fe6096fea29547f05ff29bbde14a48c4afa9b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.3928571429, "max_line_length": 48, "alphanum_fraction": 0.7015250545, "num_tokens": 140}
|
# Copyright (C) 2016 Enrique Saurez Georgia Institute of Technology
# Copyright (C) 2016 Li Cheng BUPT www.muzixing.com.
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Contributors
# Li Cheng, (http://www.muzixing.com)
# Enrique Saurez (esaurez@gatech.edu)
from ryu.base import app_manager
from ryu.controller import mac_to_port
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.lib.packet import ipv4
from ryu.lib.packet import ipv6
from ryu.lib.packet import arp
from ryu.ofproto import ether
from ryu.lib import mac
from ryu.topology.api import get_switch, get_link
from ryu.app.wsgi import ControllerBase
from ryu.topology import event, switches
import networkx as nx
class ProjectController(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(ProjectController, self).__init__(*args, **kwargs)
self.mac_to_port = {}
self.datapaths = {}
self.FLAGS = True
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if not datapath.id in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
def add_flow(self, datapath, idle_timeout, priority, match, actions):
"""
Pushes a new flow to the datapath (=switch)
:type datapath: ryu.controller.controller.Datapath
:type idle_timeout: int - idle timeout in seconds (0 = never expire)
:type priority: int - priority of the flow entry
:type match: ryu.ofproto.ofproto_v1_3_parser.OFPMatch - match rule for the flow
:type actions: list
:return: None
:rtype: None
"""
# Get the OpenFlow protocol definitions from the datapath
ofproto = datapath.ofproto
# Get the parser for the protocol from the datapath
parser = datapath.ofproto_parser
# The match rule is built by the caller; omitted match fields act as
# wildcards, so many other parameters (IP addresses, ports, ...) could be matched.
# Create the instruction that applies the given actions
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
# Build the flow-mod message; priority > 0 so it takes precedence over the
# table-miss entry installed in switch_features_handler
mod = parser.OFPFlowMod(
datapath=datapath, match=match, cookie=0,
command=ofproto.OFPFC_ADD, idle_timeout=idle_timeout, hard_timeout=0,
priority=priority, instructions=inst)
datapath.send_msg(mod)
#TODO: 1) Do you know what a decorator is? What is its use in a Ryu controller?
@set_ev_cls(ofp_event.EventOFPSwitchFeatures , CONFIG_DISPATCHER)
def switch_features_handler(self , ev):
"""
Called during handshake, defines rule to send all unknown packets to controller
:type ev: ryu.controller.ofp_event.EventOFPSwitchFeatures
:return: None
:rtype: None
"""
print "switch_features_handler is called"
#TODO: 1) Get the datapath (switch) from the ev object
datapath =
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
#TODO: 1) Why do you think we need the empty Match?
#TODO: 1) Why is it called "table-miss flow entry"?
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS , actions)]
#TODO: 1) Why is the priority zero here?
mod = datapath.ofproto_parser.OFPFlowMod(
datapath=datapath, match=match, cookie=0,
command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=0, instructions=inst)
datapath.send_msg(mod)
def _build_packet_out(self, datapath, buffer_id, src_port, dst_port, data):
"""
This function creates the packet that is going to be sent to the switch
:type datapath: ryu.controller.controller.Datapath
:type buffer_id: integer - ID assigned by datapath
:type src_port: integer - source port
:type dst_port: integer- output port
:type data: Packet data of a binary type value or an instances of packet.Packet.
:return: packet to be sent
:rtype: OFPPacketOut
"""
actions = []
if dst_port:
actions.append(datapath.ofproto_parser.OFPActionOutput(dst_port))
msg_data = None
if buffer_id == datapath.ofproto.OFP_NO_BUFFER:
if data is None:
return None
msg_data = data
out = datapath.ofproto_parser.OFPPacketOut(
datapath=datapath, buffer_id=buffer_id,
data=msg_data, in_port=src_port, actions=actions)
return out
def send_packet_out(self, datapath, buffer_id, src_port, dst_port, data):
"""
This function sends the packet to the corresponding switch
:type datapath: ryu.controller.controller.Datapath
:type buffer_id: integer - ID assigned by datapath
:type src_port: integer - source port
:type dst_port: integer- output port
:type data: Packet data of a binary type value or an instances of packet.Packet.
:return: None
:rtype: None
"""
out = self._build_packet_out(datapath, buffer_id,
src_port, dst_port, data)
if out:
datapath.send_msg(out)
#TODO: 1) What is the use of the flood? Why do we need the MAC address of a device?
def flood(self, msg):
"""
This function sends a message to flood the network to obtain ------------. What are we obtaining here?
:type msg: ryu.ofproto.ofproto_v1_3_parser.OFPPacketIn - An object which describes the corresponding OpenFlow message.
:return: None
:rtype: None
"""
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
out = self._build_packet_out(datapath, ofproto.OFP_NO_BUFFER,
ofproto.OFPP_CONTROLLER,
ofproto.OFPP_FLOOD, msg.data)
datapath.send_msg(out)
#TODO: 1) What is the ARP protocol?
def arp_forwarding(self, msg, src_ip, dst_ip, eth_pkt):
"""
This forwards the ARP message used to obtain MAC addresses; depending on whether the destination is already known, different actions are taken.
:type msg: ryu.ofproto.ofproto_v1_3_parser.OFPPacketIn - An object which describes the corresponding OpenFlow message.
:type src_ip: string
:type dst_ip: string
:type eth_pkt: ryu.lib.packet.ethernet
:return: None
:rtype: None
"""
datapath = msg.datapath
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
out_port = self.mac_to_port[datapath.id].get(eth_pkt.dst)
#What is the difference if we know the mac address and if we don't
if out_port is not None:
#TODO: 1) What is the eth_dst parameter in the match?
match = parser.OFPMatch(in_port=in_port, eth_dst=eth_pkt.dst,
eth_type=eth_pkt.ethertype)
actions = [parser.OFPActionOutput(out_port)]
self.add_flow(datapath, 0, 1, match, actions)
self.send_packet_out(datapath, msg.buffer_id, in_port,
out_port, msg.data)
self.logger.debug("Reply ARP to knew host")
else:
self.flood(msg)
def mac_learning(self, dpid, src_mac, in_port):
"""
If an unknown mac address is found, learn that for future packages
:type dpid: int - ID of the switch (datapath)
:type src_mac: string
:type in_port: int
:return: if it was correctly learned
:rtype: Bool
"""
# Initialize value on the dictionary
self.mac_to_port.setdefault(dpid, {})
#If the mac is already known
if src_mac in self.mac_to_port[dpid]:
# If the mac is coming from a different port than the one it was initially learned on
if in_port != self.mac_to_port[dpid][src_mac]:
return False
else:
# Store port used for the given MAC address.
self.mac_to_port[dpid][src_mac] = in_port
return True
def send_group_mod(self, datapath):
"""
This function creates the group rule for the corresponding datapath
:type datapath: ryu.controller.controller.Datapath
:return: None
:rtype: None
"""
ofproto = datapath.ofproto
ofp_parser = datapath.ofproto_parser
'''
TODO
Multi Path Transmission
It behaves as a load balancer for the topology of the workshop
'''
#TODO: Complete this for switch one; one possible completion is sketched below.
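# One possible completion (illustrative): split traffic arriving at switch 1
# across output ports 2 and 3 with a select-type group.
buckets = [
ofp_parser.OFPBucket(weight=50, watch_port=ofproto.OFPP_ANY,
watch_group=ofproto.OFPG_ANY,
actions=[ofp_parser.OFPActionOutput(2)]),
ofp_parser.OFPBucket(weight=50, watch_port=ofproto.OFPP_ANY,
watch_group=ofproto.OFPG_ANY,
actions=[ofp_parser.OFPActionOutput(3)])]
# group_id=7 matches the OFPActionGroup(group_id=7) used in _packet_in_handler
req = ofp_parser.OFPGroupMod(datapath, ofproto.OFPGC_ADD,
ofproto.OFPGT_SELECT, 7, buckets)
datapath.send_msg(req)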
#TODO: 1) What is the event here and what is the difference with the CONFIG_DISPATCHER in the previous function?
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
"""
Called every time, when the controller receives a PACKET_IN message
:type ev: ryu.controller.ofp_event.EventOFPPacketIn
:return: None
:rtype: None
"""
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
dpid = datapath.id
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
# create a Packet object out of the payload
# Create a Packet object from the raw message data
pkt = packet.Packet(msg.data)
# TODO: 1) Why do we need to obtain the information for four different protocols?
eth = pkt.get_protocols(ethernet.ethernet)[0]
ip_pkt = pkt.get_protocol(ipv4.ipv4)
ip_pkt_6 = pkt.get_protocol(ipv6.ipv6)
arp_pkt = pkt.get_protocol(arp.arp)
# Don't do anything with IPV6 packets.
if isinstance(ip_pkt_6, ipv6.ipv6):
actions = []
match = parser.OFPMatch(eth_type=ether.ETH_TYPE_IPV6)
self.add_flow(datapath, 0, 1, match, actions)
return
# ARP protocol
if isinstance(arp_pkt, arp.arp):
if self.mac_learning(dpid, eth.src, in_port) is False:
self.logger.debug("ARP packet enter in different ports")
return
# Complete ARP protocol
self.arp_forwarding(msg, arp_pkt.src_ip, arp_pkt.dst_ip, eth)
# This is the focus of this workshop -> Process the IPv4 message
if isinstance(ip_pkt, ipv4.ipv4):
# find the switch in the mac_to_port table
mac_to_port_table = self.mac_to_port.get(dpid)
if mac_to_port_table is None:
self.logger.info("Dpid is not in mac_to_port")
return
# source and destination mac address of the ethernet packet
dst = eth.dst
src = eth.src
out_port = None
# "Known destination MAC address" -> We have seen this before
if dst in mac_to_port_table:
#TODO: Final Questions - Why do we need the following special cases?
if dpid == 1 and in_port == 1:
# Special case for host 1: only create the group the first time
if self.FLAGS is True:
self.send_group_mod(datapath)
self.FLAGS = False
#TODO: Final Questions - Where is this group defined?
actions = [parser.OFPActionGroup(group_id=7)]
#TODO: Final Questions - Why do we need to create groups and flows in different steps?
match = parser.OFPMatch(in_port=in_port,
eth_type=eth.ethertype,
ipv4_src=ip_pkt.src)
self.add_flow(datapath, 0, 3, match, actions)
# send the packet out on port 2
self.send_packet_out(datapath, msg.buffer_id,
in_port, 2, msg.data)
else:
#Normal flows
# "Install a flow to avoid packet_in next time">
out_port = mac_to_port_table[eth.dst]
actions = [parser.OFPActionOutput(out_port)]
match = parser.OFPMatch(in_port=in_port, eth_dst=eth.dst,
eth_type=eth.ethertype)
#Add the flow to the switch
self.add_flow(datapath, 0, 1, match, actions)
#Send packet to its destination
self.send_packet_out(datapath, msg.buffer_id, in_port,
out_port, msg.data)
# "Unknown destination MAC address"
else:
# MAC is not Known
if self.mac_learning(dpid, eth.src, in_port) is False:
self.logger.debug("IPV4 packet enter in different ports")
return
else:
# we don't know anything, so flood the network
self.flood(msg)
#TODO: Final Questions - What do you think this decorator could be useful for?
#@set_ev_cls(event.EventSwitchEnter)
|
{"hexsha": "0e332ec70cd072ca924bba132699ef6b60286e50", "size": 14279, "ext": "py", "lang": "Python", "max_stars_repo_path": "loadbalancer_controller.py", "max_stars_repo_name": "esaurez/workshop4", "max_stars_repo_head_hexsha": "15d5e8c9fe0760e6849655ef2a3f60224a12dffb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "loadbalancer_controller.py", "max_issues_repo_name": "esaurez/workshop4", "max_issues_repo_head_hexsha": "15d5e8c9fe0760e6849655ef2a3f60224a12dffb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "loadbalancer_controller.py", "max_forks_repo_name": "esaurez/workshop4", "max_forks_repo_head_hexsha": "15d5e8c9fe0760e6849655ef2a3f60224a12dffb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.751497006, "max_line_length": 158, "alphanum_fraction": 0.6150990966, "include": true, "reason": "import networkx", "num_tokens": 3316}
|
# Illustrate the concept of mutual consistency
function illustrate_mc()
dgp = illustration_dgp()
assumptions = Dict{Symbol, Any}(:lb => 0, :ub => 1)
basis1 = (interacted_bernstein_basis(1, notℓ = 2),
interacted_bernstein_basis(1, notℓ = 2))
basis2 = (interacted_bernstein_basis(3, notℓ = 1),
interacted_bernstein_basis(3, notℓ = 1))
bases = [basis1, basis2]
tp = ey1(dgp, ℓ = 2)
# for plotting
ev = DataFrame(u = 0:.01:1, d = 1)
results = DataFrame(u = ev.u)
# Impose mutual consistency
assumptions[:mutually_consistent] = true
r = compute_bounds(tp, bases, assumptions, dgp)
ev.z = fill([0,0], nrow(ev))
results[:, "MTR (l=1 d=1 z2=0)"] = evaluate_mtr(r[:mtr_ub][1][2], ev)
ev.z = fill([0,1], nrow(ev))
results[:, "MTR (l=1 d=1 z2=1)"] = evaluate_mtr(r[:mtr_ub][1][2], ev)
ev.z = fill([0,0], nrow(ev))
results[:, "MTR (l=2 d=1 z1=0) con"] = evaluate_mtr(r[:mtr_ub][2][2], ev)
ev.z = fill([1,0], nrow(ev))
results[:, "MTR (l=2 d=1 z1=1) con"] = evaluate_mtr(r[:mtr_ub][2][2], ev)
# Do not impose mutual consistency. Keep target parameter defined in terms
# of the second model. Then the max MTR will be a second model MTR that is
# inconsistent with the first model MTR.
assumptions[:mutually_consistent] = false
r = compute_bounds(tp, bases, assumptions, dgp)
ev.z = fill([0,0], nrow(ev))
results[:, "MTR (l=2 d=1 z1=0) incon"] = evaluate_mtr(r[:mtr_ub][2][2], ev)
ev.z = fill([1,0], nrow(ev))
results[:, "MTR (l=2 d=1 z1=1) incon"] = evaluate_mtr(r[:mtr_ub][2][2], ev)
return results
end
export illustrate_mc
# Show the impact of mutual consistency for an instrument-invariant parameter
# like the ATT
function simulation_att()
dgp = simulation_dgp()
results = DataFrame(degree = 1:1:9)
results[:, "LB (l = 1; MC off)"] .= NaN
results[:, "UB (l = 1; MC off)"] .= NaN
results[:, "LB (l = 2; MC off)"] .= NaN
results[:, "UB (l = 2; MC off)"] .= NaN
results[:, "LB (MC on)"] .= NaN
results[:, "UB (MC on)"] .= NaN
assumptions = Dict{Symbol, Any}(:lb => 0, :ub => 1)
for k in 1:nrow(results)
bases = [(interacted_bernstein_basis(results[k, :degree], notℓ = 2),
interacted_bernstein_basis(results[k, :degree], notℓ = 2)),
(interacted_bernstein_basis(results[k, :degree], notℓ = 1),
interacted_bernstein_basis(results[k, :degree], notℓ = 1))]
assumptions[:mutually_consistent] = false
tp = att(dgp, ℓ = 1)
r = compute_bounds(tp, bases, assumptions, dgp)
results[k, 2:3] = [r[:lb], r[:ub]]
tp = att(dgp, ℓ = 2)
r = compute_bounds(tp, bases, assumptions, dgp)
results[k, 4:5] = [r[:lb], r[:ub]]
assumptions[:mutually_consistent] = true
r = compute_bounds(tp, bases, assumptions, dgp)
results[k, 6:7] = [r[:lb], r[:ub]]
end
knots = vcat(0, dgp.pscore, 1)
bases = [(interacted_constantspline_basis(knots, notℓ = 2),
interacted_constantspline_basis(knots, notℓ = 2)),
(interacted_constantspline_basis(knots, notℓ = 1),
interacted_constantspline_basis(knots, notℓ = 1))]
assumptions[:mutually_consistent] = false
tp = att(dgp, ℓ = 1)
r = compute_bounds(tp, bases, assumptions, dgp)
results[:, "NP LB (l = 1; MC off)"] .= r[:lb]
results[:, "NP UB (l = 1; MC off)"] .= r[:ub]
tp = att(dgp, ℓ = 2)
r = compute_bounds(tp, bases, assumptions, dgp)
results[:, "NP LB (l = 2; MC off)"] .= r[:lb]
results[:, "NP UB (l = 2; MC off)"] .= r[:ub]
assumptions[:mutually_consistent] = true
r = compute_bounds(tp, bases, assumptions, dgp)
results[:, "NP LB (MC on)"] .= r[:lb]
results[:, "NP UB (MC on)"] .= r[:ub]
return results
end
export simulation_att
# Show that mutual consistency can also help tighten inference on instrument-
# dependent parameters.
function simulation_prte()
dgp = simulation_dgp()
δ = .2
tp = prte_plusδpercent(dgp, δ, ℓ = 1)
knots = vcat(0, dgp.pscore, (1 + δ) .* dgp.pscore, 1)
results = DataFrame(degree = 1:1:9)
results[:, "LB (MC off)"] .= NaN
results[:, "UB (MC off)"] .= NaN
results[:, "LB (l = 2; NP)"] .= NaN
results[:, "UB (l = 2; NP)"] .= NaN
results[:, "LB (l = 2; NP; Decr)"] .= NaN
results[:, "UB (l = 2; NP; Decr)"] .= NaN
results[:, "LB (l = 2; NP; Linear)"] .= NaN
results[:, "UB (l = 2; NP; Linear)"] .= NaN
for k in 1:nrow(results)
bases = [(interacted_bernstein_basis(results[k, :degree], notℓ = 2),
interacted_bernstein_basis(results[k, :degree], notℓ = 2)),
(interacted_constantspline_basis(knots, notℓ = 1),
interacted_constantspline_basis(knots, notℓ = 1))]
assumptions = Dict{Symbol, Any}(:lb => 0, :ub => 1,
:mutually_consistent => false)
r = compute_bounds(tp, bases, assumptions, dgp)
results[k, 2:3] = [r[:lb], r[:ub]]
assumptions[:mutually_consistent] = true
r = compute_bounds(tp, bases, assumptions, dgp)
results[k, 4:5] = [r[:lb], r[:ub]]
assumptions[:decreasing_level] = [(2, 0), (2,1)]
assumptions[:decreasing_difference] = [2]
r = compute_bounds(tp, bases, assumptions, dgp)
results[k, 6:7] = [r[:lb], r[:ub]]
bases[2] = (interacted_bernstein_basis(1, notℓ = 1),
interacted_bernstein_basis(1, notℓ = 1))
r = compute_bounds(tp, bases, assumptions, dgp)
results[k, 8:9] = [r[:lb], r[:ub]]
end
return results
end
export simulation_prte
# Illustrate misspecification caused for a PRTE by using the wrong choice model
function prte_misspecification(; ℓ_gen = 1)
dgp = prte_dgp(ℓ_gen = ℓ_gen) # ℓ_gen shouldn't matter here (and doesn't)
knots = vcat(0, dgp.pscore, 1)
# Set up PRTE target parameter
dgp_new = DGP(suppZ = dgp.suppZ,
pscore = dgp.pscore,
densZ = fill(.25, 4),
mtrs = dgp.mtrs)
tp = prte_newz(dgp, dgp_new, ℓ = 1)
nrows = 5
results = DataFrame(name = fill("", nrows),
lb = fill(+Inf, nrows), ub = fill(+Inf, nrows))
# True value
truth = eval_tp(tp, [dgp.mtrs], dgp)
results[1, :name] = "True value"
results[1, :lb], results[1, :ub] = truth, truth
# IAM model using both instruments
# --> should get bounds that are non-positive
bases = [(constantspline_basis(knots), constantspline_basis(knots))]
assumptions = Dict{Symbol, Any}(:lb => 0, :ub => 1)
r = compute_bounds(tp, bases, assumptions, dgp)
results[2, :name] = "Both instruments with IAM"
results[2, :lb] = r[:lb]
results[2, :ub] = r[:ub]
# Use the first instrument only
bases = [(interacted_constantspline_basis(knots, notℓ = 2),
interacted_constantspline_basis(knots, notℓ = 2))]
r = compute_bounds(tp, bases, assumptions, dgp)
results[3, :name] = "Instrument 1 with IAM"
results[3, :lb] = r[:lb]
results[3, :ub] = r[:ub]
# Use the second instrument only
bases = [(interacted_constantspline_basis(knots, notℓ = 1),
interacted_constantspline_basis(knots, notℓ = 1))]
r = compute_bounds(tp, bases, assumptions, dgp)
results[4, :name] = "Instrument 2 with IAM"
results[4, :lb] = r[:lb]
results[4, :ub] = r[:ub]
# Use both instruments together and impose MC
assumptions = Dict{Symbol, Any}(:lb => 0, :ub => 1,
:mutually_consistent => true)
bases = [(interacted_constantspline_basis(knots, notℓ = 2),
interacted_constantspline_basis(knots, notℓ = 2)),
(interacted_constantspline_basis(knots, notℓ = 1),
interacted_constantspline_basis(knots, notℓ = 1))]
r = compute_bounds(tp, bases, assumptions, dgp)
results[5, :name] = "Both instruments with PM"
results[5, :lb] = r[:lb]
results[5, :ub] = r[:ub]
return results
end
export prte_misspecification
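# Illustrative call pattern (assumes the package and its LP solver dependencies
# are loaded):
#
#     results = illustrate_mc()     # MTR bounds with and without mutual consistency
#     att_table = simulation_att()  # ATT bounds across basis degrees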
|
{"hexsha": "631645416b8e9d9597edc264f1f97b6bace71b7a", "size": 8142, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/paper_simulations.jl", "max_stars_repo_name": "omkarakatta/MarginalTreatmentEffectsWithMultipleInstruments.jl", "max_stars_repo_head_hexsha": "8bc8a7f42817fa3fcc45b6b9a78aaaa11b3092bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-05T14:29:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-05T14:29:24.000Z", "max_issues_repo_path": "src/paper_simulations.jl", "max_issues_repo_name": "omkarakatta/MarginalTreatmentEffectsWithMultipleInstruments.jl", "max_issues_repo_head_hexsha": "8bc8a7f42817fa3fcc45b6b9a78aaaa11b3092bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/paper_simulations.jl", "max_forks_repo_name": "omkarakatta/MarginalTreatmentEffectsWithMultipleInstruments.jl", "max_forks_repo_head_hexsha": "8bc8a7f42817fa3fcc45b6b9a78aaaa11b3092bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-26T12:38:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-26T12:38:19.000Z", "avg_line_length": 38.956937799, "max_line_length": 79, "alphanum_fraction": 0.5879390813, "num_tokens": 2615}
|
"""
deflation criterion.
"""
function deflation_criterion(sub::T, da::T, db::T) where {T<:Number}
deflation_criterion1(abs(sub), (abs(da) + abs(db) ))
end
@inline deflation_criterion1(sub::T, da::T) where {T<:Number} = sub <= eps(da) / 4
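# Illustrative check: a tiny subdiagonal entry is deflatable next to O(1)
# diagonal entries.
#
#     deflation_criterion(1e-20, 1.0, 2.0)  # -> true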
|
{"hexsha": "4568b9973b6b2fea9e770f48e18da7549c8f4db7", "size": 243, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/deflationcrit.jl", "max_stars_repo_name": "KlausC/LinAlgBigFloat", "max_stars_repo_head_hexsha": "5dcfbd8605d39cf0f82b8d4db4a7cafa0dbb3df8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-08-03T02:29:42.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-03T02:29:42.000Z", "max_issues_repo_path": "src/deflationcrit.jl", "max_issues_repo_name": "KlausC/LinAlgBigFloat", "max_issues_repo_head_hexsha": "5dcfbd8605d39cf0f82b8d4db4a7cafa0dbb3df8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/deflationcrit.jl", "max_forks_repo_name": "KlausC/LinAlgBigFloat", "max_forks_repo_head_hexsha": "5dcfbd8605d39cf0f82b8d4db4a7cafa0dbb3df8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.0909090909, "max_line_length": 82, "alphanum_fraction": 0.658436214, "num_tokens": 83}
|
"""
EA-compatible analogue to np.putmask
"""
from typing import Any, Tuple
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import ArrayLike
from pandas.core.dtypes.cast import (
convert_scalar_for_putitemlike,
find_common_type,
infer_dtype_from,
)
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype, is_list_like
from pandas.core.dtypes.missing import isna_compat
from pandas.core.arrays import ExtensionArray
def putmask_inplace(values: ArrayLike, mask: np.ndarray, value: Any) -> None:
"""
ExtensionArray-compatible implementation of np.putmask. The main
difference is we do not handle repeating or truncating like numpy.
Parameters
----------
mask : np.ndarray[bool]
We assume extract_bool_array has already been called.
value : Any
"""
if lib.is_scalar(value) and isinstance(values, np.ndarray):
value = convert_scalar_for_putitemlike(value, values.dtype)
if not isinstance(values, np.ndarray) or (
values.dtype == object and not lib.is_scalar(value)
):
# GH#19266 using np.putmask gives unexpected results with listlike value
if is_list_like(value) and len(value) == len(values):
values[mask] = value[mask]
else:
values[mask] = value
else:
# GH#37833 np.putmask is more performant than __setitem__
np.putmask(values, mask, value)
def putmask_smart(values: np.ndarray, mask: np.ndarray, new) -> np.ndarray:
"""
Return a new ndarray, try to preserve dtype if possible.
Parameters
----------
values : np.ndarray
`values`, updated in-place.
mask : np.ndarray[bool]
Applies to both sides (array like).
new : `new values` either scalar or an array like aligned with `values`
Returns
-------
values : ndarray with updated values
this *may* be a copy of the original
See Also
--------
ndarray.putmask
"""
# we cannot use np.asarray() here as we cannot have conversions
# that numpy does when numeric are mixed with strings
# n should be the length of the mask or a scalar here
if not is_list_like(new):
new = np.repeat(new, len(mask))
# see if we are only masking values that, if put,
# will work in the current dtype
try:
nn = new[mask]
except TypeError:
# TypeError: only integer scalar arrays can be converted to a scalar index
pass
else:
# make sure that we have a nullable type if we have nulls
if not isna_compat(values, nn[0]):
pass
elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):
# only compare integers/floats
pass
elif not (is_float_dtype(values.dtype) or is_integer_dtype(values.dtype)):
# only compare integers/floats
pass
else:
# we ignore ComplexWarning here
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", np.ComplexWarning)
nn_at = nn.astype(values.dtype)
comp = nn == nn_at
if is_list_like(comp) and comp.all():
nv = values.copy()
nv[mask] = nn_at
return nv
new = np.asarray(new)
if values.dtype.kind == new.dtype.kind:
# preserves dtype if possible
return _putmask_preserve(values, new, mask)
dtype = find_common_type([values.dtype, new.dtype])
values = values.astype(dtype)
return _putmask_preserve(values, new, mask)
def _putmask_preserve(new_values: np.ndarray, new, mask: np.ndarray):
try:
new_values[mask] = new[mask]
except (IndexError, ValueError):
new_values[mask] = new
return new_values
def putmask_without_repeat(values: np.ndarray, mask: np.ndarray, new: Any) -> None:
"""
np.putmask will truncate or repeat if `new` is a listlike with
len(new) != len(values). We require an exact match.
Parameters
----------
values : np.ndarray
mask : np.ndarray[bool]
new : Any
"""
if getattr(new, "ndim", 0) >= 1:
new = new.astype(values.dtype, copy=False)
# TODO: this prob needs some better checking for 2D cases
nlocs = mask.sum()
if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1:
if nlocs == len(new):
# GH#30567
# If length of ``new`` is less than the length of ``values``,
# `np.putmask` would first repeat the ``new`` array and then
# assign the masked values hence produces incorrect result.
# `np.place` on the other hand uses the ``new`` values as they are
# to place in the masked locations of ``values``
np.place(values, mask, new)
# i.e. values[mask] = new
elif mask.shape[-1] == len(new) or len(new) == 1:
np.putmask(values, mask, new)
else:
raise ValueError("cannot assign mismatch length to masked array")
else:
np.putmask(values, mask, new)
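# A minimal usage sketch (illustrative): with len(new) == mask.sum(), the new
# values are placed rather than repeated, so `values` becomes [10, 2, 30, 4].
#
#     values = np.array([1, 2, 3, 4])
#     mask = np.array([True, False, True, False])
#     putmask_without_repeat(values, mask, np.array([10, 30]))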
def validate_putmask(values: ArrayLike, mask: np.ndarray) -> Tuple[np.ndarray, bool]:
"""
Validate mask and check if this putmask operation is a no-op.
"""
mask = extract_bool_array(mask)
if mask.shape != values.shape:
raise ValueError("putmask: mask and data must be the same size")
noop = not mask.any()
return mask, noop
def extract_bool_array(mask: ArrayLike) -> np.ndarray:
"""
If we have a SparseArray or BooleanArray, convert it to ndarray[bool].
"""
if isinstance(mask, ExtensionArray):
# We could have BooleanArray, Sparse[bool], ...
# Except for BooleanArray, this is equivalent to just
# np.asarray(mask, dtype=bool)
mask = mask.to_numpy(dtype=bool, na_value=False)
mask = np.asarray(mask, dtype=bool)
return mask
def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other):
"""
Parameters
----------
values : np.ndarray
num_set : int
For putmask, this is mask.sum()
other : Any
"""
if values.dtype == object:
dtype, _ = infer_dtype_from(other, pandas_dtype=True)
if isinstance(dtype, np.dtype) and dtype.kind in ["m", "M"]:
# https://github.com/numpy/numpy/issues/12550
# timedelta64 will incorrectly cast to int
if not is_list_like(other):
other = [other] * num_set
else:
other = list(other)
return other
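# --- Illustrative sketch (not part of pandas): the object-dtype wrap-around
# for datetimelike scalars handled above.
def _demo_setitem_datetimelike_compat():
    import numpy as np
    values = np.array([None, None], dtype=object)
    other = np.timedelta64(1, "s")
    # With an object-dtype target, the timedelta scalar is wrapped in a list
    # of length num_set so it is not cast to int on assignment (numpy#12550).
    return setitem_datetimelike_compat(values, num_set=2, other=other)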
|
{"hexsha": "917aace233ee571bbf4bbb9b5c2386cd3e30a03c", "size": 6604, "ext": "py", "lang": "Python", "max_stars_repo_path": "pandas/core/array_algos/putmask.py", "max_stars_repo_name": "yuvalmarciano/pandas", "max_stars_repo_head_hexsha": "a9cacd9efee31e6cff28125ffbad00fad1617833", "max_stars_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-02T02:05:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T02:09:37.000Z", "max_issues_repo_path": "pandas/core/array_algos/putmask.py", "max_issues_repo_name": "north-star-saj/pandas", "max_issues_repo_head_hexsha": "fc9fdba6592bdb5d0d1147ce4d65639acd897565", "max_issues_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-02-16T06:43:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-19T00:07:02.000Z", "max_forks_repo_path": "pandas/core/array_algos/putmask.py", "max_forks_repo_name": "north-star-saj/pandas", "max_forks_repo_head_hexsha": "fc9fdba6592bdb5d0d1147ce4d65639acd897565", "max_forks_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-28T03:32:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-28T03:32:40.000Z", "avg_line_length": 31.4476190476, "max_line_length": 85, "alphanum_fraction": 0.6250757117, "include": true, "reason": "import numpy", "num_tokens": 1536}
|
* DF01MD EXAMPLE PROGRAM TEXT
* Copyright (c) 2002-2020 NICONET e.V.
*
* .. Parameters ..
INTEGER NIN, NOUT
PARAMETER ( NIN = 5, NOUT = 6 )
INTEGER NMAX
PARAMETER ( NMAX = 129 )
* .. Local Scalars ..
DOUBLE PRECISION DT
INTEGER I, INFO, N
CHARACTER*1 SICO
* .. Local Arrays ..
DOUBLE PRECISION A(NMAX), DWORK(NMAX+1)
* .. External Functions ..
LOGICAL LSAME
EXTERNAL LSAME
* .. External Subroutines ..
EXTERNAL DF01MD
* .. Executable Statements ..
*
WRITE ( NOUT, FMT = 99999 )
* Skip the heading in the data file and read the data.
READ ( NIN, FMT = '()' )
READ ( NIN, FMT = * ) N, DT, SICO
IF ( N.LE.1 .OR. N.GT.NMAX ) THEN
WRITE ( NOUT, FMT = 99994 ) N
ELSE
READ ( NIN, FMT = * ) ( A(I), I = 1,N )
* Compute the sine/cosine transform of the given real signal.
CALL DF01MD( SICO, N, DT, A, DWORK, INFO )
*
IF ( INFO.NE.0 ) THEN
WRITE ( NOUT, FMT = 99998 ) INFO
ELSE
IF ( LSAME( SICO, 'S' ) ) THEN
WRITE ( NOUT, FMT = 99997 )
DO 20 I = 1, N
WRITE ( NOUT, FMT = 99995 ) I, A(I)
20 CONTINUE
ELSE
WRITE ( NOUT, FMT = 99996 )
DO 40 I = 1, N
WRITE ( NOUT, FMT = 99995 ) I, A(I)
40 CONTINUE
END IF
END IF
END IF
*
STOP
*
99999 FORMAT (' DF01MD EXAMPLE PROGRAM RESULTS',/1X)
99998 FORMAT (' INFO on exit from DF01MD = ',I2)
99997 FORMAT (' Components of sine transform are',//' i',6X,'A(i)',/)
99996 FORMAT (' Components of cosine transform are',//' i',6X,'A(i)',
$ /)
99995 FORMAT (I4,3X,F8.4)
99994 FORMAT (/' N is out of range.',/' N = ',I5)
END
|
{"hexsha": "f795eef4833e8c1a8437925c699d771d48871980", "size": 1908, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "examples/TDF01MD.f", "max_stars_repo_name": "bnavigator/SLICOT-Reference", "max_stars_repo_head_hexsha": "7b96b6470ee0eaf75519a612d15d5e3e2857407d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-11-10T23:47:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T04:34:43.000Z", "max_issues_repo_path": "examples/TDF01MD.f", "max_issues_repo_name": "bnavigator/SLICOT-Reference", "max_issues_repo_head_hexsha": "7b96b6470ee0eaf75519a612d15d5e3e2857407d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-02-07T22:26:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:01:07.000Z", "max_forks_repo_path": "examples/TDF01MD.f", "max_forks_repo_name": "bnavigator/SLICOT-Reference", "max_forks_repo_head_hexsha": "7b96b6470ee0eaf75519a612d15d5e3e2857407d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-11-26T11:06:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T00:37:21.000Z", "avg_line_length": 31.8, "max_line_length": 71, "alphanum_fraction": 0.4848008386, "num_tokens": 595}
|
import numpy as np
class FilterTypeException(Exception):
"""
Creates custom exception for File Type
"""
pass
class ImageDimensionException(Exception):
"""
Creates custom exception for Image Dimension
"""
pass
class FilterDimensionException(Exception):
"""
Creates custom exception for Filter Dimension
"""
pass
def filter_pyfect_2D(image, kernel):
"""
    Performs convolution-style filtering using a 2D kernel on a 2D numpy array.
Parameters
----------
image : numpy.ndarray
A 2D numpy array representing a single channel image
kernel : numpy.ndarray
A 2D numpy array representing a convolution filter
Returns:
---------
filtered_image: numpy.ndarray
        Result of the filtering as a 2D numpy array. Note that the values are
        scaled to the range [0, 1] with min-max scaling for plotting stability.
Examples
--------
>>> image = np.arange(1, 26).reshape(5, 5)
>>> kernel = np.ones((2,2))
>>> filter_pyfect_2D(image, kernel)
array([[0. , 0.05555556, 0.11111111, 0.16666667],
[0.27777778, 0.33333333, 0.38888889, 0.44444444],
[0.55555556, 0.61111111, 0.66666667, 0.72222222],
[0.83333333, 0.88888889, 0.94444444, 1. ]])
"""
padding_size = 0 # for future enhancement
stride_size = 1 # for future enhancement
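    # Build a zero-copy 4D sliding-window view of the image: axes 0-1 index
    # the output positions, axes 2-3 hold each kernel-sized patch. Reusing the
    # image's own strides for both pairs of axes makes this a view rather than
    # a copy (hence writeable=False below).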
expanded_input = np.lib.stride_tricks.as_strided(
image,
shape=(
int(
(image.shape[0] - kernel.shape[0] + (2 * padding_size))
/ stride_size
)
+ 1,
int(
(image.shape[1] - kernel.shape[1] + (2 * padding_size))
/ stride_size
)
+ 1,
kernel.shape[0],
kernel.shape[1],
),
strides=(
image.strides[0],
image.strides[1],
image.strides[0],
image.strides[1],
),
writeable=False,
)
filtered_image = (expanded_input * kernel).sum(axis=(2, 3))
if filtered_image.max() != filtered_image.min():
filtered_image = (filtered_image - filtered_image.min()) / (
filtered_image.max() - filtered_image.min()
)
return filtered_image
def filter_pyfect_3D(image, kernel):
"""
    Performs convolution-style filtering using a 3D kernel on a 3D numpy array.
Both the kernel and image should have 3 channels in the 3rd dimension.
Parameters
----------
image : numpy.ndarray
A 3D numpy array representing a 3 channel image
kernel : numpy.ndarray
A 3D numpy array representing a 3D convolution filter with 3 channels
Returns:
---------
filtered_image: numpy.ndarray
        Result of the filtering as a 3D numpy array. Note that the values are
        scaled to the range [0, 1] with min-max scaling within each channel
        for plotting stability.
Examples
--------
>>> image = np.arange(1, 76).reshape(5, 5, 3)
>>> kernel = np.ones((2,2,3))
>>> result = filter_pyfect_3D(image, kernel)
>>> result[:,:,0]
array([[0. , 0.05555556, 0.11111111, 0.16666667],
[0.27777778, 0.33333333, 0.38888889, 0.44444444],
[0.55555556, 0.61111111, 0.66666667, 0.72222222],
[0.83333333, 0.88888889, 0.94444444, 1. ]])
>>> result[:,:,1]
array([[0. , 0.05555556, 0.11111111, 0.16666667],
[0.27777778, 0.33333333, 0.38888889, 0.44444444],
[0.55555556, 0.61111111, 0.66666667, 0.72222222],
[0.83333333, 0.88888889, 0.94444444, 1. ]])
>>> result[:,:,2]
array([[0. , 0.05555556, 0.11111111, 0.16666667],
[0.27777778, 0.33333333, 0.38888889, 0.44444444],
[0.55555556, 0.61111111, 0.66666667, 0.72222222],
[0.83333333, 0.88888889, 0.94444444, 1. ]])
"""
padding_size = 0 # for future enhancement
stride_size = 1 # for future enhancement
filtered_image = np.zeros(
(
int(
(image.shape[0] - kernel.shape[0] + (2 * padding_size))
/ stride_size
)
+ 1,
int(
(image.shape[1] - kernel.shape[1] + (2 * padding_size))
/ stride_size
)
+ 1,
3,
)
)
for k in range(3):
temp_image = image[:, :, k]
temp_kernel = kernel[:, :, k]
filtered_image[:, :, k] = filter_pyfect_2D(temp_image, temp_kernel)
return filtered_image
def build_filter(kernel_type, kernel_size):
"""
This function can be used to build predefined filters.
Parameters
----------
    kernel_type : string
        One of the following values:
        blur: Used to blur the picture
        sharpen: Used to increase the sharpness of the image
        More options will be added as enhancements
    kernel_size : int
        An integer determining the filter size.
Returns:
---------
    kernel: numpy.ndarray
        A kernel_size * kernel_size numpy array representing the filter.
Examples
--------
>>> build_filter("blur", 3)
array([[0.01, 0.01, 0.01],
[0.01, 0.01, 0.01],
[0.01, 0.01, 0.01]])
>>> build_filter("sharpen", 7)
array([[ 0, 0, 0, -1, 0, 0, 0],
[ 0, 0, -1, -1, -1, 0, 0],
[ 0, -1, -1, -1, -1, -1, 0],
[-1, -1, -1, 5, -1, -1, -1],
[ 0, -1, -1, -1, -1, -1, 0],
[ 0, 0, -1, -1, -1, 0, 0],
[ 0, 0, 0, -1, 0, 0, 0]])
"""
if kernel_type == "blur":
kernel = np.full((kernel_size, kernel_size), 0.01)
elif kernel_type == "sharpen":
kernel = np.full((kernel_size, kernel_size), -1)
kernel[int(kernel_size / 2), int(kernel_size / 2)] = 5
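        # Zero out the corners row by row so the -1 weights form a diamond
        # around the center value, as in the 7x7 doctest above.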
for i in range(int(kernel_size / 2)):
kernel[i, np.arange(int(kernel_size / 2) - i)] = 0
kernel[
i, np.arange(int(kernel_size / 2) + 1 + i, int(kernel_size))
] = 0
kernel[
int(kernel_size) - i - 1, np.arange(int(kernel_size / 2) - i)
] = 0
kernel[
int(kernel_size) - i - 1,
np.arange(int(kernel_size / 2) + 1 + i, int(kernel_size)),
] = 0
else:
raise FilterTypeException("Invalid filter_type.")
return kernel
def filter_pyfect(
image, filter_type="blur", filter_size=3, custom_filter=None
):
"""
This function applies predefined or custom filters on an image.
    The function can be applied to single-channel or 3-channel images.
    Users can choose from the predefined filters or create their own. This can
    be used for various purposes, such as entertainment applications or
    visualization of convolutional neural networks.
Parameters
----------
image : numpy.ndarray
        An n1*n2 or n1*n2*3 numpy array representing a single-channel
        or 3-channel image
filter_type : string
One of the following values:
blur: Used to blur the picture
sharpen: Used to increase the sharpness of the image
custom: Allows users to use their own filter
More options will be added as enhancements
filter_size : int
An integer determining the filter size.
This is used if the filter_type is not custom. Default: 3
custom_filter: numpy.ndarray
A k1*k2 or k1*k2*3 numpy array allows users to pass their own filter.
This is only used if the users select filter_type = "custom"
Returns:
---------
filtered_image: numpy.ndarray
A numpy array representing the transformed image.
Examples
--------
>>> image = np.arange(1, 26).reshape(5, 5)
>>> kernel = np.ones((2,2))
>>> filter_pyfect(image, filter_type="custom", custom_filter=kernel)
array([[0. , 0.05555556, 0.11111111, 0.16666667],
[0.27777778, 0.33333333, 0.38888889, 0.44444444],
[0.55555556, 0.61111111, 0.66666667, 0.72222222],
[0.83333333, 0.88888889, 0.94444444, 1. ]])
>>> img = np.arange(1, 76).reshape(5, 5, 3)
>>> kernel = np.ones((2,2,3))
>>> filter_pyfect(img, filter_type="custom", custom_filter=kernel)[:,:,1]
array([[0. , 0.05555556, 0.11111111, 0.16666667],
[0.27777778, 0.33333333, 0.38888889, 0.44444444],
[0.55555556, 0.61111111, 0.66666667, 0.72222222],
[0.83333333, 0.88888889, 0.94444444, 1. ]])
>>> img = np.arange(1, 26).reshape(5, 5)
>>> filter_pyfect(img, filter_type="blur")
array([[0. , 0.08333333, 0.16666667],
[0.41666667, 0.5 , 0.58333333],
[0.83333333, 0.91666667, 1. ]])
"""
valid_filters = ["blur", "sharpen", "custom"]
valid_dimensions = [2, 3]
if not (filter_type in valid_filters):
raise FilterTypeException(
f"Invalid filter_type. Please use one out of {valid_filters}"
)
if (not (image.ndim in valid_dimensions)) or (
image.ndim == 3 and image.shape[2] != 3
):
raise ImageDimensionException(
"""Invalid dimension of the image. Please use 2D or 3D images.
In case of 3D images, there should be 3 channels
"""
)
if filter_type == "custom" and (
(not (custom_filter.ndim in valid_dimensions))
or (custom_filter.ndim == 3 and custom_filter.shape[2] != 3)
):
raise FilterDimensionException(
"""
Invalid dimension of the filter. Please use 2D or 3D filters.
        In case of 3D filters, there should be 3 channels
"""
)
if (
filter_type == "custom"
and (
image.shape[0] <= custom_filter.shape[0]
or image.shape[1] <= custom_filter.shape[1]
)
) or (
filter_type != "custom"
and (image.shape[0] <= filter_size or image.shape[1] <= filter_size)
):
raise ImageDimensionException(
"Image size has to be bigger than filter size"
)
if image.ndim == 2:
if filter_type != "custom":
kernel = build_filter(filter_type, filter_size)
else:
if custom_filter.ndim == 2:
kernel = custom_filter.copy()
else:
kernel = custom_filter[:, :, 0]
filtered_image = filter_pyfect_2D(image, kernel)
else:
if filter_type != "custom":
temp = build_filter(filter_type, filter_size)
kernel = temp[:, :, np.newaxis] + np.zeros(
(temp.shape[0], temp.shape[1], 3)
)
else:
if custom_filter.ndim == 3:
kernel = custom_filter.copy()
else:
kernel = custom_filter[:, :, np.newaxis] + np.zeros(
(custom_filter.shape[0], custom_filter.shape[1], 3)
)
filtered_image = filter_pyfect_3D(image, kernel)
return filtered_image
|
{"hexsha": "e1f48d1c66b325d1cf9d374980ca026ae2677501", "size": 11151, "ext": "py", "lang": "Python", "max_stars_repo_path": "picturepyfect/applyfilter.py", "max_stars_repo_name": "UBC-MDS/picturepyfect", "max_stars_repo_head_hexsha": "0a9e292cdb6b974edb94b6c3d4e30964276e7ffb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "picturepyfect/applyfilter.py", "max_issues_repo_name": "UBC-MDS/picturepyfect", "max_issues_repo_head_hexsha": "0a9e292cdb6b974edb94b6c3d4e30964276e7ffb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2021-02-25T23:25:52.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-26T06:55:13.000Z", "max_forks_repo_path": "picturepyfect/applyfilter.py", "max_forks_repo_name": "UBC-MDS/DSCI_524_Group_2", "max_forks_repo_head_hexsha": "0a9e292cdb6b974edb94b6c3d4e30964276e7ffb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-02-26T06:44:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-19T21:16:56.000Z", "avg_line_length": 30.975, "max_line_length": 77, "alphanum_fraction": 0.561025917, "include": true, "reason": "import numpy", "num_tokens": 3146}
|
import json
import pandas
import numpy
import logging
logger = logging.getLogger(__name__)
from yahist import Hist1D
import matplotlib.pyplot as plt
def make_ratio_plot(data, mc, save_name, bins, **kwargs):
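    """
    Plot a data/MC comparison: both histograms overlaid in the top panel and
    their ratio in the panel below, then save the figure to ``save_name``.
    Optional keyword arguments (read from kwargs): normalize, x_label,
    y_label, rat_label, title, y_lim, x_lim, rat_lim.
    """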
normalize = kwargs.get("normalize", False)
x_label = kwargs.get("x_label", None)
y_label = kwargs.get("y_label", "Events" if not normalize else "Fraction of events")
rat_label = kwargs.get("rat_label", "Data/MC")
title = kwargs.get("title", None)
y_lim = kwargs.get("y_lim", None)
x_lim = kwargs.get("x_lim", None)
rat_lim = kwargs.get("rat_lim", None)
h_data = Hist1D(data, bins = bins)
h_mc = Hist1D(mc, bins = bins)
if normalize:
h_data = h_data.normalize()
h_mc = h_mc.normalize()
fig, (ax1,ax2) = plt.subplots(2, sharex=True, figsize=(8,6), gridspec_kw=dict(height_ratios=[3, 1]))
plt.grid()
h_data.plot(ax=ax1, alpha = 0.8, color = "black", errors = True, label = "Data")
h_mc.plot(ax=ax1, alpha = 0.8, color = "C3", label = "MC", histtype="stepfilled")
ratio = h_data / h_mc
ratio.plot(ax=ax2, errors = True, color = "black")
if x_label is not None:
ax2.set_xlabel(x_label)
if y_label is not None:
ax1.set_ylabel(y_label)
if title is not None:
ax1.set_title(title)
if y_lim is not None:
ax1.set_ylim(y_lim)
if rat_lim is not None:
ax2.set_ylim(rat_lim)
if x_lim is not None:
ax1.set_xlim(x_lim)
plt.savefig(save_name)
|
{"hexsha": "cfddad59041a71f4a3735a1595c6c34e2a49546d", "size": 1516, "ext": "py", "lang": "Python", "max_stars_repo_path": "hrl/plots/plotter.py", "max_stars_repo_name": "sam-may/HistogramRestrictedLearning", "max_stars_repo_head_hexsha": "39c8470eca438bd547bf348d7f8312db1daebde3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hrl/plots/plotter.py", "max_issues_repo_name": "sam-may/HistogramRestrictedLearning", "max_issues_repo_head_hexsha": "39c8470eca438bd547bf348d7f8312db1daebde3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hrl/plots/plotter.py", "max_forks_repo_name": "sam-may/HistogramRestrictedLearning", "max_forks_repo_head_hexsha": "39c8470eca438bd547bf348d7f8312db1daebde3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5964912281, "max_line_length": 104, "alphanum_fraction": 0.639182058, "include": true, "reason": "import numpy", "num_tokens": 433}
|
"""
Simulate inverter model with ADP formulation from the paper
"High-Speed Finite Control Set Model Predictive Control for Power Electronics", B. Stellato, T. Geyer and P. Goulart
"""
# Import numpy
import numpy as np
import pandas as pd
# Power converter model files
from .power_converter import Model
# Import plotting library
import matplotlib.pylab as plt
colors = {'b': '#1f77b4',
'g': '#2ca02c',
'o': '#ff7f0e'}
def run_example():
'''
Simulation parameters
'''
Ts = 25.0e-06 # Sampling time
freq = 50. # Switching frequency
torque = 1. # Desired torque
t0 = 0.0 # Initial time
init_periods = 1 # Number of periods to settle before simulation
    sim_periods = 2 # Number of simulated periods
flag_steady_trans = 0 # Flag Steady State (0) or Transients (1)
'''
ADP Parameters
'''
gamma = 0.95 # Forgetting factor
N_adp = np.arange(1, 6) # Horizon length
# N_adp = np.array([2])
delta = 5.5 # Switching frequency penalty
N_tail = 50
# Switching filter parameters
k1 = 0.8e03
k2 = 0.8e03
fsw_des = 300
'''
Setup model
'''
model = Model()
# Set model parameters
model.set_params(Ts, freq, k1, k2, torque)
# Set simulation time
model.set_time(t0, init_periods, sim_periods)
# Set initial conditions
model.set_initial_conditions()
'''
Allocate output statistics
'''
# THD_adp = []
# fsw_adp = []
# Te_adp = []
# Times_adp = []
'''
Run simulations
'''
# Generate extended adp model
model.gen_dynamical_system(fsw_des, delta)
# Generate tail cost
model.gen_tail_cost(N_tail, gamma, name='delta_550.mat')
gurobi_std_time = np.zeros(len(N_adp))
gurobi_avg_time = np.zeros(len(N_adp))
gurobi_min_time = np.zeros(len(N_adp))
gurobi_max_time = np.zeros(len(N_adp))
miosqp_std_time = np.zeros(len(N_adp))
miosqp_avg_time = np.zeros(len(N_adp))
miosqp_min_time = np.zeros(len(N_adp))
miosqp_max_time = np.zeros(len(N_adp))
miosqp_avg_osqp_iter = np.zeros(len(N_adp))
miosqp_osqp_avg_time = np.zeros(len(N_adp))
# Simulate model
for i in range(len(N_adp)):
stats_gurobi = model.simulate_cl(N_adp[i], flag_steady_trans,
solver='gurobi')
gurobi_std_time[i] = stats_gurobi.std_solve_time
gurobi_avg_time[i] = stats_gurobi.avg_solve_time
gurobi_min_time[i] = stats_gurobi.min_solve_time
gurobi_max_time[i] = stats_gurobi.max_solve_time
# Make plots for horizon 3
if N_adp[i] == 3:
plot_flag = 1
else:
plot_flag = 0
stats_miosqp = model.simulate_cl(N_adp[i], flag_steady_trans,
solver='miosqp', plot=plot_flag)
miosqp_std_time[i] = stats_miosqp.std_solve_time
miosqp_avg_time[i] = stats_miosqp.avg_solve_time
miosqp_min_time[i] = stats_miosqp.min_solve_time
miosqp_max_time[i] = stats_miosqp.max_solve_time
miosqp_avg_osqp_iter[i] = stats_miosqp.miosqp_avg_osqp_iter
miosqp_osqp_avg_time[i] = stats_miosqp.miosqp_osqp_avg_time
# Create pandas dataframe
timings = pd.DataFrame({'T': N_adp,
'grb_avg': gurobi_avg_time,
'grb_std': gurobi_std_time,
'grb_min': gurobi_min_time,
'grb_max': gurobi_max_time,
'miosqp_avg': miosqp_avg_time,
'miosqp_std': miosqp_std_time,
'miosqp_min': miosqp_min_time,
'miosqp_max': miosqp_max_time,
'miosqp_osqp_avg_time': miosqp_osqp_avg_time,
'miosqp_avg_osqp_iter': miosqp_avg_osqp_iter})
print("Results")
print(timings)
timings.to_csv('results/power_converter_timings.csv')
    # Plot average solve times on a log scale
plt.figure()
ax = plt.gca()
plt.semilogy(N_adp, gurobi_avg_time, color=colors['o'],
label='GUROBI')
plt.semilogy(N_adp, miosqp_avg_time, color=colors['b'],
label='miOSQP')
plt.xticks(N_adp)
ax.set_xlabel(r'Horizon length $T$')
ax.set_ylabel(r'Time [s]')
ax.legend(loc='upper left')
plt.grid(True, which="both")
plt.tight_layout()
# plt.show()
plt.savefig('results/power_converter_timings.pdf')
|
{"hexsha": "2eca5032528d6ac4fd9a2b8b2af8d69d0899cb73", "size": 4627, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/power_converter/run_example.py", "max_stars_repo_name": "bstellato/miosqp", "max_stars_repo_head_hexsha": "ac672338b0593d865dd15b7a76434f25e24244a9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 62, "max_stars_repo_stars_event_min_datetime": "2017-11-23T09:57:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-21T06:49:40.000Z", "max_issues_repo_path": "examples/power_converter/run_example.py", "max_issues_repo_name": "bstellato/miosqp", "max_issues_repo_head_hexsha": "ac672338b0593d865dd15b7a76434f25e24244a9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2017-08-31T17:28:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-19T15:19:17.000Z", "max_forks_repo_path": "examples/power_converter/run_example.py", "max_forks_repo_name": "bstellato/miosqp", "max_forks_repo_head_hexsha": "ac672338b0593d865dd15b7a76434f25e24244a9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2019-01-23T14:10:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-17T17:08:28.000Z", "avg_line_length": 30.4407894737, "max_line_length": 116, "alphanum_fraction": 0.5980116706, "include": true, "reason": "import numpy", "num_tokens": 1269}
|
import numpy as np
import cv2
img = cv2.imread('assets/soccer_practice.jpg', 0)
template = cv2.imread('assets/ball.png', 0)
height, width = template.shape
methods = [cv2.TM_CCOEFF, cv2.TM_CCOEFF_NORMED, cv2.TM_CCORR,
cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]
for method in methods:
img2 = img.copy()
result = cv2.matchTemplate(img2, template, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
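    # For the squared-difference methods the best match is the minimum of the
    # result map; for the correlation-based methods it is the maximum.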
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
location = min_loc
else:
location = max_loc
    bottom_right = (location[0] + width, location[1] + height)
cv2.rectangle(img2, location, bottom_right, 255, 5)
cv2.imshow('Match', img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
{"hexsha": "0f52ac2821b93216b7d74e93d979844c7e298f9e", "size": 770, "ext": "py", "lang": "Python", "max_stars_repo_path": "openCV/FindPartOfImage.py", "max_stars_repo_name": "MatveiAleksandrovich/Artificial-Intelligence", "max_stars_repo_head_hexsha": "d3d6f253e7c2256f6f9d490b077bdb50ca1da229", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openCV/FindPartOfImage.py", "max_issues_repo_name": "MatveiAleksandrovich/Artificial-Intelligence", "max_issues_repo_head_hexsha": "d3d6f253e7c2256f6f9d490b077bdb50ca1da229", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openCV/FindPartOfImage.py", "max_forks_repo_name": "MatveiAleksandrovich/Artificial-Intelligence", "max_forks_repo_head_hexsha": "d3d6f253e7c2256f6f9d490b077bdb50ca1da229", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6153846154, "max_line_length": 68, "alphanum_fraction": 0.6935064935, "include": true, "reason": "import numpy", "num_tokens": 253}
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import forward_lenet5
import os
import numpy as np
# Hyperparameters for the training process
BATCH_SIZE = 100 # number of examples per batch
LEARNING_RATE_BASE = 0.005 # initial learning rate
LEARNING_RATE_DECAY = 0.99 # learning rate decay rate
REGULARIZER = 0.0001 # weight of the regularization term
STEPS = 50000 # maximum number of iterations
MOVING_AVERAGE_DECAY = 0.99 # moving-average decay rate
MODEL_SAVE_PATH = '../data' # path where the model is saved
MODEL_NAME = 'letnet5_model' # model name
# Training procedure
def backward(mnist):
'''
    x and y_ are placeholders; their types and shapes (matching the network's
    input and output dimensions) must be specified. Like function parameters,
    they are fed actual values at run time.
'''
x = tf.placeholder(tf.float32, [
BATCH_SIZE, forward_lenet5.IMAGE_SIZE, forward_lenet5.IMAGE_SIZE,
forward_lenet5.NUM_CHANNELS
])
y_ = tf.placeholder(tf.float32, [None, forward_lenet5.OUTPUT_NODE])
    # Run the forward network to obtain a 10-dimensional output tensor
    y = forward_lenet5.forward(x, True, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)  # declare a global step counter, initialized to 0
'''
    First apply softmax to the network's final-layer output y, which usually
    gives the probability of each class (a vector of size num_classes), then
    take the cross entropy between this vector and the true labels. Note that
    this function returns a vector, one entry per example.
'''
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)  # averaging that vector yields the loss
    loss = cem + tf.add_n(tf.get_collection('losses'))  # add the regularization losses
'''
    Decay the learning rate exponentially: the model can approach a good
    solution quickly early in training without oscillating too much later on.
    decayed_learning_rate = learning_rate * decay_rate^(global_step / decay_steps)
'''
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE,
global_step,
mnist.train.num_examples / BATCH_SIZE,
LEARNING_RATE_DECAY,
staircase=True
        # when staircase is True, global_step/decay_steps is truncated to an
        # integer, which selects a stepwise rather than continuous decay
)
    # Build a gradient descent optimizer with this learning rate; minimize()
    # updates the list of trainable variables to reduce the loss
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
loss, global_step=global_step)
'''
    Maintain a moving average of the model; MOVING_AVERAGE_DECAY controls how
    quickly the model updates. During training a shadow variable is maintained
    for every variable, initialized to the variable's initial value and
    updated whenever the variable is updated.
'''
ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
ema_op = ema.apply(tf.trainable_variables())
    # bind the two training ops, train_step and ema_op, to train_op
with tf.control_dependencies([train_step, ema_op]):
train_op = tf.no_op(name='train')
    saver = tf.train.Saver()  # instantiate a saver for saving and restoring variables
    # create a session and manage it with a Python context manager
with tf.Session() as sess:
        init_op = tf.global_variables_initializer()  # initialize the variables in the graph
sess.run(init_op)
        # locate the most recently saved model via the checkpoint file
ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
if ckpt and ckpt.model_checkpoint_path:
            # restore the latest model
saver.restore(sess, ckpt.model_checkpoint_path)
for i in range(STEPS):
            # read one batch of data
xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # reshape the input xs into a matrix matching the network's input shape
reshaped_xs = np.reshape(
xs, (BATCH_SIZE, forward_lenet5.IMAGE_SIZE,
forward_lenet5.IMAGE_SIZE, forward_lenet5.NUM_CHANNELS))
            # feed in the training images and labels and run one training step
_, loss_value, step = sess.run([train_op, loss, global_step],
feed_dict={
x: reshaped_xs,
y_: ys
})
if i % 100 == 0:
print("After %d training step,loss on training batch is %g." %
(step, loss_value))
saver.save(
sess,
os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
global_step=global_step)
def main(argv=None):
mnist = input_data.read_data_sets("../MNIST_data", one_hot=True)
backward(mnist)
if __name__ == "__main__":
tf.app.run()
|
{"hexsha": "be9355b30311ca139825d7abc1ff02e597d62651", "size": 3805, "ext": "py", "lang": "Python", "max_stars_repo_path": "course-code/imooc-tf-mnist-flask/mnist/lenet5/backforward_lenet5.py", "max_stars_repo_name": "le3t/ko-repo", "max_stars_repo_head_hexsha": "50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2018-12-06T02:17:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-07T09:03:36.000Z", "max_issues_repo_path": "course-code/imooc-tf-mnist-flask/mnist/lenet5/backforward_lenet5.py", "max_issues_repo_name": "Artister/tutorials-java", "max_issues_repo_head_hexsha": "50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-08-26T13:41:57.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-26T13:44:21.000Z", "max_forks_repo_path": "course-code/imooc-tf-mnist-flask/mnist/lenet5/backforward_lenet5.py", "max_forks_repo_name": "Artister/tutorials-java", "max_forks_repo_head_hexsha": "50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2018-12-27T08:31:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-03T08:35:28.000Z", "avg_line_length": 37.6732673267, "max_line_length": 78, "alphanum_fraction": 0.6467805519, "include": true, "reason": "import numpy", "num_tokens": 1384}
|
//
// Copyright 2014 QuarksLab
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef PE_RESOURCE_PARSER_HPP_INCLUDED
#define PE_RESOURCE_PARSER_HPP_INCLUDED
#include "binmap/collectors/pe.hpp"
#include "binmap/collectors/pe/data.hpp"
#include <boost/filesystem/path.hpp>
#include <map>
#include <string>
// typedefs used by ResourceParser class and methods.
typedef std::map<uint16_t, PeImageResourceDirectoryEntry> map_id_t;
typedef std::map<std::string, PeImageResourceDirectoryEntry> map_name_t;
struct AssemblyIdentity {
std::string type;
std::string name;
std::string version;
std::string processorArchitecture;
std::string publicKeyToken;
std::string language;
};
// looks like this: 6.0.9600.16384
class AssemblyVersion {
private:
uint32_t _major;
uint32_t _minor1;
uint32_t _minor2;
uint32_t _minor3;
boost::filesystem::path _dir;
void set_major_minors(const std::string &str_ver);
public:
AssemblyVersion(const boost::filesystem::path &dir);
AssemblyVersion(const std::string &version);
AssemblyVersion() {}
uint32_t major() const { return _major; }
uint32_t minor1() const { return _minor1; }
uint32_t minor2() const { return _minor2; }
uint32_t minor3() const { return _minor3; }
const boost::filesystem::path &directory_path(void) const { return _dir; }
friend bool operator>(const AssemblyVersion &v1, const AssemblyVersion &v2);
friend bool operator<(const AssemblyVersion &v1, const AssemblyVersion &v2);
friend bool operator==(const AssemblyVersion &v1, const AssemblyVersion &v2);
friend bool operator!=(const AssemblyVersion &v1, const AssemblyVersion &v2);
friend bool operator>=(const AssemblyVersion &v1, const AssemblyVersion &v2);
};
class DirStartsWithComparator {
private:
std::string _dir_start;
public:
explicit DirStartsWithComparator(const std::string &dir_start)
: _dir_start(dir_start) {}
inline bool operator()(const boost::filesystem::path &dir_full_path) {
return this->operator()(dir_full_path.string());
}
inline bool operator()(const std::string &dir_full_path) {
return dir_full_path.find(_dir_start) != std::string::npos;
}
};
template <typename _Bits> class ResourceParser {
private:
// data from PE
const PeData<_Bits> *_pe_data;
// whole resource section content
char *_resource_section;
// tells whether current PE file has a resource section or not.
bool _has_resource;
// map for resource directory entries with names
map_name_t _name_map;
// map for resource directory entries with IDs
map_id_t _id_map;
public:
// ctor
ResourceParser(const PeData<_Bits> *const pe_data);
// dtor
virtual ~ResourceParser();
// return the associated resource directory entry from an ID.
bool find_entry_by_id(uint16_t id,
PeImageResourceDirectoryEntry &dir_entry) const;
// given a resource directory entry, return the associated (lower level)
// resource directory
bool get_directory_for_entry(const PeImageResourceDirectoryEntry &entry,
PeImageResourceDirectory &directory) const;
// given a resource directory entry, return all resource directory entries
// from it.
bool get_all_resource_dir_entries_for_entry(
const PeImageResourceDirectoryEntry &entry, map_id_t &id_map,
map_name_t &name_map) const;
// given a resource directory, return all resource directory entries from it.
bool get_all_resource_dir_entries_for_dir(PeImageResourceDirectory *dir,
map_id_t &id_map,
map_name_t &name_map) const;
  // given a resource data entry, return the data content associated with it.
bool get_data_from_data_entry(const PeImageResourceDataEntry &data_entry,
char **buffer) const;
// given a resource directory entry, return the first resource directory entry
// associated to it.
bool get_first_dir_entry_from_dir_entry(
const PeImageResourceDirectoryEntry &in_entry,
PeImageResourceDirectoryEntry &out_entry, uint32_t &num_entries) const;
// given a resource directory entry, return the resource data entry associated
// to it.
bool
get_data_entry_from_dir_entry(const PeImageResourceDirectoryEntry &dir_entry,
PeImageResourceDataEntry &data_entry) const;
// given a data entry, return data content from it.
bool get_data_from_dir_entry(const PeImageResourceDirectoryEntry &dir_entry,
char **buffer) const;
bool parse_manifest(std::istream &stream,
std::vector<AssemblyIdentity> &vec_asm) const;
/* WinSXS */
bool get_winsxs_directory_for_assembly(const AssemblyIdentity &asm_id,
boost::filesystem::path &dir_path,
const uint16_t machine =
PeFileHeader::kMachineI386) const;
bool
get_version_from_winsxs_directory(const boost::filesystem::path &dir_path,
AssemblyVersion &version) const;
// tell whether the current PE file has resources or not.
bool has_resource(void) const { return _has_resource; }
// return a pointer to a copy of the resource section.
char *resource_section(void) const { return _resource_section; }
};
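// Illustrative usage sketch (comments only; `bits32` and `manifest_id` are
// hypothetical placeholders, and constructing the PeData object is assumed
// to happen elsewhere):
//
//   ResourceParser<bits32> parser(pe_data);  // pe_data: const PeData<bits32>*
//   if (parser.has_resource()) {
//     PeImageResourceDirectoryEntry entry;
//     if (parser.find_entry_by_id(manifest_id, entry)) {
//       char *buffer = NULL;
//       if (parser.get_data_from_dir_entry(entry, &buffer))
//         ;  // parse_manifest() can then extract AssemblyIdentity records
//     }
//   }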
#endif // include guard
|
{"hexsha": "01cc4f516d87b37bed8d56510f22d22611007352", "size": 5940, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/binmap/collectors/pe/resource_parser.hpp", "max_stars_repo_name": "H1d3r/binmap", "max_stars_repo_head_hexsha": "78f5454690958871846a2417513ae75e9a4bacdd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 235.0, "max_stars_repo_stars_event_min_datetime": "2016-03-09T16:04:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-13T09:05:39.000Z", "max_issues_repo_path": "include/binmap/collectors/pe/resource_parser.hpp", "max_issues_repo_name": "romainthomas/binmap", "max_issues_repo_head_hexsha": "4d565fb4a0931c3e45509edc6807caf3508d0ad0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12.0, "max_issues_repo_issues_event_min_datetime": "2016-03-10T12:35:42.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-13T11:06:52.000Z", "max_forks_repo_path": "include/binmap/collectors/pe/resource_parser.hpp", "max_forks_repo_name": "romainthomas/binmap", "max_forks_repo_head_hexsha": "4d565fb4a0931c3e45509edc6807caf3508d0ad0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 64.0, "max_forks_repo_forks_event_min_datetime": "2016-03-08T14:19:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T12:08:10.000Z", "avg_line_length": 37.5949367089, "max_line_length": 80, "alphanum_fraction": 0.7089225589, "num_tokens": 1326}
|
import cv2
import time
import numpy as np
import subprocess
def read_to_frames(video_file, height, width, start_time='00:00:00'):
process = subprocess.Popen([
'ffmpeg', '-v', 'error',
'-ss', start_time,
'-i', video_file,
'-an',
'-f', 'rawvideo',
'-pix_fmt', 'rgb24',
        '-r', '1', # sample one frame per second
        '-vframes', '1', # number of frames to grab
'-'
], stdout=subprocess.PIPE, stdin=None)
t = time.time()
return_code = process.poll()
frames = []
    # TODO: this loop can be optimized
while not return_code:
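        # Each rgb24 frame occupies exactly width*height*3 bytes on the pipe.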
        raw_image = process.stdout.read(width*height*3)
if not raw_image:
break
process.stdout.flush()
        frame_array = np.frombuffer(raw_image, dtype='uint8')
        img = frame_array.reshape((height, width, 3))
frame = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
frames.append(frame)
return_code = process.poll()
print(time.time() - t)
return frames
if __name__ == '__main__':
a = read_to_frames('/tmp/videos/D86639983/D86639983_2021-08-12_13-45-42.mp4', 720, 1280)
# cv2.imshow('a', a[0])
# cv2.waitKey()
|
{"hexsha": "fe1ae01cb86bd6ec5ebc296fac1171ea6871261a", "size": 1130, "ext": "py", "lang": "Python", "max_stars_repo_path": "intelab_python_sdk/ffmpeg/ffmpeg_read_frames.py", "max_stars_repo_name": "zwwangoo/intelab-python-sdk", "max_stars_repo_head_hexsha": "b0b40d873f277b20886c5e5bdd5b37d1f060a883", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-28T02:29:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-28T02:29:18.000Z", "max_issues_repo_path": "intelab_python_sdk/ffmpeg/ffmpeg_read_frames.py", "max_issues_repo_name": "zwwangoo/intelab-python-sdk", "max_issues_repo_head_hexsha": "b0b40d873f277b20886c5e5bdd5b37d1f060a883", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "intelab_python_sdk/ffmpeg/ffmpeg_read_frames.py", "max_forks_repo_name": "zwwangoo/intelab-python-sdk", "max_forks_repo_head_hexsha": "b0b40d873f277b20886c5e5bdd5b37d1f060a883", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5652173913, "max_line_length": 92, "alphanum_fraction": 0.5743362832, "include": true, "reason": "import numpy", "num_tokens": 325}
|
# --------------------------------------------------------------------------- #
# TEST CLASSIFICATION #
# --------------------------------------------------------------------------- #
#%%
# Imports
import math
import numpy as np
import pandas as pd
import pytest
from pytest import mark
from ml_studio.supervised_learning.classification import LogisticRegression
from ml_studio.supervised_learning.classification import MultinomialLogisticRegression
from ml_studio.supervised_learning.training.early_stop import EarlyStopImprovement
from ml_studio.supervised_learning.training.early_stop import EarlyStopStrips
from ml_studio.supervised_learning.training.metrics import Metric
# --------------------------------------------------------------------------- #
#%%
# --------------------------------------------------------------------------- #
# LOGISTIC REGRESSION #
# --------------------------------------------------------------------------- #
class LogisticRegressionTests:
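    """Tests for LogisticRegression: naming, input validation, prediction,
    training history under early stopping, learning rate schedules, and the
    convergence-monitor metric chosen by the estimator."""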
@mark.logistic_regression
@mark.logistic_regression_name
def test_logistic_regression_name(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=50)
clf.fit(X,y)
assert clf.name == 'Logistic Regression with Batch Gradient Descent'
clf = LogisticRegression(epochs=50, batch_size=1)
clf.fit(X,y)
assert clf.name == 'Logistic Regression with Stochastic Gradient Descent'
clf = LogisticRegression(epochs=50, batch_size=32)
clf.fit(X,y)
assert clf.name == 'Logistic Regression with Minibatch Gradient Descent'
@mark.logistic_regression
@mark.logistic_regression_val
def test_logistic_regression_validation(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=50, metric='mean')
with pytest.raises(ValueError):
clf.fit(X,y)
clf = LogisticRegression(epochs=50, cost='quadratic')
with pytest.raises(ValueError):
clf.fit(X,y)
@mark.logistic_regression
@mark.logistic_regression_predict
def test_logistic_regression_predict(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=100, learning_rate=0.01, checkpoint=10)
clf.fit(X,y)
y_pred = clf._predict(X)
assert y_pred.shape == (y.shape[0],), "y_pred has wrong shape for binary problem"
y_pred = clf.predict(X)
score = clf.score(X,y)
assert y_pred.shape == (y.shape[0],), "y_pred has wrong shape for binary problem"
assert score > 0.3, "Accuracy below 0.3"
assert score < 1, "Accuracy is greater than or equal to 1"
@mark.logistic_regression
@mark.logistic_regression_history
def test_logistic_regression_history_w_early_stop(self, get_binary_classification_data):
X, y = get_binary_classification_data
es = EarlyStopImprovement()
clf = LogisticRegression(epochs=10, early_stop=es)
clf.fit(X, y)
# Test epoch history
assert clf.history.total_epochs == len(clf.history.epoch_log.get('epoch')), "number of epochs in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('learning_rate')), "number of learning rates in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('theta')), "number of thetas in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('train_cost')), "number of train costs in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('val_cost')), "number of val costs in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('train_score')), "number of train score in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('val_score')), "number of val score in log doesn't match epochs"
assert all(np.equal(clf.theta, clf.history.epoch_log.get('theta')[-1])), "Last theta in log doesn't equal final theta."
assert clf.history.epoch_log.get('train_cost')[0] > clf.history.epoch_log.get('train_cost')[-1], "train_cost does not decrease"
#assert clf.history.epoch_log.get('train_score')[0] > clf.history.epoch_log.get('train_score')[-1], "train_score does not decrease"
assert clf.history.epoch_log.get('val_cost')[0] > clf.history.epoch_log.get('val_cost')[-1], "val_cost does not decrease"
#assert clf.history.epoch_log.get('val_score')[0] > clf.history.epoch_log.get('val_score')[-1], "val_score does not decrease"
# Test batch history
assert clf.history.total_batches == len(clf.history.batch_log.get('batch')), "number of batches in log doesn't match total batches"
assert clf.history.total_batches == len(clf.history.batch_log.get('batch_size')), "number of batch sizes in log doesn't match total batches"
assert clf.history.total_batches == len(clf.history.batch_log.get('theta')), "number of thetas in log doesn't match total batches"
assert clf.history.total_batches == len(clf.history.batch_log.get('train_cost')), "number of train_costs in log doesn't match total batches"
@mark.logistic_regression
@mark.logistic_regression_learning_rate_schedules
def test_logistic_regression_learning_rate_schedules(self, learning_rate_schedules, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=200, checkpoint=10, learning_rate=learning_rate_schedules, patience=40)
clf.fit(X, y)
# Confirm learning rates decreased
assert clf.history.epoch_log.get('learning_rate')[0] > clf.history.epoch_log.get('learning_rate')[-1], "Learning rate didn't decrease"
assert clf.history.epoch_log.get('learning_rate')[0] != clf.eta, "Learning rate didn't change"
@mark.logistic_regression
def test_logistic_regression_early_stop_from_estimator_train_cost(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=5000, early_stop=False, val_size=0.3, metric=None)
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'train_cost', "Estimator is not sending correct metric"
@mark.logistic_regression
def test_logistic_regression_early_stop_from_estimator_train_score(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=5000, early_stop=False, val_size=0.3, metric='accuracy')
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'train_score', "Estimator is not sending correct metric"
@mark.logistic_regression
def test_logistic_regression_early_stop_from_estimator_val_cost(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=5000, early_stop=True, val_size=0.3, metric=None)
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'val_cost', "Estimator is not sending correct metric"
@mark.logistic_regression
def test_logistic_regression_early_stop_from_estimator_val_score(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=5000, early_stop=True, val_size=0.3, metric='accuracy')
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'val_score', "Estimator is not sending correct metric"
# --------------------------------------------------------------------------- #
# MULTINOMIAL LOGISTIC REGRESSION #
# --------------------------------------------------------------------------- #
class MultinomialLogisticRegressionTests:
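    """Same battery of tests as LogisticRegressionTests, but for
    MultinomialLogisticRegression with categorical cross-entropy."""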
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_name
def test_multinomial_logistic_regression_name(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=50, cost='categorical_cross_entropy')
clf.fit(X,y)
assert clf.name == 'Multinomial Logistic Regression with Batch Gradient Descent'
clf = MultinomialLogisticRegression(epochs=50, batch_size=1,cost='categorical_cross_entropy')
clf.fit(X,y)
assert clf.name == 'Multinomial Logistic Regression with Stochastic Gradient Descent'
clf = MultinomialLogisticRegression(epochs=50, batch_size=32, cost='categorical_cross_entropy')
clf.fit(X,y)
assert clf.name == 'Multinomial Logistic Regression with Minibatch Gradient Descent'
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_val
def test_multinomial_logistic_regression_validation(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=50, metric='mse')
with pytest.raises(ValueError):
clf.fit(X,y)
clf = MultinomialLogisticRegression(epochs=50, cost='binary_cross_entropy')
with pytest.raises(ValueError):
clf.fit(X,y)
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_prep_data
def test_multinomial_logistic_regression_prep_data(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=50, cost='categorical_cross_entropy',
val_size=0, early_stop=False)
clf.fit(X,y)
assert X.shape[0] == clf.X.shape[0], "X.shape[0] incorrect in prep data"
assert X.shape[1]+1 == clf._X_design.shape[1], "X.shape[1] incorrect in prep data"
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_init_weights
def test_multinomial_logistic_regression_init_weights(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
n_features = X.shape[1]+1
n_classes = len(np.unique(y))
clf = MultinomialLogisticRegression(epochs=50)
clf.fit(X,y)
assert clf.theta.shape == (n_features,n_classes), "theta shape incorrect for multi classification"
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_predict
def test_multinomial_logistic_regression_predict(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=1000, cost='categorical_cross_entropy', patience=40)
clf.fit(X,y)
y_pred = clf._predict(X)
assert y_pred.shape == (y.shape[0],3), "Shape of prediction is not correct."
y_pred = clf.predict(X)
score = clf.score(X,y)
assert y_pred.shape == (y.shape[0],), "Shape of prediction is not correct."
assert clf.history.epoch_log.get('train_cost')[0] > clf.history.epoch_log.get('train_cost')[-1], "Training costs didn't decrease"
assert clf.history.epoch_log.get('train_score')[0] < clf.history.epoch_log.get('train_score')[-1], "Training score didn't increase"
assert score >= 0.5, "Accuracy below 0.5"
assert score < 1, "Accuracy is greater than or equal to 1"
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_early_stop
def test_multinomial_logistic_regression_early_stop(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
es = EarlyStopImprovement(precision=0.001, patience=5)
clf = MultinomialLogisticRegression(epochs=100, early_stop=es, checkpoint=10)
clf.fit(X, y)
# Confirm early stop happened
assert clf.history.total_epochs < clf.epochs, "Early stop didn't happen."
# Test epoch history
assert clf.history.total_epochs == len(clf.history.epoch_log.get('epoch')), "number of epochs in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('learning_rate')), "number of learning rates in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('theta')), "number of thetas in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('train_cost')), "number of train costs in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('val_cost')), "number of val costs in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('train_score')), "number of train score in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('val_score')), "number of val score in log doesn't match epochs"
        assert np.array_equal(clf.theta, clf.history.epoch_log.get('theta')[-1]), "Last theta in log doesn't equal final theta."
# Test Performance Trends
assert clf.history.epoch_log.get('train_cost')[0] > clf.history.epoch_log.get('train_cost')[-1], "Training costs didn't decrease"
#assert clf.history.epoch_log.get('train_score')[0] < clf.history.epoch_log.get('train_score')[-1], "Training score didn't increase"
assert clf.history.epoch_log.get('val_cost')[0] > clf.history.epoch_log.get('val_cost')[-1], "Validation costs didn't decrease"
#assert clf.history.epoch_log.get('val_score')[0] < clf.history.epoch_log.get('val_score')[-1], "Validation score didn't increase"
# Test batch history
assert clf.history.total_batches == len(clf.history.batch_log.get('batch')), "number of batches in log doesn't match total batches"
assert clf.history.total_batches == len(clf.history.batch_log.get('batch_size')), "number of batch sizes in log doesn't match total batches"
assert clf.history.total_batches == len(clf.history.batch_log.get('theta')), "number of thetas in log doesn't match total batches"
assert clf.history.total_batches == len(clf.history.batch_log.get('train_cost')), "number of train_costs in log doesn't match total batches"
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_learning_rate_schedules
def test_multinomial_logistic_regression_learning_rate_schedules(self, learning_rate_schedules, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=50, checkpoint=10, learning_rate=learning_rate_schedules)
clf.fit(X, y)
# Confirm learning rates decreased
assert clf.history.epoch_log.get('learning_rate')[0] > clf.history.epoch_log.get('learning_rate')[-1], "Learning rate didn't decrease"
assert clf.history.epoch_log.get('learning_rate')[0] != clf.eta, "Learning rate didn't change"
@mark.logistic_regression
@mark.multinomial_logistic_regression
def test_multinomial_logistic_regression_early_stop_from_estimator_train_cost(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=5000, early_stop=False, val_size=0.3, metric=None)
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'train_cost', "Estimator is not sending correct metric"
@mark.logistic_regression
@mark.multinomial_logistic_regression
def test_multinomial_logistic_regression_early_stop_from_estimator_train_score(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=5000, early_stop=False, val_size=0.3, metric='accuracy')
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'train_score', "Estimator is not sending correct metric"
@mark.logistic_regression
@mark.multinomial_logistic_regression
def test_multinomial_logistic_regression_early_stop_from_estimator_val_cost(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=5000, early_stop=True, val_size=0.3, metric=None)
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'val_cost', "Estimator is not sending correct metric"
@mark.logistic_regression
@mark.multinomial_logistic_regression
def test_multinomial_logistic_regression_early_stop_from_estimator_val_score(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=5000, early_stop=True, val_size=0.3, metric='accuracy')
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'val_score', "Estimator is not sending correct metric"
|
{"hexsha": "e543781fc8f8d63854bc8d652327da8acd312163", "size": 17937, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_supervised_learning/test_classification.py", "max_stars_repo_name": "john-james-ai/ml-studio", "max_stars_repo_head_hexsha": "2230fcd6579d2291c761e559ec93b18ddd7a96e6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-30T09:37:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-30T09:37:00.000Z", "max_issues_repo_path": "tests/test_supervised_learning/test_classification.py", "max_issues_repo_name": "john-james-ai/ml-studio", "max_issues_repo_head_hexsha": "2230fcd6579d2291c761e559ec93b18ddd7a96e6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-12-05T19:37:59.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-31T05:49:53.000Z", "max_forks_repo_path": "tests/test_supervised_learning/test_classification.py", "max_forks_repo_name": "john-james-ai/ml-studio", "max_forks_repo_head_hexsha": "2230fcd6579d2291c761e559ec93b18ddd7a96e6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 66.4333333333, "max_line_length": 166, "alphanum_fraction": 0.6812175949, "include": true, "reason": "import numpy", "num_tokens": 3855}
|
import os.path
import argparse
import numpy as np
from utils.logger import setup_logger
from utils_balanced_sampling import train_boundary
def parse_args():
"""Parses arguments."""
parser = argparse.ArgumentParser(
description='Train semantic boundary with given latent codes and '
'attribute scores.')
parser.add_argument('-o', '--output_dir', type=str, required=True,
help='Directory to save the output results. (required)')
parser.add_argument('-c', '--latent_codes_path', type=str, required=True,
help='Path to the input latent codes. (required)')
parser.add_argument('-s', '--scores_path', type=str, required=True,
                        help='Path to the dictionary containing all the attribute scores. (required)')
    parser.add_argument('-a', '--attribute', type=str, required=True,
                        help='Attribute for which to compute the boundary. (required)')
    parser.add_argument('-n', '--num_samples_boundary', type=int, required=True,
                        help='Number of samples to use to compute the boundary. (required)')
parser.add_argument('-t', '--confidence_threshold', type=float, default=None,
help='Confidence threshold to filter ambiguous samples.')
parser.add_argument('--boundary_name', type=str, default='boundary.npy')
return parser.parse_args()
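# A hypothetical invocation of this script (the file names below are
# placeholders, not files from the repository):
#   python train_boundary_balanced_sampling.py -o results/ \
#       -c latent_codes.npy -s scores.npy -a smiling -n 10000 -t 0.75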
def main():
"""Main function."""
args = parse_args()
logger = setup_logger(args.output_dir, logger_name='generate_data')
logger.info('Loading latent codes.')
if not os.path.isfile(args.latent_codes_path):
raise ValueError(f'Latent codes `{args.latent_codes_path}` does not exist!')
latent_codes = np.load(args.latent_codes_path)
logger.info('Loading attribute scores.')
if not os.path.isfile(args.scores_path):
raise ValueError(f'Attribute scores `{args.scores_path}` does not exist!')
scores_dict = np.load(args.scores_path, allow_pickle=True)[()]
boundary = train_boundary(latent_codes=latent_codes,
scores_dict=scores_dict,
attribute=args.attribute,
confidence_t=args.confidence_threshold,
n=args.num_samples_boundary)
np.save(os.path.join(args.output_dir, args.boundary_name), boundary)
if __name__ == '__main__':
main()
|
{"hexsha": "6688e6967d4827621d498023d3821cbb9b5c514a", "size": 2338, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_boundary_balanced_sampling.py", "max_stars_repo_name": "perladoubinsky/balanced_sampling_gan_controls", "max_stars_repo_head_hexsha": "cbec7a38176406c0e19d4b6ebbc6c6b52d268036", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-11T19:58:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-11T19:58:12.000Z", "max_issues_repo_path": "train_boundary_balanced_sampling.py", "max_issues_repo_name": "perladoubinsky/balanced_sampling_gan_controls", "max_issues_repo_head_hexsha": "cbec7a38176406c0e19d4b6ebbc6c6b52d268036", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_boundary_balanced_sampling.py", "max_forks_repo_name": "perladoubinsky/balanced_sampling_gan_controls", "max_forks_repo_head_hexsha": "cbec7a38176406c0e19d4b6ebbc6c6b52d268036", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5090909091, "max_line_length": 101, "alphanum_fraction": 0.6672369547, "include": true, "reason": "import numpy", "num_tokens": 464}
|
""" Plots figure 2.
"""
from matplotlib.lines import Line2D
import ccobra
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def get_response_vector(pattern):
""" Extracts a vector of responses from a given pattern by identifying the response with
maximum weight for each syllogistic problem.
"""
result = []
prediction_matrix = pattern.reshape(64, 9)
for i in range(64):
result.append(ccobra.syllogistic.RESPONSES[prediction_matrix[i].argmax()])
return result
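# Note on shapes (inferred from the reshape above): each pattern is a flat
# vector of 64 * 9 = 576 weights; the row-wise argmax over the (64, 9) matrix
# picks one of the nine syllogistic responses for each of the 64 tasks.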
def evaluate_all_models(datasets, models):
""" Evaluates all models for the given datasets. Produces a data table containing
information about the data-table combination and evaluation results (i.e., predictions, hits,
etc.).
"""
result = []
# Iterate over models
for model in models:
model_name = ("{} model ({} - {})" \
.format(model["model_target"], model["model_datasets"][0], model["model_datasets"][1])) \
.replace("ccobra_", "")
model_pattern = model["model_pattern"]
model_pred = get_response_vector(model_pattern)
model_group = model["model_target"]
model_origin = model["model_datasets"]
# Iterate over datasets
for dataset_name, dataset_content in datasets.items():
data_df = dataset_content["data"]
# Iterate over subjects contained in the dataset
for subj, subj_df in data_df.groupby("id"):
# Query model for individual predictions and compare with truth to obtain hits
for _, task in subj_df.iterrows():
task_list = [x.split(";") for x in task["task"].split("/")]
resp_list = task["response"].split(";")
task_enc = ccobra.syllogistic.encode_task(task_list)
resp_enc = ccobra.syllogistic.encode_response(resp_list, task_list)
pred = model_pred[ccobra.syllogistic.SYLLOGISMS.index(task_enc)]
hit = (resp_enc == pred)
# Store the evaluation result
result.append({
"dataset" : dataset_name,
"model" : model_name.replace('exp3_', '').replace('_full', ''),
"model_group" : model_group,
"model_origin" : (x.replace('exp3_', '').replace('_full', '') for x in model_origin),
"subj" : subj,
"task" : task_enc,
"truth" : resp_enc,
"pred" : pred,
"hit" : hit
})
return pd.DataFrame(result)
# Load the datasets
datasets = {
"control": {
"fname": "ccobra_control",
"data": pd.read_csv("data/ccobra_control.csv")
},
"1 sec": {
"fname": "ccobra_1s",
"data": pd.read_csv("data/ccobra_1s.csv")
},
"10 sec": {
"fname": "ccobra_10s",
"data": pd.read_csv("data/ccobra_10s.csv")
}
}
names = [x["fname"] for x in datasets.values()]
# Prepare colors for plotting
dashes = ["dashed", "dotted", "solid"]
common_colors = sns.color_palette("Greys_d", 3)
other_colors = [
sns.color_palette("Blues_d", 2),
sns.color_palette("Reds_d", 2),
sns.color_palette("Greens_d", 2)
]
# Prepare plot settings
hue_order = []
palette = {}
models = []
# Iterate over filename indices
for i in range(len(names) - 1):
# Iterate over remaining filename indices
for j in range(i + 1, len(names)):
# Load the JNMF data corresponding to the filenames
        template = "fit_results/fit_{}_{}_result_W_{}.npy"  # placeholders: dataset1, dataset2, dataset whose W matrix is loaded
W_lower_feedback = np.load(template.format(names[i], names[j], names[i]))
W_higher_feedback = np.load(template.format(names[i], names[j], names[j]))
# Extract the common pattern as the average between both common patterns
common = (W_lower_feedback[:,0] + W_higher_feedback[:,0]) / 2
# Store the patterns as models
models.append({
"model_datasets" : (names[i], names[j]),
"model_target" : "common",
"model_pattern" : common
})
models.append({
"model_datasets" : (names[i], names[j]),
"model_target" : names[i],
"model_pattern" : W_lower_feedback[:,1]
})
models.append({
"model_datasets" : (names[i], names[j]),
"model_target" : names[j],
"model_pattern" : W_higher_feedback[:,1]
})
# Update plot color options
palette[("common model ({} - {})".format(names[i], names[j])).replace("ccobra_", "").replace('exp3_', '').replace('_full', '')] = common_colors[(i + j + 2) % 3]
palette[("{} model ({} - {})".format(names[i], names[i], names[j])).replace("ccobra_", "").replace('exp3_', '').replace('_full', '')] = other_colors[i][j - 1]
palette[("{} model ({} - {})".format(names[j], names[i], names[j])).replace("ccobra_", "").replace('exp3_', '').replace('_full', '')] = other_colors[j][i]
hue_order.append(("common model ({} - {})".format(names[i], names[j])).replace("ccobra_", "").replace('exp3_', '').replace('_full', ''))
hue_order.append(("{} model ({} - {})".format(names[i], names[i], names[j])).replace("ccobra_", "").replace('exp3_', '').replace('_full', ''))
hue_order.append(("{} model ({} - {})".format(names[j], names[i], names[j])).replace("ccobra_", "").replace('exp3_', '').replace('_full', ''))
# Evaluate the models
result_df = evaluate_all_models(datasets, models)
print(result_df.head())
# Prepare the plot
sns.set(style='whitegrid', palette="colorblind")
f = plt.figure(figsize=(10, 3.5))
ax = plt.gca()
# Fixed legend ordering (overrides the hue_order collected in the loop above)
hue_order = [
'common model (control - 1s)',
'common model (control - 10s)',
'common model (1s - 10s)',
'control model (control - 1s)',
'control model (control - 10s)',
'1s model (control - 1s)',
'1s model (1s - 10s)',
'10s model (control - 10s)',
'10s model (1s - 10s)',
]
# Plot the data
cp = sns.pointplot(x="dataset", y="hit", hue="model", data=result_df, ax=ax, palette=palette, hue_order=hue_order)
cp.set(xlabel='Dataset', ylabel='Accuracy')
# Plot a custom legend
legend_els = [
Line2D([0], [0], marker='o', color='w', markerfacecolor=palette['common model (control - 1s)'], markersize=10, label='(control - 1s) $\\rightarrow$ common'),
Line2D([0], [0], marker='o', color='w', markerfacecolor=palette['common model (control - 10s)'], markersize=10, label='(control - 10s) $\\rightarrow$ common'),
Line2D([0], [0], marker='o', color='w', markerfacecolor=palette['common model (1s - 10s)'], markersize=10, label='(1s - 10s) $\\rightarrow$ common'),
Line2D([0], [0], marker='o', color='w', markerfacecolor=palette['control model (control - 1s)'], markersize=10, label='(control - 1s) $\\rightarrow$ control'),
Line2D([0], [0], marker='o', color='w', markerfacecolor=palette['control model (control - 10s)'], markersize=10, label='(control - 10s) $\\rightarrow$ control'),
Line2D([0], [0], marker='o', color='w', markerfacecolor=palette['1s model (control - 1s)'], markersize=10, label='(control - 1s) $\\rightarrow$ 1s'),
Line2D([0], [0], marker='o', color='w', markerfacecolor=palette['1s model (1s - 10s)'], markersize=10, label='(1s - 10s) $\\rightarrow$ 1s'),
Line2D([0], [0], marker='o', color='w', markerfacecolor=palette['10s model (control - 10s)'], markersize=10, label='(control - 10s) $\\rightarrow$ 10s'),
Line2D([0], [0], marker='o', color='w', markerfacecolor=palette['10s model (1s - 10s)'], markersize=10, label='(1s - 10s) $\\rightarrow$ 10s'),
]
ax.legend(title="Models",
          handles=legend_els, frameon=True, loc='center left',
          ncol=1, bbox_to_anchor=(1, 0.5)
          )
# Store and show the image
plt.tight_layout()
plt.savefig('results/all_models.pdf')
plt.show()
|
{"hexsha": "4d02419bce589a8079b44d378eb7f0694c0d2568", "size": 8044, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/plot_fig2.py", "max_stars_repo_name": "nriesterer/iccm-nmffeedback", "max_stars_repo_head_hexsha": "11c1595a94d282c469e84a3ad60217abe80e9ec4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-25T11:40:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-07T12:33:17.000Z", "max_issues_repo_path": "analysis/plot_fig2.py", "max_issues_repo_name": "nriesterer/iccm-nmffeedback", "max_issues_repo_head_hexsha": "11c1595a94d282c469e84a3ad60217abe80e9ec4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/plot_fig2.py", "max_forks_repo_name": "nriesterer/iccm-nmffeedback", "max_forks_repo_head_hexsha": "11c1595a94d282c469e84a3ad60217abe80e9ec4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6262626263, "max_line_length": 168, "alphanum_fraction": 0.5958478369, "include": true, "reason": "import numpy", "num_tokens": 2125}
|
"""
Measure transmission / time of flight with the synchronized CCT / pulser.
If both the sensor (spotWave sensor input) and the transmitter (spotWave CCT output) are mounted on
the same structure, the pulser functionality can be used to analyze transmission or measure the time
of flight (using thresholds or timepickers).
Setting a negative value for CCT (or enabling the sync flag) will synchronize the pulser with the
first sample of the snapshot acquired with `get_data`.
The example requires matplotlib for plotting (install with `pip install matplotlib`).
"""
import logging
from tkinter import TclError
import matplotlib.pyplot as plt
import numpy as np
from waveline import SpotWave
logging.basicConfig(level=logging.INFO)
def main():
port = SpotWave.discover()[0]
# prepare plot
plt.ion()
_, ax = plt.subplots(figsize=(10, 3), tight_layout=True)
with SpotWave(port) as sw:
sw.set_cct(1, sync=True)
sw.set_filter(90e3, 150e3, 4) # 90-150 kHz bandpass
while True:
data = sw.get_data(2048) # read snapshot -> trigger pulser
t = np.arange(len(data)) / sw.CLOCK # create time axis
ax.clear()
ax.plot(t * 1e6, data * 1e6)
ax.set_xlabel("Time [µs]")
ax.set_ylabel("Amplitude [µV]")
plt.pause(1)
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, TclError):
...
|
{"hexsha": "cdd1e1dfa983aadb31df4ee0266c7c3a072e1eec", "size": 1447, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/spotwave_transmission.py", "max_stars_repo_name": "vallen-systems/pyWaveLine", "max_stars_repo_head_hexsha": "d5bddda16c1f41261dc2d76896b768c469c7218d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-20T08:31:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:18:41.000Z", "max_issues_repo_path": "examples/spotwave_transmission.py", "max_issues_repo_name": "vallen-systems/pyWaveLine", "max_issues_repo_head_hexsha": "d5bddda16c1f41261dc2d76896b768c469c7218d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/spotwave_transmission.py", "max_forks_repo_name": "vallen-systems/pyWaveLine", "max_forks_repo_head_hexsha": "d5bddda16c1f41261dc2d76896b768c469c7218d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-20T08:32:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-18T01:42:32.000Z", "avg_line_length": 28.3725490196, "max_line_length": 100, "alphanum_fraction": 0.6710435384, "include": true, "reason": "import numpy", "num_tokens": 351}
|
#ifndef _MATRIX_PERMUTE_HPP_
#define _MATRIX_PERMUTE_HPP_
#include <cstddef>
#include <vector>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/matrix_expression.hpp>
namespace matrix {
template<typename T>
struct permutation {
typedef permutation self;
template<class M, class Permutation = self>
struct expression { // : ublas::matrix_expression< matrix<Matrix, Permutation> > {
typedef expression self;
typedef typename M::size_type size_type;
typedef typename M::reference reference;
typedef typename M::const_reference const_reference;
expression(M &A, const Permutation &P) : A(A), P(P) {}
size_type size1() const { return A.size1(); }
size_type size2() const { return A.size2(); }
reference operator()(size_type i, size_type j) {
return A(P[i], P[j]);
}
const_reference operator()(size_type i, size_type j) const {
return A(P[i], P[j]);
}
template<class E>
self& operator=(const E &e) { assign(*this, e); return *this; }
private:
M &A;
const Permutation &P;
};
const std::vector<T> &index;
permutation(const std::vector<T> &index) : index(index) {}
    T operator[](std::size_t i) const { return index[i]; }
template<class M>
expression<M,self> operator()(M &A) const {
return expression<M,self>(A, *this);
}
};
}
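// Minimal usage sketch (assumes a boost::numeric::ublas matrix `A`; the index
// values below are illustrative):
//   std::vector<std::size_t> idx = {2, 0, 1};
//   matrix::permutation<std::size_t> P(idx);
//   auto Ap = P(A);  // Ap(i, j) yields A(idx[i], idx[j])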
#endif /* _MATRIX_PERMUTE_HPP_ */
|
{"hexsha": "3598a3eca68357019f3c8fee116060b8ab99805a", "size": 1362, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "gamess/libqc/src/matrix/permute.hpp", "max_stars_repo_name": "andremirt/v_cond", "max_stars_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gamess/libqc/src/matrix/permute.hpp", "max_issues_repo_name": "andremirt/v_cond", "max_issues_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gamess/libqc/src/matrix/permute.hpp", "max_forks_repo_name": "andremirt/v_cond", "max_forks_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.24, "max_line_length": 83, "alphanum_fraction": 0.671071953, "num_tokens": 346}
|
#include <boost/core/enable_if.hpp>
#include <type_traits>
#include <iostream>
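// Overload enabled only when T is NOT a pointer (via disable_if_c): prints the value.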
template <typename T>
typename boost::disable_if_c<std::is_pointer_v<T>>::type print(T t)
{
std::cout << t << '\n';
}
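// Overload enabled only when T IS a pointer (via enable_if_c): prints the pointee.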
template <typename T>
typename boost::enable_if_c<std::is_pointer_v<T>>::type print(T t)
{
std::cout << *t << '\n';
}
int main()
{
int i{ 1 };
print(i);
int* p{ &i };
print(p);
}
|
{"hexsha": "acd11f173787505eb699dea43973713219e0d8a9", "size": 386, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "18_boost_enable_if/main.cpp", "max_stars_repo_name": "BorisSchaeling/boost-meta-programming-2020", "max_stars_repo_head_hexsha": "1bb70e88070953daa4bc19f91f891b43583df06e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "18_boost_enable_if/main.cpp", "max_issues_repo_name": "BorisSchaeling/boost-meta-programming-2020", "max_issues_repo_head_hexsha": "1bb70e88070953daa4bc19f91f891b43583df06e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "18_boost_enable_if/main.cpp", "max_forks_repo_name": "BorisSchaeling/boost-meta-programming-2020", "max_forks_repo_head_hexsha": "1bb70e88070953daa4bc19f91f891b43583df06e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.44, "max_line_length": 67, "alphanum_fraction": 0.6347150259, "num_tokens": 111}
|
import os
# thread and Theano settings must be exported before the numeric libraries are imported
os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cpu,floatX=float32,OMP_NUM_THREADS=4"
os.environ["OMP_NUM_THREADS"] = "4"
os.environ["MKL_NUM_THREADS"] = "4"
os.environ["NUMEXPR_NUM_THREADS"] = "4"
import numpy as np
from cae.hybridsvd import HybridSVD
X = np.random.rand(100, 200)
K = np.identity(X.shape[0])
S = np.identity(X.shape[1])
S[1, 6] = 1
S[6, 1] = 1
hsvd = HybridSVD(hybrid=True)
V_hybrid = hsvd.fit(X, K, S, alpha=0, beta=0,
abs_components=True)
hsvd = HybridSVD(hybrid=False)
V_pure = hsvd.fit(X, K, S)
# note: results comparison between pure and hybrid SVD
assert type(V_pure) == type(V_hybrid)
assert V_pure.shape == V_hybrid.shape
np.testing.assert_allclose(V_pure, V_hybrid)
S[1, 6] = 1
S[6, 1] = 1
hsvd = HybridSVD(hybrid=True, log=False)
V_hybrid = hsvd.fit(X, K, S, alpha=0.3, beta=0.3, log=False, seed=15)
print(V_hybrid)
|
{"hexsha": "5e84d214a8f0939d7537a46eb64a05d4f1279c19", "size": 888, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/decomposition.py", "max_stars_repo_name": "arsast/cae", "max_stars_repo_head_hexsha": "14021e4504f56a2214a0b4644fc8a0ce8863648a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-31T13:37:38.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-31T13:37:38.000Z", "max_issues_repo_path": "examples/decomposition.py", "max_issues_repo_name": "arsast/cae", "max_issues_repo_head_hexsha": "14021e4504f56a2214a0b4644fc8a0ce8863648a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/decomposition.py", "max_forks_repo_name": "arsast/cae", "max_forks_repo_head_hexsha": "14021e4504f56a2214a0b4644fc8a0ce8863648a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.3684210526, "max_line_length": 88, "alphanum_fraction": 0.6914414414, "include": true, "reason": "import numpy", "num_tokens": 315}
|
import numpy as np
import torch.nn as nn
import torch.distributions as td
class DenseModel(nn.Module):
    def __init__(
        self,
        input_size,
        output_size,
        info,
        dropout=0,
        init_params=None,
        zerone=False
    ):
"""
:param output_shape: tuple containing shape of expected output
:param input_size: size of input features
:param info: dict containing num of hidden layers, size of hidden layers, activation function, output distribution etc.
"""
super().__init__()
self._output_size = output_size
self._input_size = input_size
self._layers = info['layers']
self._node_size = info['node_size']
self.activation = info['activation']
self.dist = info['dist']
self.dropout = dropout
self.init_params = init_params
self.model = self.build_model(zerone)
def build_model(self, zerone=False):
model = [nn.Linear(self._input_size, self._node_size)]
if self.init_params:
nn.init.normal_(model[-1].weight, self.init_params[0], self.init_params[1])
model += [self.activation()]
for i in range(self._layers-1):
model += [nn.Linear(self._node_size, self._node_size)]
if self.init_params:
nn.init.normal_(model[-1].weight, self.init_params[0], self.init_params[1])
model += [nn.Dropout(self.dropout)]
model += [self.activation()]
model += [nn.Linear(self._node_size, self._output_size)]
if self.init_params:
nn.init.normal_(model[-1].weight, self.init_params[0], self.init_params[1])
if zerone:
model+= [nn.Sigmoid()]
return nn.Sequential(*model)
def forward(self, input):
dist_inputs = self.model(input)
return dist_inputs
# return td.independent.Independent(td.Normal(dist_inputs, 1), 1)
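# Hypothetical usage sketch (the `info` values below are illustrative, not
# taken from this repository's configs; assumes `import torch`):
#   info = {'layers': 3, 'node_size': 256, 'activation': nn.ReLU, 'dist': None}
#   net = DenseModel(input_size=10, output_size=4, info=info)
#   out = net(torch.randn(8, 10))  # -> tensor of shape (8, 4)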
|
{"hexsha": "d9772897509955d583f75009eca59f4fa1d8abdb", "size": 1991, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/dense.py", "max_stars_repo_name": "TachikakaMin/modular-rl", "max_stars_repo_head_hexsha": "d46c752eb1f8e6f7cdc073d6981e04e1804823df", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/dense.py", "max_issues_repo_name": "TachikakaMin/modular-rl", "max_issues_repo_head_hexsha": "d46c752eb1f8e6f7cdc073d6981e04e1804823df", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/dense.py", "max_forks_repo_name": "TachikakaMin/modular-rl", "max_forks_repo_head_hexsha": "d46c752eb1f8e6f7cdc073d6981e04e1804823df", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5660377358, "max_line_length": 127, "alphanum_fraction": 0.5986941236, "include": true, "reason": "import numpy", "num_tokens": 440}
|
import cv2
import numpy as np
cap = cv2.VideoCapture("example/test.mp4")
ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[...,1] = 255
while(1):
ret, frame2 = cap.read()
next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
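    # Farneback parameters (positional): pyr_scale=0.5, levels=3, winsize=15,
    # iterations=3, poly_n=5, poly_sigma=1.2, flags=0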
flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
print("Magnitude: ")
print( type(mag))
print( mag.shape[1])
print("Angle: ")
print( ang)
hsv[...,0] = ang*180/np.pi/2
hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
cv2.imshow('frame2',rgb)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
elif k == ord('s'):
cv2.imwrite('opticalfb.png',frame2)
cv2.imwrite('opticalhsv.png',rgb)
prvs = next
cap.release()
cv2.destroyAllWindows()
|
{"hexsha": "0ab042160ef0f69d159ee7c5b8346c8d21e63f5f", "size": 969, "ext": "py", "lang": "Python", "max_stars_repo_path": "testoptic.py", "max_stars_repo_name": "berktinaz/lip-movement-detector", "max_stars_repo_head_hexsha": "205fafd59779018ea7f973c678c84d653514fb02", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "testoptic.py", "max_issues_repo_name": "berktinaz/lip-movement-detector", "max_issues_repo_head_hexsha": "205fafd59779018ea7f973c678c84d653514fb02", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "testoptic.py", "max_forks_repo_name": "berktinaz/lip-movement-detector", "max_forks_repo_head_hexsha": "205fafd59779018ea7f973c678c84d653514fb02", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T08:49:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:49:05.000Z", "avg_line_length": 25.5, "max_line_length": 83, "alphanum_fraction": 0.5923632611, "include": true, "reason": "import numpy", "num_tokens": 319}
|
import numpy as np
import pandas as pd
import numba
import awkward as ak
from .common import reshape_awkward
from sklearn.cluster import DBSCAN
def find_cluster(interactions, cluster_size_space, cluster_size_time):
"""
Function which finds cluster within a event.
Args:
x (pandas.DataFrame): Subentries of event must contain the
fields, x,y,z,time
cluster_size_space (float): Max spatial distance between two points to
be inside a cluster [cm].
cluster_size_time (float): Max time distance between two points to be
inside a cluster [ns].
Returns:
awkward.array: Adds to interaction a cluster_ids record.
"""
# TODO is there a better way to get the df?
df = []
for key in ['x', 'y', 'z', 'ed', 't']:
df.append(ak.to_pandas(interactions[key], anonymous=key))
df = pd.concat(df, axis=1)
# Splitting into individual events and apply time clustering:
groups = df.groupby('entry')
df["time_cluster"] = np.concatenate(groups.apply(lambda x: simple_1d_clustering(x.t.values, cluster_size_time)))
# Splitting into individual events and time cluster and apply space clustering space:
groups = df.groupby(['entry', 'time_cluster'])
groups = groups.apply(lambda x: _find_cluster(x, cluster_size_space))
for i in np.unique(groups.index.get_level_values(0)):
add_to_cluster = 0
for j in range(len(groups[i])):
groups[i][j]+=add_to_cluster
add_to_cluster = np.max(groups[i][j])+1
df['cluster_id'] = np.concatenate(groups.values)
ci = df.loc[:, 'cluster_id'].values
offsets = ak.num(interactions['x'])
interactions['cluster_ids'] = reshape_awkward(ci, offsets)
return interactions
@numba.jit(nopython=True)
def simple_1d_clustering(data, scale):
"""
Function to cluster one dimensional data.
Args:
data (numpy.array): one dimensional array to be clusterd
scale (float): Max distance between two points to
be inside a cluster.
Returns:
clusters_undo_sort (np.array): Cluster Labels
"""
idx_sort = np.argsort(data)
idx_undo_sort = np.argsort(idx_sort)
data_sorted = data[idx_sort]
diff = data_sorted[1:] - data_sorted[:-1]
clusters = [0]
c = 0
for value in diff:
if value <= scale:
clusters.append(c)
elif value > scale:
c=c+1
clusters.append(c)
clusters_undo_sort = np.array(clusters)[idx_undo_sort]
return clusters_undo_sort
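# Worked example (values are illustrative):
#   simple_1d_clustering(np.array([0., 1., 10., 11.]), scale=2.0)
#   -> array([0, 0, 1, 1])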
def _find_cluster(x, cluster_size_space):
"""
Function which finds cluster within a event.
Args:
x (pandas.DataFrame): Subentries of event must contain the
fields, x,y,z,time
cluster_size_space (float): Max distance between two points to
be inside a cluster [cm].
Returns:
functon: to be used in groupby.apply.
"""
db_cluster = DBSCAN(eps=cluster_size_space, min_samples=1)
xprime = x[['x', 'y', 'z']].values
return db_cluster.fit_predict(xprime)
def cluster(inter, classify_by_energy=False):
"""
    Function which merges the interactions of each found cluster together.
To cluster events a weighted mean is computed for time and position.
The individual interactions are weighted by their energy.
The energy of clustered interaction is given by the total sum.
Events can be classified either by the first interaction in time in the
cluster or by the highest energy deposition.
Args:
inter (awkward.Array): Array containing at least the following
fields: x,y,z,t,ed,cluster_ids, type, parenttype, creaproc,
edproc.
Kwargs:
classify_by_energy (bool): If true events are classified
according to the properties of the highest energy deposit
within the cluster. If false cluster is classified according
to first interaction.
Returns:
awkward.Array: Clustered events with nest conform
classification.
"""
# Sort interactions by cluster_ids to simplify looping
inds = ak.argsort(inter['cluster_ids'])
inter = inter[inds]
# TODO: Better way to do this with awkward?
x = inter['x']
y = inter['y']
z = inter['z']
ed = inter['ed']
time = inter['t']
ci = inter['cluster_ids']
types = inter['type']
parenttype = inter['parenttype']
creaproc = inter['creaproc']
edproc = inter['edproc']
# Init result and cluster:
res = ak.ArrayBuilder()
_cluster(x, y, z, ed, time, ci,
types, parenttype, creaproc, edproc,
classify_by_energy, res)
return res.snapshot()
@numba.njit
def _cluster(x, y, z, ed, time, ci,
types, parenttype, creaproc, edproc,
classify_by_energy, res):
# Loop over each event
nevents = len(ed)
for ei in range(nevents):
# Init a new list for clustered interactions within event:
res.begin_list()
# Init buffers:
ninteractions = len(ed[ei])
x_mean = 0
y_mean = 0
z_mean = 0
t_mean = 0
ed_tot = 0
current_ci = 0 # Current cluster id
i_class = 0 # Index for classification (depends on users requirement)
# Set classifier start value according to user request, interactions
# are classified either by
if classify_by_energy:
# Highest energy
classifier_max = 0
else:
# First interaction
classifier_max = np.inf
# Loop over all interactions within event:
for ii in range(ninteractions):
if current_ci != ci[ei][ii]:
# Cluster Id has changed compared to previous interaction,
# hence we have to write out our result and empty the buffer,
# but first classify event:
A, Z, nestid = classify(types[ei][i_class],
parenttype[ei][i_class],
creaproc[ei][i_class],
edproc[ei][i_class])
# Write result, simple but extensive with awkward...
_write_result(res, x_mean, y_mean, z_mean,
ed_tot, t_mean, A, Z, nestid)
# Update cluster id and empty buffer
current_ci = ci[ei][ii]
x_mean = 0
y_mean = 0
z_mean = 0
t_mean = 0
ed_tot = 0
# Reset classifier:
if classify_by_energy:
classifier_max = 0
else:
classifier_max = np.inf
# We have to gather information of current cluster:
e = ed[ei][ii]
t = time[ei][ii]
x_mean += x[ei][ii] * e
y_mean += y[ei][ii] * e
z_mean += z[ei][ii] * e
t_mean += t * e
ed_tot += e
if classify_by_energy:
# In case we want to classify the event by energy.
if e > classifier_max:
i_class = ii
classifier_max = e
else:
# or by first arrival time:
if t < classifier_max:
i_class = ii
classifier_max = t
# Before we are done with this event we have to classify and
# write the last interaction
A, Z, nestid = classify(types[ei][i_class],
parenttype[ei][i_class],
creaproc[ei][i_class],
edproc[ei][i_class])
_write_result(res, x_mean, y_mean, z_mean,
ed_tot, t_mean, A, Z, nestid)
res.end_list()
infinity = np.iinfo(np.int16).max
classifier = np.zeros(7, dtype=[(('Interaction type', 'types'), np.dtype('<U30')),
(('Interaction type of the parent', 'parenttype'), np.dtype('<U30')),
(('Creation process', 'creaproc'), np.dtype('<U30')),
(('Energy deposit process', 'edproc'), np.dtype('<U30')),
(('Atomic mass number', 'A'), np.int16),
(('Atomic number', 'Z'), np.int16),
(('Nest Id for qunata generation', 'nestid'), np.int16)]
)
classifier['types'] = ['None', 'neutron', 'alpha', 'None','None', 'gamma', 'e-']
classifier['parenttype'] = ['None', 'None', 'None', 'Kr83[9.405]','Kr83[41.557]', 'None', 'None']
classifier['creaproc'] = ['None', 'None', 'None', 'None', 'None','None', 'None']
classifier['edproc'] = ['ionIoni', 'hadElastic', 'None', 'None','None', 'None', 'None']
classifier['A'] = [0, 0, 4, infinity, infinity, 0, 0]
classifier['Z'] = [0, 0, 2, 0, 0, 0, 0]
classifier['nestid'] = [0, 0, 6, 11, 11, 7, 8]
@numba.njit
def classify(types, parenttype, creaproc, edproc):
for c in classifier:
m = 0
m += (c['types'] == types) or (c['types'] == 'None')
m += (c['parenttype'] == parenttype) or (c['parenttype'] == 'None')
m += (c['creaproc'] == creaproc) or (c['creaproc'] == 'None')
m += (c['edproc'] == edproc) or (c['edproc'] == 'None')
if m == 4:
return c['A'], c['Z'], c['nestid']
# If our data does not match any classification make it a nest None type
# TODO: fix me
return infinity, infinity, 12
@numba.njit
def _write_result(res, x_mean, y_mean, z_mean,
ed_tot, t_mean, A, Z, nestid):
"""
Helper to write result into record array.
"""
res.begin_record()
res.field('x')
res.real(x_mean / ed_tot)
res.field('y')
res.real(y_mean / ed_tot)
res.field('z')
res.real(z_mean / ed_tot)
res.field('t')
res.real(t_mean / ed_tot)
res.field('ed')
res.real(ed_tot)
res.field('nestid')
res.integer(nestid)
res.field('A')
res.integer(A)
res.field('Z')
res.integer(Z)
res.end_record()
|
{"hexsha": "f2b4411b4b76238ace82ef6b8d2ca5bb29d88e6b", "size": 10208, "ext": "py", "lang": "Python", "max_stars_repo_path": "epix/clustering.py", "max_stars_repo_name": "XENONnT/epix", "max_stars_repo_head_hexsha": "d315551cdcf6b98898c6682952eb0def10646dc3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "epix/clustering.py", "max_issues_repo_name": "XENONnT/epix", "max_issues_repo_head_hexsha": "d315551cdcf6b98898c6682952eb0def10646dc3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 39, "max_issues_repo_issues_event_min_datetime": "2021-01-13T13:18:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T15:29:37.000Z", "max_forks_repo_path": "epix/clustering.py", "max_forks_repo_name": "XENONnT/epix", "max_forks_repo_head_hexsha": "d315551cdcf6b98898c6682952eb0def10646dc3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0266666667, "max_line_length": 116, "alphanum_fraction": 0.5659286834, "include": true, "reason": "import numpy,import numba", "num_tokens": 2462}
|
# -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
import numpy
from ._op import OpRunUnaryNum
class ReduceLogSumExp(OpRunUnaryNum):
atts = {'axes': [], 'keepdims': 1}
def __init__(self, onnx_node, desc=None, **options):
OpRunUnaryNum.__init__(self, onnx_node, desc=desc,
expected_attributes=ReduceLogSumExp.atts,
**options)
if isinstance(self.axes, numpy.ndarray):
if len(self.axes.shape) == 0 or self.axes.shape[0] == 0:
self.axes = None
else:
self.axes = tuple(self.axes)
elif self.axes in [[], tuple()]:
self.axes = None
elif isinstance(self.axes, list):
self.axes = tuple(self.axes)
def _run(self, data): # pylint: disable=W0221
res = numpy.log(numpy.sum(numpy.exp(data),
axis=tuple(self.axes) if self.axes else None,
keepdims=self.keepdims == 1,
dtype=data.dtype))
return (res, )
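    # Worked example (illustrative): for data = [[0., 0.]] with axes=(1,),
    # the result is log(exp(0) + exp(0)) = log(2) ~= 0.6931.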
|
{"hexsha": "275d619f9cc681dc9b4c8ae54c8179c281063c47", "size": 1146, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlprodict/onnxrt/ops_cpu/op_reduce_log_sum_exp.py", "max_stars_repo_name": "Exlsunshine/mlprodict", "max_stars_repo_head_hexsha": "9ea1a0fc85726822cae82c0b27b23b46f9ef351a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mlprodict/onnxrt/ops_cpu/op_reduce_log_sum_exp.py", "max_issues_repo_name": "Exlsunshine/mlprodict", "max_issues_repo_head_hexsha": "9ea1a0fc85726822cae82c0b27b23b46f9ef351a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mlprodict/onnxrt/ops_cpu/op_reduce_log_sum_exp.py", "max_forks_repo_name": "Exlsunshine/mlprodict", "max_forks_repo_head_hexsha": "9ea1a0fc85726822cae82c0b27b23b46f9ef351a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7428571429, "max_line_length": 79, "alphanum_fraction": 0.5261780105, "include": true, "reason": "import numpy", "num_tokens": 259}
|
/-
Copyright (c) 2021 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import category_theory.preadditive.basic
/-!
# Preadditive structure on functor categories
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
If `C` and `D` are categories and `D` is preadditive,
then `C ⥤ D` is also preadditive.
-/
open_locale big_operators
namespace category_theory
open category_theory.limits preadditive
variables {C D : Type*} [category C] [category D] [preadditive D]
instance functor_category_preadditive : preadditive (C ⥤ D) :=
{ hom_group := λ F G,
{ add := λ α β,
{ app := λ X, α.app X + β.app X,
naturality' := by { intros, rw [comp_add, add_comp, α.naturality, β.naturality] } },
zero := { app := λ X, 0, naturality' := by { intros, rw [zero_comp, comp_zero] } },
neg := λ α,
{ app := λ X, -α.app X,
naturality' := by { intros, rw [comp_neg, neg_comp, α.naturality] } },
sub := λ α β,
{ app := λ X, α.app X - β.app X,
naturality' := by { intros, rw [comp_sub, sub_comp, α.naturality, β.naturality] } },
add_assoc := by { intros, ext, apply add_assoc },
zero_add := by { intros, ext, apply zero_add },
add_zero := by { intros, ext, apply add_zero },
sub_eq_add_neg := by { intros, ext, apply sub_eq_add_neg },
add_left_neg := by { intros, ext, apply add_left_neg },
add_comm := by { intros, ext, apply add_comm } },
add_comp' := by { intros, ext, apply add_comp },
comp_add' := by { intros, ext, apply comp_add } }
namespace nat_trans
variables {F G : C ⥤ D}
/-- Application of a natural transformation at a fixed object,
as group homomorphism -/
@[simps] def app_hom (X : C) : (F ⟶ G) →+ (F.obj X ⟶ G.obj X) :=
{ to_fun := λ α, α.app X,
map_zero' := rfl,
map_add' := λ _ _, rfl }
@[simp] lemma app_zero (X : C) : (0 : F ⟶ G).app X = 0 := rfl
@[simp] lemma app_add (X : C) (α β : F ⟶ G) : (α + β).app X = α.app X + β.app X := rfl
@[simp] lemma app_sub (X : C) (α β : F ⟶ G) : (α - β).app X = α.app X - β.app X := rfl
@[simp] lemma app_neg (X : C) (α : F ⟶ G) : (-α).app X = -α.app X := rfl
@[simp] lemma app_nsmul (X : C) (α : F ⟶ G) (n : ℕ) : (n • α).app X = n • α.app X :=
(app_hom X).map_nsmul α n
@[simp] lemma app_zsmul (X : C) (α : F ⟶ G) (n : ℤ) : (n • α).app X = n • α.app X :=
(app_hom X : (F ⟶ G) →+ (F.obj X ⟶ G.obj X)).map_zsmul α n
@[simp] lemma app_sum {ι : Type*} (s : finset ι) (X : C) (α : ι → (F ⟶ G)) :
(∑ i in s, α i).app X = ∑ i in s, ((α i).app X) :=
by { rw [← app_hom_apply, add_monoid_hom.map_sum], refl }
end nat_trans
end category_theory
|
{"author": "leanprover-community", "repo": "mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "save_path": "github-repos/lean/leanprover-community-mathlib", "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/category_theory/preadditive/functor_category.lean"}
|
//
// Created by niko on 26.05.2021.
//
#include <redis_async/details/connection/transport.hpp>
#include <redis_async/error.hpp>
#include <boost/asio/connect.hpp>
#include <boost/bind.hpp>
namespace redis_async {
namespace details {
//****************************************************************************
        // tcp_transport
tcp_transport::tcp_transport(const io_service_ptr &service)
: resolver_(*service)
, socket(*service) {
}
void tcp_transport::connect_async(connection_options const &conn, const connect_callback &cb) {
if (conn.uri.empty()) {
throw error::connection_error("No connection uri!");
}
if (conn.schema != "tcp") {
throw error::connection_error("Wrong connection schema for TCP transport");
}
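            // parse "host[:port]" from the uri; fall back to the default Redis port 6379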
std::string host = conn.uri;
std::string svc = "6379";
std::string::size_type pos = conn.uri.find(':');
if (pos != std::string::npos) {
host = conn.uri.substr(0, pos);
svc = conn.uri.substr(pos + 1);
}
tcp::resolver::query query(host, svc);
resolver_.async_resolve(query,
boost::bind(&tcp_transport::handle_resolve, this, _1, _2, cb));
}
void tcp_transport::handle_resolve(redis_async::details::tcp_transport::error_code ec,
tcp::resolver::iterator endpoint_iterator,
const connect_callback &cb) {
if (!ec) {
boost::asio::async_connect(
socket, std::move(endpoint_iterator),
boost::bind(&tcp_transport::handle_connect, this, _1, cb));
} else {
cb(ec);
}
}
void tcp_transport::handle_connect(redis_async::details::tcp_transport::error_code ec, const connect_callback &cb) {
cb(ec);
}
bool tcp_transport::connected() const {
return socket.is_open();
}
void tcp_transport::close() {
if (socket.is_open())
socket.close();
}
//----------------------------------------------------------------------------
// socket_transport implementation
//----------------------------------------------------------------------------
socket_transport::socket_transport(const io_service_ptr &service)
: socket(*service) {
}
void socket_transport::connect_async(connection_options const &conn,
const connect_callback &cb) {
using asio_config::stream_protocol;
if (conn.schema != "unix") {
throw error::connection_error(
"Wrong connection schema for Unix Domain socket transport");
}
std::string uri = conn.uri;
if (uri.empty()) {
uri = "/tmp/.s.REDIS.6379";
}
socket.async_connect(stream_protocol::endpoint(uri), cb);
}
bool socket_transport::connected() const {
return socket.is_open();
}
void socket_transport::close() {
if (socket.is_open())
socket.close();
}
} // namespace details
} // namespace redis_async
|
{"hexsha": "98aecb486a13582e9c3c87984c0cc18cf4cc1898", "size": 3459, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/details/connection/transport.cpp", "max_stars_repo_name": "Greenvi4/redis_async", "max_stars_repo_head_hexsha": "477fec00fe080c1425ccbaca19ed67bce368c972", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/details/connection/transport.cpp", "max_issues_repo_name": "Greenvi4/redis_async", "max_issues_repo_head_hexsha": "477fec00fe080c1425ccbaca19ed67bce368c972", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/details/connection/transport.cpp", "max_forks_repo_name": "Greenvi4/redis_async", "max_forks_repo_head_hexsha": "477fec00fe080c1425ccbaca19ed67bce368c972", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2959183673, "max_line_length": 124, "alphanum_fraction": 0.4871350101, "num_tokens": 619}
|
import functools
import os
import traceback
import numpy as np
from colorama import Fore, Style
from taichi._lib import core as _ti_core
from taichi.lang import impl
from taichi.types.primitive_types import (f16, f32, f64, i8, i16, i32, i64, u8,
u16, u32, u64)
_has_pytorch = False
_env_torch = os.environ.get('TI_ENABLE_TORCH', '1')
if not _env_torch or int(_env_torch):
try:
import torch
_has_pytorch = True
except:
pass
def has_pytorch():
"""Whether has pytorch in the current Python environment.
Returns:
bool: True if has pytorch else False.
"""
return _has_pytorch
from distutils.spawn import find_executable
# Taichi itself uses llvm-10.0.0 to compile.
# There can be issues compiling CUDA with other clang++ versions.
_clangpp_candidates = ['clang++-10']
_clangpp_presence = None
for c in _clangpp_candidates:
if find_executable(c) is not None:
_clangpp_presence = find_executable(c)
def has_clangpp():
return _clangpp_presence is not None
def get_clangpp():
return _clangpp_presence
def is_taichi_class(rhs):
taichi_class = False
try:
if rhs._is_taichi_class:
taichi_class = True
except:
pass
return taichi_class
def to_numpy_type(dt):
"""Convert taichi data type to its counterpart in numpy.
Args:
dt (DataType): The desired data type to convert.
Returns:
DataType: The counterpart data type in numpy.
"""
if dt == f32:
return np.float32
if dt == f64:
return np.float64
if dt == i32:
return np.int32
if dt == i64:
return np.int64
if dt == i8:
return np.int8
if dt == i16:
return np.int16
if dt == u8:
return np.uint8
if dt == u16:
return np.uint16
if dt == u32:
return np.uint32
if dt == u64:
return np.uint64
if dt == f16:
return np.half
assert False
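# Example: to_numpy_type(f32) returns numpy.float32, to_numpy_type(i64)
# returns numpy.int64.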
def to_pytorch_type(dt):
"""Convert taichi data type to its counterpart in torch.
Args:
dt (DataType): The desired data type to convert.
Returns:
DataType: The counterpart data type in torch.
"""
# pylint: disable=E1101
if dt == f32:
return torch.float32
if dt == f64:
return torch.float64
if dt == i32:
return torch.int32
if dt == i64:
return torch.int64
if dt == i8:
return torch.int8
if dt == i16:
return torch.int16
if dt == u8:
return torch.uint8
if dt == f16:
return torch.float16
if dt in (u16, u32, u64):
raise RuntimeError(
f'PyTorch doesn\'t support {dt.to_string()} data type.')
assert False
def to_taichi_type(dt):
"""Convert numpy or torch data type to its counterpart in taichi.
Args:
dt (DataType): The desired data type to convert.
Returns:
DataType: The counterpart data type in taichi.
"""
if type(dt) == _ti_core.DataType:
return dt
if dt == np.float32:
return f32
if dt == np.float64:
return f64
if dt == np.int32:
return i32
if dt == np.int64:
return i64
if dt == np.int8:
return i8
if dt == np.int16:
return i16
if dt == np.uint8:
return u8
if dt == np.uint16:
return u16
if dt == np.uint32:
return u32
if dt == np.uint64:
return u64
if dt == np.half:
return f16
if has_pytorch():
# pylint: disable=E1101
if dt == torch.float32:
return f32
if dt == torch.float64:
return f64
if dt == torch.int32:
return i32
if dt == torch.int64:
return i64
if dt == torch.int8:
return i8
if dt == torch.int16:
return i16
if dt == torch.uint8:
return u8
if dt == torch.float16:
return f16
if dt in (u16, u32, u64):
raise RuntimeError(
f'PyTorch doesn\'t support {dt.to_string()} data type.')
raise AssertionError(f"Unknown type {dt}")
def cook_dtype(dtype):
if isinstance(dtype, _ti_core.DataType):
return dtype
if isinstance(dtype, _ti_core.Type):
return _ti_core.DataType(dtype)
if dtype is float:
return impl.get_runtime().default_fp
if dtype is int:
return impl.get_runtime().default_ip
raise ValueError(f'Invalid data type {dtype}')
def in_taichi_scope():
return impl.inside_kernel()
def in_python_scope():
return not in_taichi_scope()
def taichi_scope(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
assert in_taichi_scope(), \
f'{func.__name__} cannot be called in Python-scope'
return func(*args, **kwargs)
return wrapped
def python_scope(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
assert in_python_scope(), \
f'{func.__name__} cannot be called in Taichi-scope'
return func(*args, **kwargs)
return wrapped
def warning(msg, warning_type=UserWarning, stacklevel=1, print_stack=True):
"""Print a warning message. Note that the builtin `warnings` module is
unreliable since it may be suppressed by other packages such as IPython.
Args:
msg (str): message to print.
warning_type (Warning): type of warning.
stacklevel (int): warning stack level from the caller.
print_stack (bool): whether to print the stack
"""
msg = f'{warning_type.__name__}: {msg}'
if print_stack:
msg += f'\n{get_traceback(stacklevel)}'
print(Fore.YELLOW + Style.BRIGHT + msg + Style.RESET_ALL)
def get_traceback(stacklevel=1):
s = traceback.extract_stack()[:-1 - stacklevel]
return ''.join(traceback.format_list(s))
__all__ = []
|
{"hexsha": "feb18831d91cbf18127bea1c42e13b33788fc415", "size": 5961, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/taichi/lang/util.py", "max_stars_repo_name": "gaoxinge/taichi", "max_stars_repo_head_hexsha": "86d403f071b8505858763d4712b37cd71b89db91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/taichi/lang/util.py", "max_issues_repo_name": "gaoxinge/taichi", "max_issues_repo_head_hexsha": "86d403f071b8505858763d4712b37cd71b89db91", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/taichi/lang/util.py", "max_forks_repo_name": "gaoxinge/taichi", "max_forks_repo_head_hexsha": "86d403f071b8505858763d4712b37cd71b89db91", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5612648221, "max_line_length": 79, "alphanum_fraction": 0.6027512162, "include": true, "reason": "import numpy", "num_tokens": 1528}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 17-9-29 3:56 PM
# @Author : MaybeShewill-CV
# @Site : https://github.com/MaybeShewill-CV/CRNN_Tensorflow
# @File : test_shadownet.py
# @IDE: PyCharm Community Edition
"""
Use shadow net to recognize the scene text of a single image
"""
import argparse
import os.path as ops
import os
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.tools.freeze_graph import freeze_graph
import matplotlib.pyplot as plt
import glog as logger
from config import global_config
from crnn_model import crnn_model
from data_provider import tf_io_pipline_tools
from local_utils.custom_ctc_decoder import ctc_decode
from time import time
CFG = global_config.cfg
def init_args():
"""
    :return: parsed command-line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('--image_path', type=str,
help='Path to the image to be tested',
default='./data/test_images/test_01.jpg')
parser.add_argument('--weights_path', type=str,
help='Path to the pre-trained weights to use',
default='./model/crnn_syn90k/shadownet.ckpt')
parser.add_argument('-c', '--char_dict_path', type=str,
help='Directory where character dictionaries for the dataset were stored',
default='./data/char_dict/char_dict.json')
parser.add_argument('-o', '--ord_map_dict_path', type=str,
help='Directory where ord map dictionaries for the dataset were stored',
default='./data/char_dict/ord_map.json')
parser.add_argument('-v', '--visualize', type=args_str2bool, nargs='?', const=True,
help='Whether to display images',
default=True)
return parser.parse_args()
def args_str2bool(arg_value):
"""
:param arg_value:
:return:
"""
if arg_value.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif arg_value.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
def recognize(image_path, weights_path, char_dict_path, ord_map_dict_path, is_vis):
# def recognize(weights_path, char_dict_path, ord_map_dict_path, is_vis):
"""
:param image_path:
:param weights_path:
:param char_dict_path:
:param ord_map_dict_path:
:param is_vis:
:return:
"""
image = cv2.imdecode(np.fromfile(image_path, dtype=np.uint8), 1)
new_heigth = 32
rate = new_heigth / image.shape[0]
new_width = int(rate * image.shape[1])
image = cv2.resize(image, (new_width, new_heigth), interpolation=cv2.INTER_LINEAR)
image_vis = image
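    # scale pixel values from [0, 255] into [-1, 1]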
image = np.array(image, np.float32) / 127.5 - 1.0
inputdata = tf.placeholder(
dtype=tf.float32,
shape=[None, new_heigth, None, CFG.ARCH.INPUT_CHANNELS],
name='input'
)
net = crnn_model.ShadowNet(
phase='test',
hidden_nums=CFG.ARCH.HIDDEN_UNITS,
layers_nums=CFG.ARCH.HIDDEN_LAYERS,
num_classes=CFG.ARCH.NUM_CLASSES
)
inference_ret = net.inference(
inputdata=inputdata,
name='shadow_net',
reuse=False
)
# config tf saver
saver = tf.train.Saver()
decodes, _ = tf.nn.ctc_beam_search_decoder(
inputs=inference_ret,
sequence_length=new_width // 4 * np.ones(1),
merge_repeated=False
)
codec = tf_io_pipline_tools.TextFeatureIO(
char_dict_path=char_dict_path,
ord_map_dict_path=ord_map_dict_path
).reader
# config tf session
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
sess_config.gpu_options.allow_growth = CFG.TEST.TF_ALLOW_GROWTH
sess = tf.Session(config=sess_config)
with sess.as_default():
saver.restore(sess=sess, save_path=weights_path)
# debug_1 = sess.run(inference_ret, feed_dict={inputdata: [image]})
# debug_1 = debug_1.argmax(axis=2)[:,0]
# print(debug_1)
preds = sess.run(decodes, feed_dict={inputdata: [image]})
preds = codec.sparse_tensor_to_str(preds[0])
logger.info('Predict image {:s} result {:s}'.format(
ops.split(image_path)[1], preds[0])
)
if is_vis:
plt.figure('CRNN Model Demo')
plt.imshow(image_vis[:, :, (2, 1, 0)])
plt.show()
sess.close()
return
def define_graph():
inputdata = tf.placeholder(
dtype=tf.float32,
shape=[1, 32, None, CFG.ARCH.INPUT_CHANNELS],
name='input'
)
net = crnn_model.ShadowNet(
phase='test',
hidden_nums=CFG.ARCH.HIDDEN_UNITS,
layers_nums=CFG.ARCH.HIDDEN_LAYERS,
num_classes=CFG.ARCH.NUM_CLASSES
)
inference_ret = net.inference(
inputdata=inputdata,
name='shadow_net',
reuse=False
)
with tf.Session() as sess:
graph_def = sess.graph.as_graph_def()
with tf.gfile.FastGFile('../model/chinese/test.pb', 'wb') as f:
f.write(graph_def.SerializeToString())
def print_pb_debug(pb_path):
with tf.Session() as sess:
with open(pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
print(graph_def)
def freezeGraph():
freeze_graph(input_graph='../model/chinese/test.pb', # =some_graph_def.pb
input_saver="",
input_checkpoint='../model/chinese/shadownet_2019-03-28-11-58-32.ckpt-200000',
checkpoint_version=2,
output_graph='../model/chinese/out.pb',
input_binary=True,
restore_op_name="save/restore_all",
filename_tensor_name="save/Const:0",
initializer_nodes="",
variable_names_whitelist="",
variable_names_blacklist="",
input_meta_graph="",
saved_model_tags='serve',
clear_devices=True,
output_node_names='shadow_net/sequence_rnn_module/transpose_time_major',
)
def pb_recognize(pb_path, char_dict_path, ord_map_dict_path):
codec = tf_io_pipline_tools.TextFeatureIO(
char_dict_path=char_dict_path,
ord_map_dict_path=ord_map_dict_path
).reader
with tf.gfile.FastGFile(pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
with tf.Session() as session:
new_heigth = 32
while True:
path = input('please input the image path\n')
if path == '':
break
if not ops.exists(path):
print('invalid path')
continue
img_path_list = []
if ops.isfile(path):
img_path_list.append(path)
elif ops.isdir(path):
files = os.listdir(path)
for file in files:
img_path_list.append(ops.join(path, file))
for img_path in img_path_list:
image = cv2.imdecode(np.fromfile(img_path, np.uint8), 1)
rate = new_heigth / image.shape[0]
new_width = int(rate * image.shape[1])
image = cv2.resize(image, (new_width, new_heigth), interpolation=cv2.INTER_LINEAR)
image_vis = image
image = np.array(image, np.float32) / 127.5 - 1.0
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
prediction_tensor = session.graph.get_tensor_by_name(
'shadow_net/sequence_rnn_module/transpose_time_major:0')
# ----------------tf-----------------------------------
# decodes, _ = tf.nn.ctc_beam_search_decoder(
# inputs=prediction_tensor,
# sequence_length=new_width // 4 * np.ones(1),
# merge_repeated=False
# )
# out = session.run(decodes, {'input:0': image})
# text = codec.sparse_tensor_to_str(out[0])
# print(text[0])
# -----------------------my implement----------------------------------------
out = session.run(prediction_tensor, {'input:0': image})
out = out.argmax(axis=2)[:, 0]
out = ctc_decode(out)
text = codec.array_to_str(out)
print(text)
if __name__ == '__main__':
pass
# # init images
# args = init_args()
#
# # detect images
# recognize(
# image_path=args.image_path,
# weights_path=args.weights_path,
# char_dict_path=args.char_dict_path,
# ord_map_dict_path=args.ord_map_dict_path,
# is_vis=args.visualize
# )
#############################
#
# recognize(
# image_path='F:\Project\ocr-demo\测试图片-识别/TIM截图20190309134248.jpg',
# weights_path='../model/chinese/shadownet_2019-03-28-11-58-32.ckpt-200000',
# char_dict_path='../data/char_dict/char_dict.json',
# ord_map_dict_path='../data/char_dict/ord_map.json',
# is_vis=True
# )
# define_graph()
# freezeGraph()
pb_recognize(
'../model/chinese/out.pb',
'../data/char_dict/char_dict.json',
'../data/char_dict/ord_map.json')
|
{"hexsha": "c3bc160aaef0b2f761d9483ed6de8d985415bbee", "size": 9662, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/test_shadownet.py", "max_stars_repo_name": "LJXLJXLJX/CRNN_Tensorflow", "max_stars_repo_head_hexsha": "2d2ed639ecfd26da30c5784582388db5dcaec1f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/test_shadownet.py", "max_issues_repo_name": "LJXLJXLJX/CRNN_Tensorflow", "max_issues_repo_head_hexsha": "2d2ed639ecfd26da30c5784582388db5dcaec1f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/test_shadownet.py", "max_forks_repo_name": "LJXLJXLJX/CRNN_Tensorflow", "max_forks_repo_head_hexsha": "2d2ed639ecfd26da30c5784582388db5dcaec1f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0890410959, "max_line_length": 98, "alphanum_fraction": 0.589939971, "include": true, "reason": "import numpy", "num_tokens": 2259}
|
#!/usr/bin/python
usage="computeUptimeHist.py [--options] start end"
description="queries data and computes histograms based on uptime"
author = "reed.essick@ligo.org"
#-------------------------------------------------
import os
import sys
import numpy as np
#import snr_utils as psd_utils
from glue.ligolw import ligolw
from glue.ligolw import table
from glue.ligolw import lsctables
from glue.ligolw import utils as ligolw_utils
from laldetchar.idq import event
import subprocess as sp
import pickle
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from optparse import OptionParser
#-------------------------------------------------
parser = OptionParser(usage=usage, description=description)
parser.add_option("-v", "--verbose", default=False, action="store_true")
parser.add_option("-o", "--observatory", default=[], action="append", type="string")
#parser.add_option("-c", "--channel", default="GDS-CALIB_STRAIN", type="string")
#parser.add_option("-F", "--frame-type", default="HOFT_C00", type="string")
parser.add_option("-f", "--flag", default="DMT-ANALYSIS_READY:1", type="string")
parser.add_option("-u", "--segdb-url", default="https://segments.ligo.org", type="string")
parser.add_option("-O", "--output-dir", default=".", type="string")
#parser.add_option("-d", "--psd-dur", default=64, type="int")
#parser.add_option("-n", "--num-segs", default=16, type="int")
opts, args = parser.parse_args()
if len(args) != 2:
raise ValueError("must supply two arguments : %s"%(usage))
start, end = [float(_) for _ in args]
if not os.path.exists(opts.output_dir):
os.makedirs(opts.output_dir)
opts.observatory.sort()
#-------------------------------------------------
### query segments
segs = [[start, end]]
for ifo in opts.observatory:
segfilename = "%s/%s1_%s-%d-%d.xml"%(opts.output_dir, ifo, opts.flag.replace(":","_"), start, end-start)
cmd = "ligolw_segment_query_dqsegdb -t %s -q -a %s1:%s -s %d -e %d -o %s"%(opts.segdb_url, ifo, opts.flag, start, end, segfilename)
if opts.verbose:
print "querying %s segments for %s\n %s"%(opts.flag, ifo, cmd)
output = sp.Popen( cmd.split(), stdout=sp.PIPE, stderr=sp.PIPE ).communicate()
### iterate over segments, computing PSDs
xmldoc = ligolw_utils.load_filename(segfilename, contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))
segs = event.andsegments( [segs, [[row.start_time, row.end_time] for row in table.get_table(xmldoc, lsctables.SegmentTable.tableName)]] ) ### take intersection of segments
### write intersection to ascii file
segfilename = "%s/intersection-%d-%d.seg"%(opts.output_dir, start, end-start)
if opts.verbose:
print "found %d sec of joint livetime"%(event.livetime(segs))
print "writing : %s"%(segfilename)
file_obj = open(segfilename, 'w')
for s, e in segs:
print >> file_obj, s, e
file_obj.close()
#-------------------------------------------------
### build histograms
### will have to do this by hand (unfortunately)
### set up bins
day = 86400.
Nbins = 60*12 ### one bin per two minutes (720 bins across the 86400 s day)
bins = np.linspace(0, day, Nbins+1)
counts = np.zeros((Nbins,), dtype=float)
binDur = bins[1]-bins[0]
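### strategy: fold every science segment into a single 24-hour template
### (seconds since 00:00 GMT at tref) and accumulate the livetime each
### segment contributes to each time-of-day bin in `counts`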
### determine reference start time
tref = 1125964817 ###Fri Sep 11 00:00:00 GMT 2015
if opts.verbose:
print "computing binning"
for s, e in segs:
if opts.verbose:
print "processing : %d - %d"%(s, e)
relative_start = (s - tref)%(day) ### where the segment starts in the day
dur = e - s
for i in xrange(Nbins): ### find the bin we start in
if bins[i+1] > relative_start:
break
else:
raise ValueError("could not find relative start's position in binning")
seg = bins[i+1]-relative_start ### add the initial bit to the bin we start in
if seg > dur:
seg = dur
counts[i] += seg
dur -= seg
i = (i+1)%(Nbins)
while dur > 0: ### iterate until we run out of this segment, adding time to each bin as needed.
if dur < binDur:
counts[i] += dur
break
else:
counts[i] += binDur
dur -= binDur
i = (i+1)%(Nbins)
filename = "%s/UptimeHist-%s-%d-%d.pkl"%(opts.output_dir, opts.flag.replace(":","_"), start, end-start)
if opts.verbose:
print filename
file_obj = open(filename, "w")
pickle.dump(bins, file_obj)
pickle.dump(counts, file_obj)
file_obj.close()
counts /= np.sum(counts) ### compute fractional occupation
counts /= binDur/day ### normalize by the expected amount
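### after both normalizations a bin value of 1.0 means that bin received
### exactly its uniform-distribution share of the joint livetime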
### plot histogram
if opts.verbose:
print "plotting the histogram"
fig = plt.figure()
ax = fig.gca()
x = []
y = []
Y = []
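### build step-function arrays: each bin contributes both of its edges to x,
### a zero baseline to y, and its (duplicated) height to Y for fill_between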
for i in xrange(Nbins):
x += list(bins[i:i+2])
y += [0]*2
Y += [counts[i]]*2
ax.fill_between( x, y, Y )
ax.plot([0, 86400], [1, 1], 'k:', alpha=0.5)
ax.set_title('%d sec joint livetime out of %d sec'%(event.livetime(segs), end-start))
ax.set_xlabel('hours relative to Sept 11 00:00:00 GMT 2015')
ax.set_ylabel('fraction of joint livetime / uniform distribution')
ax.xaxis.set_ticks([i*3600 for i in xrange(25)])
ax.xaxis.set_ticklabels(["%d"%i for i in xrange(25)])
ax.set_xlim(xmin=0, xmax=86400)
figname = "%s/UptimeHist-%s-%d-%d.png"%(opts.output_dir, opts.flag.replace(":","_"), start, end-start)
if opts.verbose:
print "saving : %s"%(figname)
fig.savefig( figname )
plt.close( fig )
|
{"hexsha": "5a4298042c8fdcda0202317c15d8c0928133cde1", "size": 5365, "ext": "py", "lang": "Python", "max_stars_repo_path": "computeUptimeHist.py", "max_stars_repo_name": "reedessick/observationalBias", "max_stars_repo_head_hexsha": "3f39c57e7f30f34ba67056072882b69267d0aba1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "computeUptimeHist.py", "max_issues_repo_name": "reedessick/observationalBias", "max_issues_repo_head_hexsha": "3f39c57e7f30f34ba67056072882b69267d0aba1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "computeUptimeHist.py", "max_forks_repo_name": "reedessick/observationalBias", "max_forks_repo_head_hexsha": "3f39c57e7f30f34ba67056072882b69267d0aba1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6408839779, "max_line_length": 175, "alphanum_fraction": 0.6424976701, "include": true, "reason": "import numpy", "num_tokens": 1492}
|
"""
Hyperparameter tuning using (optionally random) Grid Search.
"""
import tigercontrol
from tigercontrol.utils.random import generate_key
import jax.numpy as np
import jax
from jax import jit, grad, random
import itertools
class GridSearch:
"""
Description: Hyperparameter tuning via (optionally randomized) grid search
over a finite search space, with a median-based early-stopping rule
"""
def __init__(self):
pass
def search(self, controller_id, controller_params, environment_id, environment_params, loss, search_space, trials=None,
smoothing=10, min_steps=100, verbose=0):
"""
Description: Search for optimal controller parameters
Args:
controller_id (string): id of controller
controller_params (dict): initial controller parameters dict (updated by search space)
environment_id (string): id of environment to try on
environment_params (dict): environment parameters dict
loss (function): a function mapping y_pred, y_true -> scalar loss
search_space (dict): dict mapping parameter names to a finite set of options
trials (int, None): number of random trials to sample from search space / try all parameters
smoothing (int): loss computed over smoothing number of steps to decrease variance
min_steps (int): minimum number of steps that the controller gets to run for
verbose (int): if 1, print progress and current parameters
"""
self.controller_id = controller_id
self.controller_params = controller_params
self.environment_id = environment_id
self.environment_params = environment_params
self.loss = loss
# store the order to test parameters
param_list = list(itertools.product(*[v for k, v in search_space.items()]))
index = np.arange(len(param_list)) # np.random.shuffle doesn't work directly on non-JAX objects
shuffled_index = random.shuffle(generate_key(), index)
param_order = [param_list[i] for i in shuffled_index] # shuffle order of elements
# helper controller
def _update_smoothing(l, val):
""" update smoothing loss list with new val """
return jax.ops.index_update(np.roll(l, 1), 0, val)
self._update_smoothing = jit(_update_smoothing)
# store optimal params and optimal loss
optimal_params, optimal_loss = {}, None
t = 0
for params in param_order: # loop over all params in the given order
t += 1
curr_params = controller_params.copy()
curr_params.update({k:v for k, v in zip(search_space.keys(), params)})
loss = self._run_test(curr_params, smoothing=smoothing, min_steps=min_steps, verbose=verbose)
if not optimal_loss or loss < optimal_loss:
optimal_params = curr_params
optimal_loss = loss
if t == trials: # break after trials number of attempts, unless trials is None
break
return optimal_params, optimal_loss
def _run_test(self, controller_params, smoothing, min_steps, verbose=0):
""" Run a single test with given controller params, using median stopping rule """
# initialize environment and controller
if verbose:
print("Currently testing parameters: " + str(controller_params))
controller = tigercontrol.controller(self.controller_id)
controller.initialize(**controller_params)
environment = tigercontrol.environment(self.environment_id)
if environment.has_regressors:
x, y_true = environment.reset(**self.environment_params)
else:
x = environment.reset(**self.environment_params)
t = 0
losses = [] # sorted losses, used to get median
smooth_losses = np.zeros(smoothing) # store previous losses to get smooth loss
while True: # run controller until worse than median loss, ignoring first 100 steps
t += 1
y_pred = controller.predict(x)
if environment.has_regressors:
controller.update(y_true)
loss = self.loss(y_pred, y_true)
else:
x = environment.step()
controller.update(x)
loss = self.loss(y_pred, x)
if t == 1: # fill all of smooth_losses with the first loss
for i in range(smoothing):
smooth_losses = self._update_smoothing(smooth_losses, loss)
else: # else replace only the oldest loss
smooth_losses = self._update_smoothing(smooth_losses, loss)
smooth_loss = np.mean(smooth_losses)
if t % smoothing == 0:
self._add_to_list(losses, smooth_loss)
if self._halting_rule(losses, smooth_loss) and t >= min_steps: break
if verbose:
print("Final loss: ", smooth_loss)
return smooth_loss
def _add_to_list(self, l, val):
""" add val to list l in sorted order """
i = 0
while i < len(l) and l[i] < val: i += 1
l.insert(i, val)
def _halting_rule(self, l, val, div=2): # div can be set to gamma > 2 to make stopping rule stricter
""" return True if val is greater than median of list """
if len(l) % 2 == 0:
return val >= (l[int(len(l)/div)] + l[int(len(l)/div - 1)]) / 2
return val >= l[int(len(l)/div)]
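# Minimal usage sketch (not part of the original module). The controller and
# environment ids below are assumptions for illustration only; substitute ids
# actually registered with tigercontrol.
if __name__ == "__main__":
    mse = lambda y_pred, y_true: np.mean((y_pred - y_true) ** 2)  # simple squared-error loss
    tuner = GridSearch()
    best_params, best_loss = tuner.search(
        controller_id="AR",                 # hypothetical controller id
        controller_params={"p": 3},         # hypothetical initial parameters
        environment_id="ARMA",              # hypothetical environment id
        environment_params={},
        loss=mse,
        search_space={"p": [1, 2, 3, 4]},   # finite set of options per parameter
        trials=3,                           # sample 3 of the 4 combinations at random
        verbose=1)
    print("best parameters:", best_params, "best loss:", best_loss)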
|
{"hexsha": "e7acb6a707a6e4fcbeaef1facce2cf0b856b4af9", "size": 5534, "ext": "py", "lang": "Python", "max_stars_repo_path": "tigercontrol/utils/autotuning/grid_search.py", "max_stars_repo_name": "MinRegret/TigerControl", "max_stars_repo_head_hexsha": "b1ca0617cbb2198f9d5cb37f725f3d7accbab08f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2019-11-08T06:01:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-20T04:50:43.000Z", "max_issues_repo_path": "tigercontrol/utils/autotuning/grid_search.py", "max_issues_repo_name": "johnhallman/ctsb", "max_issues_repo_head_hexsha": "b1ca0617cbb2198f9d5cb37f725f3d7accbab08f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 32, "max_issues_repo_issues_event_min_datetime": "2019-06-27T15:05:04.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-07T04:23:47.000Z", "max_forks_repo_path": "tigercontrol/utils/autotuning/grid_search.py", "max_forks_repo_name": "MinRegret/tigercontrol", "max_forks_repo_head_hexsha": "b1ca0617cbb2198f9d5cb37f725f3d7accbab08f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-09-30T17:06:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-12T22:39:34.000Z", "avg_line_length": 44.272, "max_line_length": 124, "alphanum_fraction": 0.6346223347, "include": true, "reason": "import jax,from jax", "num_tokens": 1171}
|
//---------------------------------------------------------------------------//
//!
//! \file Utility_RotationCylindricalSpatialCoordinateConversionPolicy.hpp
//! \author Alex Robinson
//! \brief Rotation cylindrical spatial coordinate conversion policy decl.
//!
//---------------------------------------------------------------------------//
#ifndef UTILITY_ROTATION_CYLINDRICAL_SPATIAL_COORDINATE_CONVERSION_POLICY_HPP
#define UTILITY_ROTATION_CYLINDRICAL_SPATIAL_COORDINATE_CONVERSION_POLICY_HPP
// Boost Includes
#include <boost/serialization/array_wrapper.hpp>
// FRENSIE Includes
#include "Utility_CylindricalSpatialCoordinateConversionPolicy.hpp"
namespace Utility{
//! The rotation cylindrical spatial coordinate conversion policy class
class RotationCylindricalSpatialCoordinateConversionPolicy : public CylindricalSpatialCoordinateConversionPolicy
{
public:
//! Constructor
RotationCylindricalSpatialCoordinateConversionPolicy( const double axis[3] );
//! Destructor
~RotationCylindricalSpatialCoordinateConversionPolicy()
{ /* ... */ }
//! Convert the spatial coordinates to cartesian coordinates
void convertToCartesianSpatialCoordinates(
const double primary_spatial_coord,
const double secondary_spatial_coord,
const double tertiary_spatial_coord,
double& x_spatial_coord,
double& y_spatial_coord,
double& z_spatial_coord ) const override;
//! Convert the cartesian coordinates to the spatial coordinate system
void convertFromCartesianSpatialCoordinates(
const double x_spatial_coord,
const double y_spatial_coord,
const double z_spatial_coord,
double& primary_spatial_coord,
double& secondary_spatial_coord,
double& tertiary_spatial_coord ) const override;
//! Convert the spatial coordinates to cartesian coordinates
using CylindricalSpatialCoordinateConversionPolicy::convertToCartesianSpatialCoordinates;
//! Convert the cartesian coordinates to the spatial coordinate system
using CylindricalSpatialCoordinateConversionPolicy::convertFromCartesianSpatialCoordinates;
private:
// The default constructor should not be used - if the z-axis of the local
// coordinate system w.r.t. the global Cartesian coordinate system aligns
// with the z-axis of the Cartesian coordinate system use the basic
// conversion policy
RotationCylindricalSpatialCoordinateConversionPolicy()
{ /* ... */ }
// We have C-arrays as members - hide the copy constructor and assignment
// operator
RotationCylindricalSpatialCoordinateConversionPolicy( const RotationCylindricalSpatialCoordinateConversionPolicy& that );
RotationCylindricalSpatialCoordinateConversionPolicy& operator=( const RotationCylindricalSpatialCoordinateConversionPolicy& that );
// Save the policy to an archive
template<typename Archive>
void save( Archive& ar, const unsigned version ) const;
// Load the policy from an archive
template<typename Archive>
void load( Archive& ar, const unsigned version );
BOOST_SERIALIZATION_SPLIT_MEMBER();
// Declare the boost serialization access object as a friend
friend class boost::serialization::access;
// The z-axis (unit vector) of the local cylindrical coordinate system w.r.t.
// the global Cartesian coordinate system
double d_axis[3];
};
// Save the policy to an archive
template<typename Archive>
void RotationCylindricalSpatialCoordinateConversionPolicy::save( Archive& ar, const unsigned version ) const
{
// Save the base class
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP( CylindricalSpatialCoordinateConversionPolicy );
// Save the local data
ar & boost::serialization::make_nvp( "d_axis", boost::serialization::make_array( d_axis, 3 ) );
}
// Load the policy from an archive
template<typename Archive>
void RotationCylindricalSpatialCoordinateConversionPolicy::load( Archive& ar, const unsigned version )
{
// Load the base class
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP( CylindricalSpatialCoordinateConversionPolicy );
// Load the local data
ar & boost::serialization::make_nvp( "d_axis", boost::serialization::make_array( d_axis, 3 ) );
}
} // end Utility namespace
BOOST_SERIALIZATION_CLASS_VERSION( RotationCylindricalSpatialCoordinateConversionPolicy, Utility, 0 );
BOOST_SERIALIZATION_CLASS_EXPORT_STANDARD_KEY( RotationCylindricalSpatialCoordinateConversionPolicy, Utility );
EXTERN_EXPLICIT_CLASS_SAVE_LOAD_INST( Utility, RotationCylindricalSpatialCoordinateConversionPolicy );
#endif // end UTILITY_ROTATION_CYLINDRICAL_SPATIAL_COORDINATE_CONVERSION_POLICY_HPP
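// Minimal usage sketch (illustrative only; assumes the definitions from the
// corresponding .cpp file are linked in):
//
//   const double axis[3] = {1.0, 0.0, 0.0}; // local z-axis along the global x-axis
//   Utility::RotationCylindricalSpatialCoordinateConversionPolicy policy( axis );
//
//   double x, y, z;
//   // convert (r, theta, z) = (1, 0, 0) in the rotated cylindrical system
//   policy.convertToCartesianSpatialCoordinates( 1.0, 0.0, 0.0, x, y, z );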
//---------------------------------------------------------------------------//
// end Utility_RotationCylindricalSpatialCoordinateConversionPolicy.hpp
//---------------------------------------------------------------------------//
|
{"hexsha": "4b7d26c3def88620cfe7e1371e56882d8086bcf8", "size": 5152, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "packages/utility/system/src/Utility_RotationCylindricalSpatialCoordinateConversionPolicy.hpp", "max_stars_repo_name": "bam241/FRENSIE", "max_stars_repo_head_hexsha": "e1760cd792928699c84f2bdce70ff54228e88094", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2019-11-14T19:58:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-04T17:44:09.000Z", "max_issues_repo_path": "packages/utility/system/src/Utility_RotationCylindricalSpatialCoordinateConversionPolicy.hpp", "max_issues_repo_name": "bam241/FRENSIE", "max_issues_repo_head_hexsha": "e1760cd792928699c84f2bdce70ff54228e88094", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 43.0, "max_issues_repo_issues_event_min_datetime": "2020-03-03T19:59:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T03:36:08.000Z", "max_forks_repo_path": "packages/utility/system/src/Utility_RotationCylindricalSpatialCoordinateConversionPolicy.hpp", "max_forks_repo_name": "bam241/FRENSIE", "max_forks_repo_head_hexsha": "e1760cd792928699c84f2bdce70ff54228e88094", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2020-02-12T17:37:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-08T18:59:51.000Z", "avg_line_length": 42.2295081967, "max_line_length": 134, "alphanum_fraction": 0.7010869565, "num_tokens": 952}
|
[STATEMENT]
lemma coupling_cond:
assumes "coupling Q V D S"
shows "(Q = Map.empty) \<longleftrightarrow> (\<forall>u. u\<notin>S \<longrightarrow> D u = \<infinity>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (Q = Map.empty) = (\<forall>u. u \<notin> S \<longrightarrow> D u = \<infinity>)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
coupling Q V D S
goal (1 subgoal):
1. (Q = Map.empty) = (\<forall>u. u \<notin> S \<longrightarrow> D u = \<infinity>)
[PROOF STEP]
by (fastforce simp add: coupling_def)
|
{"llama_tokens": 208, "file": "Prim_Dijkstra_Simple_Dijkstra_Abstract", "length": 2}
|
import unittest
import numpy as np
from bnpy.allocmodel.hmm.HMMUtil import FwdAlg_py, BwdAlg_py, SummaryAlg_py
from bnpy.allocmodel.hmm.HMMUtil import SummaryAlg_cpp, calcRespPair_fast
from bnpy.allocmodel.hmm.HMMUtil import calc_sub_Htable_forMergePair
from bnpy.init.FromTruth import convertLPFromHardToSoft
class TestSummaryAlg_K4T2(unittest.TestCase):
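"""Verify that the pure-Python and C++ HMM summary implementations agree (K=4 states, T=2 steps)."""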
def shortDescription(self):
return None
def setUp(self, K=4, T=2):
initPi = 1.0 / K * np.ones(K)
transPi = 1.0 / K * np.ones((K, K))
SoftEv = 10 * np.ones((T, K)) + np.random.rand(T, K)
self._setUpFromParams(initPi, transPi, SoftEv)
def _setUpFromParams(self, initPi, transPi, SoftEv):
fMsg, margPrObs = FwdAlg_py(initPi, transPi, SoftEv)
bMsg = BwdAlg_py(initPi, transPi, SoftEv, margPrObs)
self.initPi = initPi
self.transPi = transPi
self.SoftEv = SoftEv
self.fMsg = fMsg
self.bMsg = bMsg
self.margPrObs = margPrObs
self.K = initPi.size
self.T = SoftEv.shape[0]
def test_python_equals_cpp(self):
''' Test both versions of C++ and python, verify same value returned
'''
print ''
print '-------- python'
T1, H1, _ = SummaryAlg_py(self.initPi, self.transPi, self.SoftEv,
self.margPrObs, self.fMsg, self.bMsg)
if self.K < 5:
print H1
else:
print H1[:5, :5]
print '-------- cpp'
T2, H2, _ = SummaryAlg_cpp(self.initPi, self.transPi, self.SoftEv,
self.margPrObs, self.fMsg, self.bMsg)
if self.K < 5:
print H2
else:
print H2[:5, :5]
assert np.allclose(T1, T2)
assert np.allclose(H1, H2)
def test_all_possible_single_merges(self):
''' Iterate over all possible pairs (kA, kB), verify merge Htable correct.
'''
print ''
for kA in xrange(self.K):
for kB in xrange(kA + 1, self.K):
self.test_single_merge__python_equals_cpp(kA=kA, kB=kB)
def test_single_merge__python_equals_cpp(self, kA=0, kB=1):
''' Test both versions of C++ and python, verify same value returned
'''
print ''
mPairIDs = [(kA, kB)]
print '-------- python'
_, _, mH1 = SummaryAlg_py(self.initPi, self.transPi, self.SoftEv,
self.margPrObs, self.fMsg, self.bMsg, mPairIDs)
print mH1[:5, :5]
print '-------- cpp'
_, _, mH2 = SummaryAlg_cpp(self.initPi, self.transPi, self.SoftEv,
self.margPrObs, self.fMsg, self.bMsg, mPairIDs)
print mH2[:5, :5]
assert np.allclose(mH1, mH2)
def test_many_possible_multiple_merges(self):
for M in xrange(5, 10):
for seed in xrange(3):
self.test_tracking_multiple_merges__python_equals_cpp(
M=M,
seed=seed)
def test_tracking_multiple_merges__python_equals_cpp(self, M=3, seed=0):
''' Test both versions of C++ and python, verify same value returned
Here, we track M pairs simultaneously
Chosen by random shuffling from all possible valid pairs (kA < kB)
'''
print ''
mPairIDs = list()
for kA in xrange(self.K):
for kB in xrange(kA + 1, self.K):
mPairIDs.append((kA, kB))
PRNG = np.random.RandomState(seed)
PRNG.shuffle(mPairIDs)
mPairIDs = mPairIDs[:M]
print 'mPairIDs:', mPairIDs
print '-------- python'
_, _, mH1 = SummaryAlg_py(self.initPi, self.transPi, self.SoftEv,
self.margPrObs, self.fMsg, self.bMsg, mPairIDs)
print mH1[:10, :5]
print '-------- cpp'
_, _, mH2 = SummaryAlg_cpp(self.initPi, self.transPi, self.SoftEv,
self.margPrObs, self.fMsg, self.bMsg, mPairIDs)
print mH2[:10, :5]
print 'MaxError: ', np.max(np.abs(mH1 - mH2))
assert np.allclose(mH1, mH2, atol=1e-6, rtol=0)
class TestSummaryAlg_K4T100(TestSummaryAlg_K4T2):
def setUp(self, K=4, T=100):
parent = super(type(self), self)
parent.setUp(K, T)
class TestSummaryAlg_K22T55(TestSummaryAlg_K4T2):
def setUp(self, K=22, T=55):
parent = super(type(self), self)
parent.setUp(K, T)
class TestSummaryAlg_ToyData(TestSummaryAlg_K4T2):
def setUp(self):
T = 3000
import DDToyHMM
Data = DDToyHMM.get_data(seed=0, nDocTotal=1, T=T)
initPi = DDToyHMM.initPi
transPi = DDToyHMM.transPi
LP = dict(Z=Data.TrueParams['Z'])
LP = convertLPFromHardToSoft(LP, Data)
Keff = LP['resp'].shape[1]
assert LP['resp'].shape[0] == T
assert LP['Z'].shape[0] == T
K = initPi.size
SoftEv = np.zeros((T, K))
SoftEv[:, :Keff] = LP['resp']
SoftEv += 0.05
self._setUpFromParams(initPi, transPi, SoftEv)
|
{"hexsha": "f9aac1e3c3c8d7ad5429ebc0221ab692349d6916", "size": 5106, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/allocmodel/hmm/TestSummaryAlg.py", "max_stars_repo_name": "co2meal/-bnpy-dev", "max_stars_repo_head_hexsha": "74f69afde6c9dac8de4c074842df53ae87a15ac1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-07-02T03:50:23.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-16T03:23:55.000Z", "max_issues_repo_path": "tests/allocmodel/hmm/TestSummaryAlg.py", "max_issues_repo_name": "co2meal/-bnpy-dev", "max_issues_repo_head_hexsha": "74f69afde6c9dac8de4c074842df53ae87a15ac1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-07T01:33:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-07T01:33:06.000Z", "max_forks_repo_path": "tests/zzz_deprecated_unmaintained/allocmodel/hmm/TestSummaryAlg.py", "max_forks_repo_name": "birlrobotics/bnpy", "max_forks_repo_head_hexsha": "8f297d8f3e4a56088d7755134c329f63a550be9e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-01T13:21:18.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-01T13:21:18.000Z", "avg_line_length": 32.7307692308, "max_line_length": 82, "alphanum_fraction": 0.5720720721, "include": true, "reason": "import numpy", "num_tokens": 1424}
|
import streamlit as st
import numpy as np
import pandas as pd
import pickle
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import load_model
st.write('''
# Simple Stock Price Prediction
''')
st.sidebar.header('User Input Parameters')
df = pd.DataFrame({
'first column': ["MSFT 1- Day","NIFTY 1-Minute"]
})
option = st.sidebar.selectbox(
'Select prediction Interval',
df['first column'])
if option == "NIFTY 1-Minute":
'You selected:', option
df = pickle.load(open('df_nifty.pkl','rb'))
scaler = pickle.load(open('scaler.pkl', 'rb'))
model = load_model('model.h5')
with open('ftest.pkl', 'rb') as f:
f_test = pickle.load(f)
f_test = np.array(f_test)
f_test = np.reshape(f_test, (f_test.shape[0], f_test.shape[1],1))
st.sidebar.header('User Input Parameters')
def user_input_features():
date = st.sidebar.date_input('Select Date', datetime.date(2021,4,1))
date = date.day
hour = st.sidebar.slider('Hour of the day',9,17,9)
minute = st.sidebar.slider('Minute of the day',0,59,5)
return date,hour,minute
day,hour,minute = user_input_features()
f_predict = []
n_days = 1
minutes = 0
hours = 8 # trading hours 9:00-17:00
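# autoregressive rollout: each one-step prediction is appended to the 60-sample
# input window (oldest sample dropped) and fed back into the model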
for i in range (n_days):
while (hours > 0):
while (minutes < 60):
res = model.predict(f_test)
f_predict.append(res[0][0])
f_test = np.delete(f_test,[0],None)
f_test = np.append(f_test,res[0][0],None)
f_test = f_test.reshape(1,60,1)
minutes=minutes+1
hours=hours-1
minutes = 0
hours=8
minutes=0
hour=hour-8
st.header("Prediction:")
res = scaler.inverse_transform([[f_predict[day*hour*minute]]])
col1, col2,col3 = st.beta_columns(3)
original = df.open[0]
col1.subheader("Open Price")
col1.write(res[0][0])
grayscale = df.open[0]
col2.subheader("Prev Day Open")
a=scaler.inverse_transform([[f_predict[day-2]]])[0][0]
col2.write(a)
return1 = (a - res[0][0])/(a)*100
#fo = "{:.2f}".format(return1)
col3.subheader("Return %")
col3.write(return1)
st.header("Visualisation:")
idx = pd.date_range("2021-04-01", periods=len(f_predict), freq="D")
ts = pd.Series(range(len(idx)), index=idx)
r = scaler.inverse_transform([f_predict]).reshape(-1,1)
r = r.reshape(r.shape[0])
fig, ax = plt.subplots()
ax=sns.lineplot(x=df.DateAndTime,y=df['open'],color='r')
ax=sns.lineplot(x=idx,y=r)
st.pyplot(fig)
# MSFT 1-Day branch
if option == "MSFT 1- Day":
'You selected:', option
df = pickle.load(open('df_msft.pkl','rb'))
scaler = pickle.load(open('scalerMSFT.pkl', 'rb'))
model = load_model('modelMSFT.h5')
#model = joblib.load('modelMSFT.pkl')
with open('ftestMSFT.pkl', 'rb') as f:
f_test = pickle.load(f)
f_test = np.array(f_test)
f_test = np.reshape(f_test, (f_test.shape[0], f_test.shape[1],1))
def user_input_features():
date = st.sidebar.date_input('Select Date', datetime.date(2021,5,3))
#st.write(date.day)
return date.day
day = user_input_features()
f_predict = []
n_days = day
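# same autoregressive rollout as above, one step per day: push each prediction into the 60-step window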
for i in range (n_days):
res = model.predict(f_test)
f_predict.append(res[0][0])
f_test = np.delete(f_test,[0],None)
f_test = np.append(f_test,res[0][0],None)
f_test = f_test.reshape(1,60,1)
st.header("Prediction:")
res = scaler.inverse_transform([[f_predict[day-1]]])
col1, col2,col3 = st.beta_columns(3)
original = df.Open[0]
col1.subheader("Open Price")
col1.write(res[0][0])
grayscale = df.Open[0]
col2.subheader("Prev Day Open")
a=scaler.inverse_transform([[f_predict[day-2]]])[0][0]
col2.write(a)
return1 = (a - res[0][0])/(a)*100
#fo = "{:.2f}".format(return1)
col3.subheader("Return %")
col3.write(return1)
st.header("Visualisation:")
idx = pd.date_range("2021-05-04", periods=len(f_predict), freq="D")
ts = pd.Series(range(len(idx)), index=idx)
r = scaler.inverse_transform([f_predict]).reshape(-1,1)
r = r.reshape(r.shape[0])
fig, ax = plt.subplots()
ax=sns.lineplot(x=df.Date[df.Date.dt.year > 2019],y=df['Open'],color='r')
ax=sns.lineplot(x=idx,y=r)
st.pyplot(fig)
## Credits
if st.sidebar.button("Credits"):
st.sidebar.markdown("<h1 style='text-align: left; color: green;'>Welcome!</h1>",
unsafe_allow_html=True)
st.sidebar.subheader("Under Guidance of")
st.sidebar.info(
"""
1. Yasin Sir\n
2. Team @ [Technocolab](https://www.linkedin.com/company/technocolabs/)\n
""")
st.sidebar.subheader("Contributors/Project Team")
st.sidebar.info(
"1. [Nayana](https://www.linkedin.com/in/)\n"
"2. [Harshit Singh](https://www.linkedin.com/in/harshit-singh-097/)\n"
"3. [Yogendra](https://www.linkedin.com/in/)\n"
"4. [Snehashish](https://www.linkedin.com/in/)\n"
"5. [Pranay](https://www.linkedin.com/in//)"
)
st.sidebar.info("[contact us](https://www.technocolabs.tech/)\n")
|
{"hexsha": "33ad8371552fffdc86a8cd5dd4c3fd3f6577a5dc", "size": 5219, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py.py", "max_stars_repo_name": "Nayanasalur/Streamlit", "max_stars_repo_head_hexsha": "16bacf0594170626ff685cc3acd94e0d551ad8a4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py.py", "max_issues_repo_name": "Nayanasalur/Streamlit", "max_issues_repo_head_hexsha": "16bacf0594170626ff685cc3acd94e0d551ad8a4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py.py", "max_forks_repo_name": "Nayanasalur/Streamlit", "max_forks_repo_head_hexsha": "16bacf0594170626ff685cc3acd94e0d551ad8a4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.156424581, "max_line_length": 84, "alphanum_fraction": 0.6045219391, "include": true, "reason": "import numpy", "num_tokens": 1514}
|
import matplotlib
from matplotlib import style
import matplotlib.animation as animation
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import tkinter as tk
from tkinter import ttk
from matplotlib import pyplot as plt
import serial.tools.list_ports
import numpy as np
import threading
import time
from tkinter import filedialog
from tkinter import messagebox
from matplotlib.widgets import RectangleSelector
from datetime import datetime
matplotlib.use("TkAgg") # backend of matplotlib
style.use("ggplot")
LARGE_FONT = ("Cambria italic", 12) # Custom font
class TutorialPopUp(tk.Toplevel):
"""Popup widget. Displays the tutorial info"""
def __init__(self, parent, title=None):
tk.Toplevel.__init__(self, parent)
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.result = None
body = tk.Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
# self.buttonbox()
self.box = ttk.Frame(self)
# self.initial_focus = self.combo_box
self.s = ttk.Scrollbar(self.box)
self.t = tk.Text(self.box, height=10, width=85)
self.s.pack(side=tk.RIGHT, fill=tk.Y)
self.t.pack(side=tk.RIGHT, fill=tk.Y)
self.s.config(command=self.t.yview)
self.t.config(yscrollcommand=self.s.set)
self.read_tutorial_txt()
self.t.config(state=tk.DISABLED)
self.photo = tk.PhotoImage(file="tut_image.gif")
self.label1 = ttk.Label(self.box, image=self.photo)
self.label1.image = self.photo # keep a reference!
self.label1.pack()
self.bind("<Escape>", self.cancel)
self.w2 = ttk.Button(self.box, text="Exit", width=10, command=self.cancel)
self.w2.pack(side=tk.LEFT, padx=5, pady=5)
self.box.pack()
self.grab_set()
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.geometry("+%d+%d" % (parent.winfo_rootx() + 50,
parent.winfo_rooty() + 50))
self.initial_focus.focus_set()
self.wait_window(self)
#
# construction hooks
def read_tutorial_txt(self):
with open("tutorial.txt", "r") as tutorial:
lines = tutorial.read()
self.t.insert(tk.END, lines)
def body(self, master):
# create dialog body. return widget that should have
# initial focus. this method should be overridden
pass
def cancel(self, event=None):
# put focus back to the parent window
self.parent.focus_set()
self.destroy()
#
# command hooks
def validate(self):
return 1 # override
def apply(self):
pass # override
class DeleteKValuesPopUp(tk.Toplevel):
"""Popup widget. Displays a dropdown list for deleting Calibration values"""
def __init__(self, parent, title=None):
tk.Toplevel.__init__(self, parent)
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.result = None
body = tk.Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
# self.buttonbox()
self.box = ttk.Frame(self)
self.v = tk.StringVar()
self.box_value = tk.StringVar()
self.box_value.set("N/A")
self.combo_box_list = []
self.combo_box = ttk.Combobox(self.box, textvariable=self.box_value, font=LARGE_FONT)
self.get_k_values()
self.combo_box.configure(width=12, height=10)
self.combo_box.bind("<<ComboboxSelected>>", self.listboxfunc)
self.combo_box.pack()
self.initial_focus = self.combo_box
self.w1 = ttk.Button(self.box, text="Delete", width=10, command=self.ok, default=tk.ACTIVE)
self.w1.pack(side=tk.LEFT, padx=5, pady=5)
self.w2 = ttk.Button(self.box, text="Cancel", width=10, command=self.cancel)
self.w2.pack(side=tk.LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
self.box.pack()
self.grab_set()
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.geometry("+%d+%d" % (parent.winfo_rootx() + 50,
parent.winfo_rooty() + 50))
self.initial_focus.focus_set()
self.wait_window(self)
#
# construction hooks
def body(self, master):
# create dialog body. return widget that should have
# initial focus. this method should be overridden
pass
def ok(self, event=None):
if not self.validate():
self.initial_focus.focus_set() # put focus back
return
f = open("kvalues.txt", "r")
lines = f.readlines()
f.close()
f = open("kvalues.txt", "w")
for line in lines:
if line != self.v.get():
f.write(line)
f.close()
self.withdraw()
self.update_idletasks()
self.apply()
self.cancel()
def cancel(self, event=None):
# put focus back to the parent window
self.parent.focus_set()
self.destroy()
#
# command hooks
def validate(self):
return 1 # override
def apply(self):
pass # override
def get_k_values(self):
self.combo_box_list.clear()
# self.listbox.delete(0, tk.END)
with open("kvalues.txt", "r") as kvalues:
lines = kvalues.readline()
while lines:
self.combo_box_list.append(lines)
# self.listbox.insert(tk.END, lines)
lines = kvalues.readline()
self.combo_box['values'] = self.combo_box_list
def listboxfunc(self, event):
# kkvalue = self.listbox.get(tk.ACTIVE)
kkvalue = self.combo_box.get()
self.v.set(kkvalue)
class MyPlots(plt.Figure):
"""Main plot class. Takes the data from the datasource and plots it live"""
def __init__(self, datasource):
plt.Figure.__init__(self)
self.DataAcquisition = datasource
self.cList = np.array([], dtype=float)
self.vList = np.array([], dtype=float)
self.axes1 = self.add_subplot(211)
self.suptitle("Measurement", fontsize=20)
self.list1 = np.array([], dtype=float)
self.list2 = np.array([], dtype=float)
self.axes1.set_gid("A")
self.axes2 = self.add_subplot(212)
self.axes2.set_gid("B")
self.m = 0.0
self.c = 0.0
self.start_stop = True
def animate(self, i):
'''Plots a live graph.'''
if self.start_stop:
self.axes1.clear()
# self.axes1.set_ylim([-500, 2500])
self.axes1.set_ymargin(0.2)
self.axes1.set_ylabel("Voltage [mV]")
self.axes1.set_xlabel("Time [s]")
try:
self.axes1.plot(self.DataAcquisition.timeList, self.DataAcquisition.voltageList, "#00A3E0")
except ValueError:
print("timelist len : ", len(self.DataAcquisition.timeList))
print("vlist len : ", len(self.DataAcquisition.voltageList))
pass
self.axes1.plot(self.list2, self.list1, "ro")
self.axes2.clear()
self.axes2.set_ymargin(0.3)
# self.axes2.set_ylim([-100, 2500])
# self.axes2.set_xlim([0, 20])
self.axes2.set_xmargin(0.3)
self.axes2.set_ylabel("Voltage [mV]")
self.axes2.set_xlabel("Concentration [mg/ml]")
self.axes2.plot(self.cList, self.vList, "ro")
if self.m:
self.axes2.plot(self.cList, self.m * self.cList + self.c, 'b')
def linefit(self):
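'''Least-squares fit of vList = m*cList + c: stack [cList, 1] as the design matrix and solve with np.linalg.lstsq.'''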
a = np.vstack([self.cList, np.ones(len(self.cList))]).T
self.m, self.c = np.linalg.lstsq(a, self.vList)[0]
def find_nearest(self, value):
'''Finds the index of the value in cList nearest to the given value and removes that data point from cList and vList.'''
idx = (np.abs(self.cList - value)).argmin()
self.cList = np.delete(self.cList, idx)
self.vList = np.delete(self.vList, idx)
def find_nearest_sorted(self, value):
'''Finds the insertion index of the given value in the sorted time array (via np.searchsorted), used as a nearest-match index.'''
idx = np.searchsorted(self.DataAcquisition.timeList, value, side="left")
return idx
def get_volt_and_concent(self, concent, time1, time2):
'''
Takes the entered concentration value as concent and takes the average of the selected voltage
values. Saves the concent and average voltage to the corresponding arrays, to be plotted.
'''
self.cList = np.append(self.cList, concent)
idx1 = self.find_nearest_sorted(time1)
idx2 = self.find_nearest_sorted(time2)
average = np.mean(self.DataAcquisition.voltageList[idx1:idx2])
self.vList = np.append(self.vList, [float(average)])
self.list1 = np.append(self.list1, self.DataAcquisition.voltageList[idx1:idx2])
self.list2 = np.append(self.list2, self.DataAcquisition.timeList[idx1:idx2])
self.axes1.plot(self.list2, self.list1, "ro")
# self.axes1.plot(self.list2, self.list1, "blue", linewidth=2)
def clear_plots(self):
'''Clears the data lists in the data source, deletes all values from its own arrays and clears the subplots'''
self.DataAcquisition.clear_data_lists()
self.m = 0.0
self.vList = np.array([], dtype=float)
self.cList = np.array([], dtype=float)
self.list1 = np.array([], dtype=float)
self.list2 = np.array([], dtype=float)
self.axes1.clear()
self.axes2.clear()
def get_slope(self):
return self.m
def plot_static_data(self):
'''Plots the not-live data, loaded from the datasource'''
self.start_stop = False
self.DataAcquisition.load_data()
print(len(self.DataAcquisition.voltageList), "len voltage")
print(len(self.DataAcquisition.timeList), "len time")
self.axes1.clear()
self.axes1.set_ymargin(0.2)
self.axes1.set_ylabel("Voltage [mV]")
self.axes1.set_xlabel("Time [s]")
self.axes1.plot(self.DataAcquisition.timeList, self.DataAcquisition.voltageList, "#00A3E0")
def flip_start_stop(self):
self.start_stop = not self.start_stop
class RemoveDataPointPopup(tk.Toplevel):
'''Popup widget. Asks for confirmation to remove the clicked data point on the second plot'''
def __init__(self, parent, title=None):
tk.Toplevel.__init__(self, parent)
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.result = None
body = tk.Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
# self.buttonbox()
self.box = ttk.Frame(self)
self.w1 = ttk.Button(self.box, text="OK", width=10, command=self.ok, default=tk.ACTIVE)
self.w1.pack(side=tk.LEFT, padx=5, pady=5)
self.w2 = ttk.Button(self.box, text="Cancel", width=10, command=self.cancel)
self.w2.pack(side=tk.LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
self.box.pack()
self.grab_set()
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.geometry("+%d+%d" % (parent.winfo_rootx() + 450,
parent.winfo_rooty() + 450))
self.initial_focus.focus_set()
self.wait_window(self)
def body(self, master):
pass
def ok(self, event=None):
if not self.validate():
self.initial_focus.focus_set() # put focus back
return
self.parent.find_and_remove_nearest()
self.withdraw()
self.update_idletasks()
self.apply()
self.cancel()
def cancel(self, event=None):
# put focus back to the parent window
self.parent.focus_set()
self.destroy()
#
# command hooks
def validate(self):
return 1 # override
def apply(self):
pass # override
class EnterConcentrationPopup(tk.Toplevel):
'''Popup widget. Takes the entered concentration value to processed '''
def __init__(self, parent, title=None):
tk.Toplevel.__init__(self, parent)
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.result = None
# body = tk.Frame(self)
# self.initial_focus = self.body(body)
# body.pack(padx=15, pady=5)
# self.buttonbox()
self.box = ttk.Frame(self)
self.e = ttk.Entry(self.box)
self.e.pack(padx=6)
self.initial_focus = self.e
self.w1 = ttk.Button(self.box, text="OK", width=10, command=self.ok, default=tk.ACTIVE)
self.w1.pack(side=tk.LEFT, padx=6, pady=5)
self.w2 = ttk.Button(self.box, text="Cancel", width=10, command=self.cancel)
self.w2.pack(side=tk.LEFT, padx=6, pady=5)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
self.box.pack()
self.grab_set()
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.geometry("+%d+%d" % (parent.winfo_rootx() + 200,
parent.winfo_rooty() + 200))
self.initial_focus.focus_set()
self.wait_window(self)
#
# construction hooks
def body(self, master):
# create dialog body. return widget that should have
# initial focus. this method should be overridden
pass
# def buttonbox(self):
# # add standard button box. override if you don't want the
# # standard buttons
#
# box = ttk.Frame(self)
# e = ttk.Entry(box)
#
# e.pack(padx=5)
#
# w1 = ttk.Button(box, text="OK", width=10, command=self.ok, default=tk.ACTIVE)
# w1.pack(side=tk.LEFT, padx=5, pady=5)
# w2 = ttk.Button(box, text="Cancel", width=10, command=self.cancel)
# w2.pack(side=tk.LEFT, padx=5, pady=5)
#
# self.bind("<Return>", self.ok)
# self.bind("<Escape>", self.cancel)
#
# box.pack()
#
# #
# standard button semantics
def ok(self, event=None):
if not self.validate():
self.initial_focus.focus_set() # put focus back
return
concent = float(self.e.get())
self.parent.get_volt_and_concent(concent)
self.withdraw()
self.update_idletasks()
self.apply()
self.cancel()
def cancel(self, event=None):
# put focus back to the parent window
self.parent.focus_set()
self.destroy()
#
# command hooks
def validate(self):
return 1 # override
def apply(self):
pass # override
class DataAcquisition:
'''Data source. Finds the corresponding serial port and starts taking data. '''
def __init__(self):
self.start_flag = True
self.comPort = self.find_port()
self.arduino = serial.Serial(self.comPort, 9600, timeout=.1)
self.voltageList = np.array([], dtype=float)
self.timeList = np.array([], dtype=float)
self.time = 0.0
self.now = time.time()
self.before = time.time()
self.start_read_port()
def find_port(self):
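'''Scans the available serial ports and returns the first one whose description contains "Arduino".'''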
ports = list(serial.tools.list_ports.comports())
for eachPort in ports:
print(eachPort)
if "Arduino" in eachPort[1]:
print(eachPort[0])
portname = eachPort[0]
return portname
def read_port(self):
'''
Takes voltage values from the serial port in milivolt.
Calculates the time passed in the meanwhile.
Saves the data to corresponding numpy arrays
'''
while True:
data = self.arduino.readline()
if data and self.start_flag:
try:
voltage = float((data.decode())[0:-2])
# print(voltage)
self.voltageList = np.append(self.voltageList, [voltage])
self.now = time.time()
difference = self.now - self.before
self.time = difference + self.time
self.timeList = np.append(self.timeList, [self.time])
self.before = time.time()
except ValueError:
print("conversion failed")
time.sleep(0.3)
else:
print("serial stopped")
def start_read_port(self):
t1 = threading.Thread(target=self.read_port)
t1.setDaemon(True)
t1.start()
def clear_data_lists(self):
'''Empties the numpy arrays '''
self.time = 0.0
self.timeList = np.array([], dtype=float)
self.voltageList = np.array([], dtype=float)
def save_data(self):
'''Saves the values in the numpy arrays to a txt file via a filedialog'''
f = filedialog.asksaveasfile(mode='w', defaultextension=".txt")
if f is None: # asksaveasfile return `None` if dialog closed with "cancel".
return
for (volt, time1) in zip(self.voltageList, self.timeList):
f.write("{}".format(volt))
f.write(" {:.3f} \n".format(time1))
f.close()
def load_data(self):
'''Loads offline data from a txt file. Pauses reading from serial port'''
self.stop_data_acquisition()
self.clear_data_lists()
fname = filedialog.askopenfilename(filetypes=(("Text Files", ".txt"),
("All Files", "*.*")))
if fname:
print(fname)
with open(fname, "r") as myfile:
lines = myfile.readline()
while lines:
lines = myfile.readline()
if lines:
self.voltageList = np.append(self.voltageList, [float(lines.split(" ")[0])])
self.timeList = np.append(self.timeList, [float(lines.split(" ")[1])])
def stop_data_acquisition(self):
self.start_flag = False
def start_data_acquisition(self):
self.start_flag = True
self.before = time.time()
class CalibrationFrame(tk.Frame):
'''Main GUI frame for the calibration page. Takes another frame (CalibrationPage) as master'''
def __init__(self, master):
tk.Frame.__init__(self, master)
self.master = master
# self.config(relief="sunken", borderwidth=1)
self.grid_rowconfigure(1, weight=1)
# self.grid_columnconfigure(0, weight=1)
self.v_dndc = tk.StringVar()
self.v_dndc.set("N/A")
self.v1 = tk.StringVar()
self.v1.set("N/A")
self.v2_k = tk.StringVar()
self.v2_k.set("?")
self.klist = []
self.formula_frame = tk.Frame(self)
self.formula_frame.grid(row=0, column=0, columnspan=2)
self.label1 = ttk.Label(self.formula_frame, text="mV = \u212A c", font=LARGE_FONT)
self.label1.grid(row=0, column=0, columnspan=2, pady=(20, 20), padx=(25, 0))
self.label_dc = ttk.Label(self.formula_frame, text="dc", font=LARGE_FONT)
self.label_dc.grid(column=0, row=0, columnspan=2, pady=(24, 0), padx=(115, 0), sticky="w")
self.label_ = ttk.Label(self.formula_frame, text="___", font=LARGE_FONT)
self.label_.grid(column=0, row=0, columnspan=2, pady=(0, 12), padx=(115, 0), sticky="w")
self.label_dn = ttk.Label(self.formula_frame, text="dn", font=LARGE_FONT)
self.label_dn.grid(column=0, row=0, columnspan=2, pady=(0, 24), padx=(115, 0), sticky="w")
self.label123 = ttk.Label(self.formula_frame, text="output", font=("Cambria italic", 8))
self.label123.grid(row=0, columnspan=2, pady=(30, 20), padx=(49, 0), sticky="w")
self.label4 = ttk.Label(self, text=" =", font=LARGE_FONT)
self.label4.grid(column=0, row=1, sticky="w", pady=(0, 0), padx=(20, 0))
self.label_dc = ttk.Label(self, text="dc", font=LARGE_FONT)
self.label_dc.grid(column=0, row=1, pady=(22, 0), sticky="w")
self.label_ = ttk.Label(self, text="___", font=LARGE_FONT)
self.label_.grid(column=0, row=1, pady=(0, 10), sticky="w")
self.label_dn = ttk.Label(self, text="dn", font=LARGE_FONT)
self.label_dn.grid(column=0, row=1, pady=(0, 20), sticky="w")
self.label412 = ttk.Label(self, text="[ml/g]", font=LARGE_FONT)
self.label412.grid(column=2, row=1, sticky="w", pady=(0, 0))
self.box_value = tk.StringVar()
self.box_value.set("N/A")
self.combo_box_list = []
self.combo_box = ttk.Combobox(self, textvariable=self.box_value, font=LARGE_FONT)
self.get_dndc_values()
self.combo_box.configure(width=12, height=10)
self.combo_box.bind("<<ComboboxSelected>>", self.combobox_func)
self.combo_box.grid(column=1, row=1, pady=(20, 20))
# self.label2 = ttk.Label(self, text="c = ", font=LARGE_FONT)
# self.label2.grid(column=0, row=2, sticky="w", pady=(0, 20))
# self.label23 = ttk.Label(self, text="299792458 m/s", font=LARGE_FONT)
# self.label23.grid(column=1, row=2, sticky="w", pady=(0, 20))
self.label5 = ttk.Label(self, text="Slope = ", font=LARGE_FONT)
self.label5.grid(column=0, row=3, sticky="sw", pady=(20, 20))
self.label3 = ttk.Label(self, textvariable=self.v1, font=LARGE_FONT)
self.label3.grid(column=1, row=3, sticky="sw", pady=(20, 20))
self.label41 = ttk.Label(self, text="\u212A = ", font=LARGE_FONT)
self.label41.grid(column=0, row=4, sticky="w", pady=(0, 20))
self.label411 = ttk.Label(self, textvariable=self.v2_k, font=LARGE_FONT)
self.label411.grid(column=1, row=4, sticky="w", pady=(0, 20))
# self.btn2 = ttk.Button(self, text="Fit the Line!", command=self.linefit)
# self.btn2.grid(column=0, row=5, sticky="ew", columnspan=3)
self.btn = ttk.Button(self, text="Solve the equation!", command=self.solve_equation)
self.btn.grid(column=0, row=6, sticky="ew", columnspan=3)
# self.btn.configure(state="disabled")
self.btn4 = ttk.Button(self, text="Save calibration", command=self.save_k_value)
self.btn4.grid(column=0, row=7, columnspan=3, sticky="ew")
self.btn4.configure(state="disabled")
def save_k_value(self):
'''Saves the calculated calibration value to a txt file. '''
with open("kvalues.txt", 'a') as kvalues:
string = self.v2_k.get() + " " + datetime.now().strftime("%d-%m-%y %H:%M") + "\n"
kvalues.write(string)
def print_slope(self):
slope = self.master.get_slope()
ak = "{:8.4f}".format(slope)
self.v1.set(ak)
def linefit(self):
self.master.plot_linefit()
self.print_slope()
def solve_equation(self):
'''Solves the equation to calculate the calibration values. Enables the save button'''
self.linefit()
self.btn4.configure(state="enabled")
ak = self.combo_box.get().split(' ')[0]
print("ak, ", ak)
self.v_dndc.set(ak)
floatt = self.v_dndc.get() # the dn/dc value entered by the user
print(floatt)
try:
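# calibration constant from mV = K * (dn/dc) * c  =>  K = slope / (dn/dc)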
slope = self.master.get_slope()
answer = slope / float(floatt)
self.v2_k.set("{:.4f}".format(answer))
except ValueError:
print("Value error during solve_equation!")
def quit(self):
self.destroy()
def combobox_func(self, event):
kkvalue = self.combo_box.get()
self.v_dndc.set(kkvalue)
def get_dndc_values(self):
'''Reads the saved dn_dc values from a text file and displays them in the combobox.'''
self.combo_box_list.clear()
# self.listbox.delete(0, tk.END)
with open("dndc_list.txt", "r") as kvalues:
lines = kvalues.readline()
while lines:
self.combo_box_list.append(lines)
# self.listbox.insert(tk.END, lines)
lines = kvalues.readline()
self.combo_box['values'] = self.combo_box_list
class MeasurementFrame(tk.Frame):
'''Main GUI frame for the measurement page. Takes another frame (MeasurePage) as master. '''
def __init__(self, master):
tk.Frame.__init__(self, master)
self.master = master
# self.config(relief="sunken", borderwidth=1)
self.grid_rowconfigure(1, weight=1)
# self.grid_columnconfigure(0, weight=1)
self.v = tk.StringVar()
self.v1 = tk.StringVar()
self.v1.set("N/A")
self.v2 = tk.StringVar()
self.v.set("N/A")
self.v2.set("? [ml/g]")
# self.label1 = ttk.Label(self, text="Mv=K dn/dc c", font=("Cambria math", 12))
# self.label1.grid(row=0, column=0, columnspan=2)
self.formula_frame = tk.Frame(self)
self.formula_frame.grid(row=0, column=0, columnspan=2)
self.label1 = ttk.Label(self.formula_frame, text="mV = \u212A c", font=LARGE_FONT)
self.label1.grid(row=0, column=0, columnspan=2, pady=(20, 20), padx=(25, 0))
self.label_dc = ttk.Label(self.formula_frame, text="dc", font=LARGE_FONT)
self.label_dc.grid(column=0, row=0, columnspan=2, pady=(24, 0), padx=(115, 0), sticky="w")
self.label_ = ttk.Label(self.formula_frame, text="___", font=LARGE_FONT)
self.label_.grid(column=0, row=0, columnspan=2, pady=(0, 12), padx=(115, 0), sticky="w")
self.label_dn = ttk.Label(self.formula_frame, text="dn", font=LARGE_FONT)
self.label_dn.grid(column=0, row=0, columnspan=2, pady=(0, 24), padx=(115, 0), sticky="w")
self.label123 = ttk.Label(self.formula_frame, text="output", font=("Cambria italic", 8))
self.label123.grid(row=0, columnspan=2, pady=(30, 20), padx=(49, 0), sticky="w")
# self.label1 = ttk.Label(self, text="mV = \u212A c", font=LARGE_FONT)
# self.label1.grid(row=0, column=0, columnspan=2, pady=(20, 20))
# self.label123 = ttk.Label(self, text="output", font=("Cambria italic", 8))
# self.label123.grid(row=0, columnspan=2, pady=(15, 0), padx=(47, 0), sticky="w")
#
# self.label_dc = ttk.Label(self, text="dc", font=LARGE_FONT)
# self.label_dc.grid(column=0, row=0, columnspan=2, pady=(30, 0), padx=(113, 0), sticky="w")
#
# self.label_ = ttk.Label(self, text="___", font=LARGE_FONT)
# self.label_.grid(column=0, row=0, columnspan=2, pady=(0, 8), padx=(114, 0), sticky="w")
#
# self.label_dn = ttk.Label(self, text="dn", font=LARGE_FONT)
# self.label_dn.grid(column=0, row=0, columnspan=2, pady=(0, 18), padx=(113, 0), sticky="w")
self.label4 = ttk.Label(self, text="K =", font=LARGE_FONT)
self.label4.grid(column=0, row=1, sticky="nw", pady=(20, 20))
self.box_value = tk.StringVar()
self.box_value.set("N/A")
self.combo_box_list = []
self.combo_box = ttk.Combobox(self, textvariable=self.box_value, font=LARGE_FONT)
self.get_k_values()
self.combo_box.configure(width=15, height=10)
self.combo_box.bind("<<ComboboxSelected>>", self.listboxfunc)
self.combo_box.grid(column=1, row=1, pady=(20, 20))
self.label5 = ttk.Label(self, text="Slope = ", font=LARGE_FONT)
self.label5.grid(column=0, row=3, sticky="sw", pady=(10, 20))
self.label3 = ttk.Label(self, textvariable=self.v1, font=LARGE_FONT)
self.label3.grid(column=1, row=3, sticky="w", pady=(10, 20))
self.label41 = ttk.Label(self, textvariable=self.v2, font=LARGE_FONT)
self.label41.grid(column=1, row=4, sticky="w", pady=(0, 0))
self.label4 = ttk.Label(self, text=" =", font=LARGE_FONT)
self.label4.grid(column=0, row=4, sticky="w", pady=(0, 0), padx=(20, 0))
self.label_dc1 = ttk.Label(self, text="dc", font=LARGE_FONT)
self.label_dc1.grid(column=0, row=4, pady=(23, 0), sticky="w")
self.label_1 = ttk.Label(self, text="___", font=LARGE_FONT)
self.label_1.grid(column=0, row=4, pady=(0, 10), sticky="w")
self.label_dn1 = ttk.Label(self, text="dn", font=LARGE_FONT)
self.label_dn1.grid(column=0, row=4, pady=(0, 20), sticky="w")
self.btn1 = ttk.Button(self, text="Solve the equation!", command=self.solve_equation)
self.btn1.grid(column=0, row=5, sticky="ew", columnspan=2, pady=(20, 0))
def get_k_values(self):
self.combo_box_list.clear()
# self.listbox.delete(0, tk.END)
with open("kvalues.txt", "r") as kvalues:
lines = kvalues.readline()
while lines:
self.combo_box_list.append(lines)
# self.listbox.insert(tk.END, lines)
lines = kvalues.readline()
self.combo_box['values'] = self.combo_box_list
def print_slope(self):
slope = self.master.get_slope()
ak = "{:9.4f}".format(slope) # formatting the float to string in a nice fashion
self.v1.set(ak)
def listboxfunc(self, event):
kkvalue = self.combo_box.get()
self.v.set(kkvalue)
def linefit(self):
self.master.plot_linefit()
self.print_slope()
print("Line Fit!")
def solve_equation(self):
self.linefit()
floatt = self.v.get().split(' ')[0] # kvalue
print(floatt) # note: a K value must be selected from the combobox first
try:
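# rearranged from mV = K * (dn/dc) * c: the fitted slope divided by K gives dn/dc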
slope = self.master.get_slope()
answer = slope / float(floatt)
self.v2.set("{:.4f}".format(answer) + " [mg/ml]")
except ValueError:
print("value error during solve_equatiom")
class Container(tk.Frame):
'''
Master frame for Calibration and Measurement frames. Hosts filemenu buttons.
Also serves as an intermediary between the GUI and the Controller
'''
def __init__(self, master):
tk.Frame.__init__(self, master)
self.master = master
self.config(relief="sunken", borderwidth=1)
self.menubar = tk.Menu(self)
self.filemenu = tk.Menu(self.menubar, tearoff=0)
self.filemenu.add_command(label="Calibrate", command=lambda: master.show_frame(CalibrationPage))
self.filemenu.add_command(label="Measure", command=lambda: master.show_frame(MeasurePage))
self.filemenu.add_command(label="Delete K", command=self.k_val_delete)
self.filemenu.add_separator()
self.filemenu.add_command(label="Save Data", command=self.save_data)
self.filemenu.add_command(label="Load Data", command=self.load_data)
self.filemenu.add_command(label="Start Data", command=self.start_data_acquisiton)
self.filemenu.add_separator()
self.filemenu.add_command(label="Exit", command=self.quit)
self.menubar.add_cascade(label="File", menu=self.filemenu)
self.helpmenu = tk.Menu(self.menubar, tearoff=0)
self.helpmenu.add_command(label="Tutorial", command=self.help_pop_up)
self.menubar.add_cascade(label="Help", menu=self.helpmenu)
tk.Tk.config(self.master, menu=self.menubar)
def save_data(self):
self.master.save_data()
def k_val_delete(self):
self.master.k_val_delete()
def help_pop_up(self):
self.master.help_pop_up()
def load_data(self):
self.master.load_data()
def start_data_acquisiton(self):
self.master.start_data_acquisition()
class PlotFrame(tk.Frame):
'''Frame for hosting the canvas, toolbar and plot related buttons. Takes Controller as master.'''
def __init__(self, master):
tk.Frame.__init__(self, master)
self.master = master
self.start_stop_var = tk.StringVar()
self.start_stop_var.set("Stop")
# self.btn3 = ttk.Button(self, textvariable=self.start_stop_var, command=self.start_stop)
# self.btn3.pack(side=tk.RIGHT)
#
# self.btn4 = ttk.Button(self, text="Clear", command=self.clear_graph)
# self.btn4.pack(side=tk.RIGHT)
def start_stop(self):
self.master.start_stop()
def clear_graph(self):
self.master.clear_graph()
def call_entry_popup(self):
entry_popup = EnterConcentrationPopup(self.master, title="Concent. [mg/mL]")
def call_help_popup(self):
dialogg = RemoveDataPointPopup(self.master, title="Remove Check")
def create_buttons(self):
'''
Creates plot related buttons. This function will be called in the canvas class,
due to Pack geometry manager issues.
'''
btn3 = ttk.Button(self, textvariable=self.start_stop_var, command=self.start_stop)
btn3.pack(side=tk.RIGHT)
btn4 = ttk.Button(self, text="Clear", command=self.clear_graph)
btn4.pack(side=tk.RIGHT)
class Controller(tk.Tk): # Tk class from tk module is inherited
'''
    Controller class. Communicates the View with the Model.
    Serves as the root of the GUI and as the master in which all frames are created.
'''
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
self.columnconfigure(0, weight=20)
self.columnconfigure(1, weight=1)
self.rowconfigure(0, weight=0)
self.rowconfigure(1, weight=5)
tk.Tk.wm_title(self, "Dndicino")
self.datasource = DataAcquisition()
self.plots = MyPlots(self.datasource)
        self.container = Container(self) # frame is also a class from Tkinter; you basically create a frame to fill in
self.container.grid(row=1, column=1, pady=(0, 200))
self.plot_frame = PlotFrame(self)
self.plot_frame.grid(row=1, column=0, sticky="news")
self.canvass = CanvasClass(self.plots, self.plot_frame)
        self.frames = {}  # this is an empty dictionary that will hold different frames
self.calibration_page = CalibrationPage(self, self.container)
self.frames[CalibrationPage] = self.calibration_page
self.calibration_page.grid(row=0, column=0, sticky="nsew")
self.measurement_page = MeasurePage(self, self.container)
self.frames[MeasurePage] = self.measurement_page
self.measurement_page.grid(row=0, column=0, sticky="nsew")
self.protocol("WM_DELETE_WINDOW", self.quit)
self.show_frame(MeasurePage)
self.ani = animation.FuncAnimation(self.plots, self.plots.animate, interval=400, blit=False)
def save_data(self):
self.datasource.save_data()
def k_val_delete(self):
popup = DeleteKValuesPopUp(self, title="Delete K values")
self.measurement_page.MeasurementFrame.get_k_values()
def help_pop_up(self):
popup_tutorial = TutorialPopUp(self, title="Tutorial")
def show_frame(self, cont):
frame = self.frames[cont] # takes the given argument as a dict key
self.clear_graph()
self.frames[MeasurePage].MeasurementFrame.get_k_values()
if frame == self.measurement_page:
self.plots.suptitle("Measurement", fontsize=20)
else:
self.plots.suptitle("Calibration", fontsize=20)
frame.tkraise() # raises the given frame
def start_stop(self):
if self.plots.start_stop:
self.plot_frame.start_stop_var.set("Start")
else:
self.plot_frame.start_stop_var.set("Stop")
self.flip_start_stop()
def clear_graph(self):
self.plots.clear_plots()
def get_slope(self):
slope = self.plots.get_slope()
return slope
def plot_linefit(self):
self.plots.linefit()
def find_and_remove_nearest(self):
xdata = self.canvass.xdata
self.plots.find_nearest(xdata)
def get_volt_and_concent(self, concent):
time1 = self.canvass.xx1
time2 = self.canvass.xx2
self.plots.get_volt_and_concent(concent, time1=time1, time2=time2)
def load_data(self):
self.plots.plot_static_data()
self.plot_frame.start_stop_var.set("Start")
# self.plots.flip_start_stop()
def flip_start_stop(self):
self.plots.flip_start_stop()
def start_data_acquisition(self):
self.datasource.start_data_acquisition()
class MeasurePage(tk.Frame):
'''
Main Frame for Measurement. Hosts the Measurement Frame.
Takes Container as master.
'''
def __init__(self, controller, container):
        tk.Frame.__init__(self, container)  # the base class is being initialised
# self.label = ttk.Label(self, text="Measurement Unit", font=LARGE_FONT)
# self.label.grid(row=0, column=0)
self.controller = controller
self.start_stop = True
self.MeasurementFrame = MeasurementFrame(self)
self.MeasurementFrame.grid(column=0, row=1, sticky="nw", pady=(0, 30), padx=(40, 40))
def get_slope(self):
slope = self.controller.get_slope()
return slope
def plot_linefit(self):
self.controller.plot_linefit()
class CalibrationPage(tk.Frame):
'''
Main Frame for Calibration. Hosts the Calibration Frame.
    Takes another frame (Container) as master.
'''
def __init__(self, parent, container):
        tk.Frame.__init__(self, container)  # the base class is being initialised
# self.label = ttk.Label(self, text="Calibration", font=LARGE_FONT)
# self.label.grid(row=0, column=0) # since its such a basic example lets use pack
self.parent = parent
# self.plots = plots
self.start_stop = True
# self.button1 = ttk.Button(self, text="Back to Measure",
# command=lambda: parent.show_frame(MeasurePage))
# self.button1.grid(row=0, column=1, sticky="w")
self.CalibrationFrame = CalibrationFrame(self)
self.CalibrationFrame.grid(column=0, row=1, sticky="nw", pady=(0, 30), padx=(40, 40))
# self.canvas.get_tk_widget().grid(row=1, column=0)
def get_slope(self):
slope = self.parent.get_slope()
return slope
def plot_linefit(self):
self.parent.plot_linefit()
class CanvasClass(FigureCanvasTkAgg):
'''Canvas for the plots. Needs a plot and master. Hosts callback functions'''
def __init__(self, plot, master):
FigureCanvasTkAgg.__init__(self, plot, master)
self.plot = plot
self.master = master
self.get_tk_widget().pack(fill=tk.BOTH, expand=10, side=tk.TOP)
self.callbacks.connect('button_press_event', self.callback)
self.xdata = 0.0
self.xx1 = 0.0
self.xx2 = 0.0
self.toolbar = NavigationToolbar2TkAgg(self, self.master)
self.toolbar.pack(side=tk.LEFT, padx=5)
self.toolbar.update()
self.show()
self.master.create_buttons()
self.rs = RectangleSelector(self.plot.axes1, self.line_select_callback,
drawtype='box', useblit=True, rectprops=dict(facecolor='red', edgecolor='black',
alpha=0.2, fill=True),
button=[1, 3], # don't use middle button
minspanx=1, minspany=1,
spancoords='data',
interactive=False)
def line_select_callback(self, eclick, erelease):
self.xx1 = eclick.xdata
self.xx2 = erelease.xdata
self.master.call_entry_popup()
def callback(self, event):
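        # React only when no toolbar tool (zoom/pan) is active and the
        # click landed in the axes tagged gid 'B'; the clicked x position
        # feeds the remove-data-point popup below.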
if event.inaxes is not None:
if self.toolbar.mode == "" and event.inaxes.get_gid() == 'B':
self.xdata = event.xdata
print("x data 2 = ", self.xdata)
self.master.call_help_popup()
app = Controller() # create an object of the class
app.geometry("1280x720")
app.mainloop() # runs the mainloop
|
{"hexsha": "de7e87f3e07e2fe045026df2e2ef7021ae08d59b", "size": 41774, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python_code/final_v1.py", "max_stars_repo_name": "tuw-chemistry/measurement_unit", "max_stars_repo_head_hexsha": "b135e6832bd393c285439431a40a563c27d864a4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python_code/final_v1.py", "max_issues_repo_name": "tuw-chemistry/measurement_unit", "max_issues_repo_head_hexsha": "b135e6832bd393c285439431a40a563c27d864a4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python_code/final_v1.py", "max_forks_repo_name": "tuw-chemistry/measurement_unit", "max_forks_repo_head_hexsha": "b135e6832bd393c285439431a40a563c27d864a4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4954582989, "max_line_length": 118, "alphanum_fraction": 0.5866567722, "include": true, "reason": "import numpy", "num_tokens": 9996}
|
#ifndef CONTROLLERS_IMAGEOPERATIONTOGGLECONTROLLER_H
#define CONTROLLERS_IMAGEOPERATIONTOGGLECONTROLLER_H
#include "imageoperationactioncontroller.h"
#include "s3d/cv/image_operation/image_operation.h"
#include <QAction>
#include <gsl/gsl>
class ImageOperationToggleController : public ImageOperationActionController {
Q_OBJECT
public:
ImageOperationToggleController(gsl::not_null<QAction*> action,
gsl::not_null<s3d::image_operation::ImageOperation*> imageOperation);
void onActionTriggered() override;
};
#endif //CONTROLLERS_IMAGEOPERATIONTOGGLECONTROLLER_H
|
{"hexsha": "4bdbe13ac04c507a2596c026eea936f500f2896f", "size": 608, "ext": "h", "lang": "C", "max_stars_repo_path": "src/apps/S3DAnalyzer/controllers/imageoperationtogglecontroller.h", "max_stars_repo_name": "hugbed/OpenS3D", "max_stars_repo_head_hexsha": "4ffad16f9b0973404b59eb1424cc45f68754fe12", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2017-04-16T16:38:15.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-20T03:23:15.000Z", "max_issues_repo_path": "src/apps/S3DAnalyzer/controllers/imageoperationtogglecontroller.h", "max_issues_repo_name": "hugbed/OpenS3D", "max_issues_repo_head_hexsha": "4ffad16f9b0973404b59eb1424cc45f68754fe12", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 40.0, "max_issues_repo_issues_event_min_datetime": "2017-04-12T17:24:44.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-21T18:41:23.000Z", "max_forks_repo_path": "src/apps/S3DAnalyzer/controllers/imageoperationtogglecontroller.h", "max_forks_repo_name": "hugbed/OpenS3D", "max_forks_repo_head_hexsha": "4ffad16f9b0973404b59eb1424cc45f68754fe12", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2017-07-13T21:51:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-18T16:22:03.000Z", "avg_line_length": 26.4347826087, "max_line_length": 102, "alphanum_fraction": 0.7927631579, "num_tokens": 134}
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .calc_order_ratio import calc_order_ratio
from .collision_info import collision_info
from .matlab_utils import find
from .equation_tools.eq_tools import get_time_ratio
import math
def get_shrinking_intervals(delta, rz, inegDTAU, tol1, tol2, tol_coeff, tol_coeff2, tolerance, shrinking_ind = None):
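    # Returns (interval, problem, tol_coeff, had_resolution): interval is
    # [nn1, nn2] bounding the shrinking locations or None on failure;
    # problem is 0 on success, positive on failure, and -1 to signal the
    # caller to retry with a larger tol_coeff.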
if abs(delta) < tol2 * tol_coeff2:
if delta <= 0:
print("Negative delta:", delta)
tol_coeff2 = 0.1
resolved = False
while tol_coeff2 >= 0.001 and tol_coeff <= 0.01 / tolerance:
print('immediate collision ... resolving * ', tol_coeff2, 'delta:', delta)
if abs(delta) < tol2 * tol_coeff2:
print('... fail!')
tol_coeff2 = tol_coeff2 * 0.1
continue
elif delta <= -tol2 * tol_coeff2:
problem = 1
return None, problem, tol_coeff, True
elif delta >= tol2 * tol_coeff2:
test = np.fabs(rz * delta - 1)
ind1 = np.logical_and(test < tol1 * tol_coeff, inegDTAU)
if shrinking_ind is not None:
ind1 = np.logical_or(ind1,shrinking_ind)
ishrink = find(ind1)
nn1 = np.min(ishrink)
nn2 = np.max(ishrink)
if len(ishrink) < nn2 - nn1:
if tol_coeff <= 0.01 / tolerance:
print('multiple location shrinks...', ishrink)
tol_coeff = 10 * tol_coeff
continue
else:
break
else:
problem = 0
return [nn1, nn2], problem, tol_coeff, True
if not resolved:
ct = 0.1
while ct >= tol_coeff2:
test = rz * delta - 1
ishrink = find(np.logical_and(test < tol1 * tol_coeff/ct, inegDTAU))
nn1 = np.min(ishrink)
nn2 = np.max(ishrink)
if len(ishrink) < nn2 - nn1:
print('multiple location shrinks!...', ishrink)
ct = 0.1 * ct
continue
else:
problem = 0
return [nn1, nn2], problem, tol_coeff, True
problem = 3
return None, problem, tol_coeff, True
elif delta <= -tol2 * tol_coeff2:
problem = 4
return None, problem, tol_coeff, True
elif delta >= tol2 * tol_coeff2:
test = np.fabs(rz * delta - 1)
ind1 = np.logical_and(test < tol1 * tol_coeff, inegDTAU)
if shrinking_ind is not None:
ind1 = np.logical_or(ind1, shrinking_ind)
ishrink = find(ind1)
if (ishrink.shape[0] == 0):
print('here!')
nn1 = np.min(ishrink)
nn2 = np.max(ishrink)
if len(ishrink) < nn2 - nn1:
print('multiple location shrinks...', ishrink)
tol_coeff = 10 * tol_coeff
problem = -1
return None, problem, tol_coeff, True
else:
problem = 0
return [nn1, nn2], problem, tol_coeff, False
def resolve_and_classify(delta, rz, solution, param_line, tol_coeff0, tolerance, shrinking_ind = None):
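    # Escalates tol_coeff by factors of 10 until get_shrinking_intervals
    # yields a window that classify_time_collision accepts; gives up with
    # problem['result'] = 5 once tol_coeff exceeds 0.01/tolerance.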
problem = {'result': 0}
tol2 = 10 * tolerance
tol1 = tol2
tol_coeff = tol_coeff0
tol_coeff2 = 1
had_resolution = False
inegDTAU = solution.state.dtau < -tol2
while tol_coeff <= 0.01/tolerance:
if tol_coeff > 1:
print('trying to resolve * ',tol_coeff)
intervals, prob, tol_coeff, res = get_shrinking_intervals(delta, rz, inegDTAU, tol1, tol2, tol_coeff, tol_coeff2, tolerance, shrinking_ind)
if prob > 0:
problem['result'] = prob
return None, problem
else:
had_resolution = had_resolution or res
if prob == 0:
N1 = intervals[0] - 1
N2 = intervals[1] + 1
if prob == -1:
continue
if N1 == -1 and N2 == solution.NN:
problem['result'] = 5
print('Max tolerance coefficient reached ....')
return None, problem
col_info = classify_time_collision(delta, rz, tol_coeff, N1, N2, solution, param_line, tolerance)
if col_info is None:
had_resolution = True
else:
col_info.had_resolution = had_resolution
return col_info, problem
tol_coeff = 10 * tol_coeff
problem['result'] = 5
return None, problem
def reclassify(col_info, solution, param_line, tolerance, stateN=None):
if col_info.from_ztau:
res = solution.pivots.find_N1_N2_around(col_info.ztau_ind, col_info.N1-1)
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
if col != col_info:
col.from_ztau = True
col.ztau_ind = col_info.ztau_ind
return col, True
res = solution.pivots.find_N1_N2_around(col_info.ztau_ind, col_info.N1, col_info.N2 + 1)
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
if col != col_info:
col.from_ztau = True
col.ztau_ind = col_info.ztau_ind
return col, True
else:
if col_info.alternative is not None:
return col_info.alternative, True
return col_info, False
else:
tol_coeff = col_info.tol_coeff * 10
resolved = False
if col_info.case == 'Case iii':
if col_info.alternative is not None:
stateN = col_info.N1
col_info = col_info.alternative
new_col_info, problem = resolve_and_classify(col_info.delta, col_info.rz, solution, param_line, tol_coeff, tolerance)
if problem['result'] > 0:
return col_info, False
elif new_col_info.N1 <= stateN and stateN <= new_col_info.N2:
resolved = True
return new_col_info, resolved
else:
col_info = new_col_info
else:
return col_info, resolved
while tol_coeff <= 0.01/tolerance:
new_col_info, problem = resolve_and_classify(col_info.delta, col_info.rz, solution, param_line, tol_coeff, tolerance)
tol_coeff = tol_coeff * 10
if problem['result'] > 0:
break
if new_col_info != col_info:
col_info = new_col_info
if stateN is None:
resolved = True
break
elif col_info.N1 <= stateN and stateN <= col_info.N2:
resolved = True
break
if not resolved:
if col_info.alternative is not None:
return col_info.alternative, True
return col_info, resolved
def reclassify_ztau(col_info, solution, param_line, ztau_ind, tolerance, hard_find=False):
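    # Heuristic recovery around zero-length intervals: try progressively
    # smaller slices of ztau_ind until pivots.find_N1_N2_around yields a
    # window that classifies; returns None when nothing matches.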
if len(ztau_ind) >= 3:
if ztau_ind[1] - ztau_ind[0] > 2:
if ztau_ind[-1] - ztau_ind[-2] > 2:
res = solution.pivots.find_N1_N2_around(ztau_ind[1:-1])
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
col.from_ztau = True
col.ztau_ind = ztau_ind
return col
res = solution.pivots.find_N1_N2_around(ztau_ind[1:])
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
col.from_ztau = True
col.ztau_ind = ztau_ind
return col
elif ztau_ind[-1] - ztau_ind[-2] > 2:
res = solution.pivots.find_N1_N2_around(ztau_ind[:-1])
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
col.from_ztau = True
col.ztau_ind = ztau_ind
return col
if (len(ztau_ind) > 3 and (max(ztau_ind) - min(ztau_ind) + 1)/len(ztau_ind) > 0.9) or hard_find:
for n in range(len(ztau_ind)):
res = solution.pivots.find_N1_N2_around(ztau_ind[n:])
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
col.from_ztau = True
col.ztau_ind = ztau_ind[n:]
return col
for n in range(1, len(ztau_ind)-1):
res = solution.pivots.find_N1_N2_around(ztau_ind[:-n])
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
col.from_ztau = True
col.ztau_ind = ztau_ind[:-n]
return col
return None
def classify_time_collision(delta, rz, tol_coeff, N1, N2, solution, param_line, tolerance):
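    # Classifies by how many variables leave the basis between N1 and N2:
    # a boundary hit or a single leaving variable gives 'Case i__';
    # exactly two give 'Case ii_', with the order ratio deciding the
    # ordering of the pair; more than two cannot be classified.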
if N1 == -1 or N2 == solution.NN:
return collision_info('Case i__', delta, N1, N2, [], [], rz, tol_coeff)
else:
vlist = solution.pivots.get_out_difference(N1, N2)
if len(vlist) > 2:
print('More than two variables leave in time shrink ....')
return None
elif len(vlist) == 1:
return collision_info('Case i__', delta, N1, N2, [], [], rz, tol_coeff)
elif len(vlist) == 2:
case = 'Case ii_'
if N2 - N1 == 2:
return collision_info(case, delta, N1, N2, solution.pivots.outpivots[N1], solution.pivots.outpivots[N1+1], rz, tol_coeff)
else:
order_ratio, correct = calc_order_ratio(vlist[0], vlist[1], N1, N2, solution, param_line, delta / 2)
if abs(abs(order_ratio) - 1) < tolerance:
print('Tolerance in R unclear...')
if abs(order_ratio) < 1:
v1 = vlist[0]
v2 = vlist[1]
else:
v1 = vlist[1]
v2 = vlist[0]
if correct:
return collision_info(case, delta, N1, N2, v1, v2, rz, tol_coeff)
else:
return None
def calc_timecollide(TAU, DTAU, lastN1, lastN2, tolerance):
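    # Scans interval lengths TAU and their time derivatives DTAU for the
    # next collision; returns ([delta, rz], problem) with delta the time
    # to collision and rz the shrink rates, [0] (optionally with an
    # interval) for an immediate collision, or [] when nothing shrinks.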
problem = {'result': 0, 'data': [], 'had_resolution': False, 'resolved_types':[]}
tol2 = 10 * tolerance
max_neg_coeff = 100000
tol_coeff = 1
min_tau = np.min(TAU)
if min_tau < -tol2:
print('negative interval length...', min_tau)
problem['had_resolution'] = True
tol2 = 10**math.ceil(math.log10(-min_tau))
if tol2 > tolerance * max_neg_coeff:
print('fail!')
problem['result'] = 1
problem['data'] = find(min_tau)
return [0], problem
print('resolved!')
# while np.any(inegTAU):
# print('negative interval length...', np.min(TAU))
# problem['had_resolution'] = True
# problem['resolved_types'].append(1)
# d = d * 10
# if d > max_neg_coeff:
# print('fail!')
# problem['result'] = 1
# problem['data'] = find(inegTAU)
# return [0], problem
# else:
# print('resolving * ', d)
# tol2 = 10 * tolerance * d
# iposTAU = TAU > tol2
# izerTAU = np.fabs(TAU) <= tol2
# inegTAU = TAU < -tol2
NN = TAU.shape[0]
iposTAU = TAU > tol2
izerTAU = np.fabs(TAU) <= tol2
inegTAU = TAU < -tol2
#iposDTAU = DTAU > tolerance
izerDTAU = np.fabs(DTAU) <= tol2
inegDTAU = DTAU < -tol2
# TODO: take this to cython
test1 = np.logical_and(izerTAU, inegDTAU)
zflag = np.any(test1)
if zflag:
problem['had_resolution'] = True
problem['resolved_types'].append(2)
ztau_ind = find(test1)
print('zero length interval shrinks:', ztau_ind, 'last N1:', lastN1, ' N2:', lastN2)
last_col_int = np.arange(lastN1 + 1, lastN2, dtype=int)
ind1 = len(np.intersect1d(ztau_ind, last_col_int, assume_unique=True)) == 0
# zmin = np.min(ztau_ind)
# zmax = np.max(ztau_ind)
# ztau_int= np.arange(zmin,zmax+1, dtype=int)
# ind2 = len(np.intersect1d(last_col_int, ztau_int , assume_unique=True)) != 0
# ind3 = (len(ztau_ind) + lastN2 - lastN1 - 1)/(zmax - zmin + 1) >= 1
if np.sum(izerTAU) == NN - 1:
locposTAU = find(iposTAU)[0]
if locposTAU > 0 and locposTAU < NN - 1:
if np.sum(DTAU[np.arange(0,locposTAU)]) < 0:
return [0, [0, locposTAU-1]], problem
elif np.sum(DTAU[np.arange(locposTAU + 1,NN)]) < 0:
return [0, [locposTAU + 1, NN-1]], problem
elif ind1:
# if last_case == 'rewind':
# zmin = np.min(ztau_ind)
# zmax = np.max(ztau_ind)
# if len(ztau_ind) / (zmax - zmin + 1) >= 0.75 and len(ztau_ind) > 3:
# if np.all(izerTAU[zmin: zmax+1]):
# print('trying to remove zero intervals...')
# rz = np.divide(-DTAU, TAU, where=inegDTAU, out=np.zeros_like(TAU))
# zz_ind = np.argmax(rz)
# zz = rz[zz_ind]
# return [1 / zz, zmin, zmax], problem
tol_coeff = 0.1
while tol_coeff >= 0.001 and zflag:
print('trying to resolve * ', tol_coeff, ' ...')
iposTAU = TAU > tol2 * tol_coeff
inegTAU = TAU < tol2 * tol_coeff
izerTAU = np.fabs(TAU) <= tol2 * tol_coeff
test1 = np.logical_and(izerTAU, inegDTAU)
zflag = np.any(test1)
if zflag:
if np.sum(izerTAU) == NN - 1:
locposTAU = find(iposTAU)[0]
if locposTAU > 0 and locposTAU < NN - 1:
if np.sum(DTAU[np.arange(0, locposTAU)]) < 0:
return [0, [0, locposTAU-1]], problem
elif np.sum(DTAU[np.arange(locposTAU + 1, NN)]) < 0:
return [0, [locposTAU + 1, NN-1]], problem
else:
break
tol_coeff = tol_coeff * 0.1
if zflag:
if lastN1 !=0 or lastN2 !=0:
print('zero length interval shrinks\n ')
problem['result'] = 2
problem['data'] = find(test1)
return [0], problem
else:
delta, rz = get_time_ratio(TAU, DTAU, inegTAU, inegDTAU, tol2 * tol_coeff)
if delta < 0:
return [], problem
else:
return [delta, rz], problem
# test3 = inegDTAU
# if not np.any(test3):
# return [], problem
# xTAU = TAU.copy()
# xTAU[inegTAU] = tol2 * tol_coeff
# rz = np.divide(-DTAU, xTAU, where=test3, out=np.zeros_like(TAU))
# zz_ind = np.argmax(rz)
# zz = rz[zz_ind]
# rz[ztau_ind] = zz
# return [1 / zz, rz], problem
else:
print('zero length interval shrinks\n ')
problem['result'] = 2
problem['data'] = find(test1)
return [0], problem
# TODO: take this to cython
test2 = np.logical_and(izerTAU, izerDTAU)
zflag = np.any(test2)
if zflag:
problem['had_resolution'] = True
problem['resolved_types'].append(5)
        # the original tested test1 here, but test2 is what was just computed; with test1 this branch fires incorrectly
        if len(np.intersect1d(find(test2), np.arange(lastN1 + 1, lastN2, dtype=int), assume_unique=True)) == 0:
print('zero length interval does not expand')
tol_coeff = 0.1
while tol_coeff >= 0.001 and zflag:
print('trying to resolve * ', tol_coeff, ' ...')
inegTAU = TAU < tol2 * tol_coeff
iposTAU = TAU > tol2 * tol_coeff
izerTAU = np.fabs(TAU) <= tol2 * tol_coeff
test2 = np.logical_and(izerTAU, izerDTAU)
zflag = np.any(test2)
tol_coeff = tol_coeff * 0.1
if zflag:
print('zero length interval does not expand... trying to ignore')
# problem['result'] = 5
# problem['data'] = find(test2)
# return [], problem
else:
problem['data'] = find(test2)
problem['result'] = 5
print('zero length interval does not expand\n')
            # TODO: this is a potential source of a bug!
return [0], problem
delta, rz = get_time_ratio(TAU, DTAU, inegTAU, inegDTAU, tol2 * tol_coeff)
if delta < 0:
return [], problem
else:
return [delta, rz], problem
# #test3 = np.logical_and(iposTAU, inegDTAU)
# #inegDTAU = DTAU < -tol2 * tol_coeff
# test3 = inegDTAU
# if not np.any(test3):
# return [], problem
#
# xTAU = TAU.copy()
# xTAU[inegTAU] = tol2 * tol_coeff
# rz = np.divide(-DTAU, xTAU, where=test3, out=np.zeros_like(TAU))
# zz_ind = np.argmax(rz)
# zz = rz[zz_ind]
# return [1/zz, rz], problem
|
{"hexsha": "a0a79f5a2807272a2d86b2c82713cdfb26de3da1", "size": 19195, "ext": "py", "lang": "Python", "max_stars_repo_path": "SCLPsolver/subroutines/time_collision_resolver.py", "max_stars_repo_name": "haroldship/SCLPsolver", "max_stars_repo_head_hexsha": "70b79acb074f51d4a269993f6a1fcf04a8196a89", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-11T16:01:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-11T16:01:55.000Z", "max_issues_repo_path": "SCLPsolver/subroutines/time_collision_resolver.py", "max_issues_repo_name": "haroldship/SCLPsolver", "max_issues_repo_head_hexsha": "70b79acb074f51d4a269993f6a1fcf04a8196a89", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SCLPsolver/subroutines/time_collision_resolver.py", "max_forks_repo_name": "haroldship/SCLPsolver", "max_forks_repo_head_hexsha": "70b79acb074f51d4a269993f6a1fcf04a8196a89", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-29T15:23:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T08:58:40.000Z", "avg_line_length": 43.8242009132, "max_line_length": 147, "alphanum_fraction": 0.5183120604, "include": true, "reason": "import numpy", "num_tokens": 4946}
|
import torch
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import models
# For each quantity, return a [prediction, error] pair (normals also carry the mean angular error)
def wrapperNYU(dataBatch, opt,
encoder, albedoDecoder, normalDecoder, roughDecoder, depthDecoder ):
# Load data from cpu to gpu
normal_cpu = dataBatch['normal']
normalBatch = Variable(normal_cpu ).cuda()
depth_cpu = dataBatch['depth']
depthBatch = Variable(depth_cpu ).cuda()
seg_cpu = dataBatch['segNormal']
segNormalBatch = Variable( seg_cpu ).cuda()
seg_cpu = dataBatch['segDepth']
segDepthBatch = Variable(seg_cpu ).cuda()
# Load the image from cpu to gpu
im_cpu = (dataBatch['im'] )
imBatch = Variable(im_cpu ).cuda()
if opt.cascadeLevel > 0:
albedoPre_cpu = dataBatch['albedoPre']
albedoPreBatch = Variable(albedoPre_cpu ).cuda()
normalPre_cpu = dataBatch['normalPre']
normalPreBatch = Variable(normalPre_cpu ).cuda()
roughPre_cpu = dataBatch['roughPre']
roughPreBatch = Variable(roughPre_cpu ).cuda()
depthPre_cpu = dataBatch['depthPre']
depthPreBatch = Variable(depthPre_cpu ).cuda()
diffusePre_cpu = dataBatch['diffusePre']
diffusePreBatch = Variable(diffusePre_cpu ).cuda()
specularPre_cpu = dataBatch['specularPre']
specularPreBatch = Variable(specularPre_cpu ).cuda()
if albedoPreBatch.size(2) < opt.imHeight or albedoPreBatch.size(3) < opt.imWidth:
albedoPreBatch = F.interpolate(albedoPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
if normalPreBatch.size(2) < opt.imHeight or normalPreBatch.size(3) < opt.imWidth :
normalPreBatch = F.interpolate(normalPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
if roughPreBatch.size(2) < opt.imHeight or roughPreBatch.size(3) < opt.imWidth :
roughPreBatch = F.interpolate(roughPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
if depthPreBatch.size(2) < opt.imHeight or depthPreBatch.size(3) < opt.imWidth :
depthPreBatch = F.interpolate(depthPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
# Regress the diffusePred and specular Pred
envRow, envCol = diffusePreBatch.size(2), diffusePreBatch.size(3)
imBatchSmall = F.adaptive_avg_pool2d(imBatch, (envRow, envCol) )
diffusePreBatch, specularPreBatch = models.LSregressDiffSpec(
diffusePreBatch.detach(),
specularPreBatch.detach(),
imBatchSmall,
diffusePreBatch, specularPreBatch )
if diffusePreBatch.size(2) < opt.imHeight or diffusePreBatch.size(3) < opt.imWidth:
diffusePreBatch = F.interpolate(diffusePreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
if specularPreBatch.size(2) < opt.imHeight or specularPreBatch.size(3) < opt.imWidth:
specularPreBatch = F.interpolate(specularPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
# Normalize Albedo and depth
bn, ch, nrow, ncol = albedoPreBatch.size()
albedoPreBatch = albedoPreBatch.view(bn, -1)
albedoPreBatch = albedoPreBatch / torch.clamp(torch.mean(albedoPreBatch, dim=1), min=1e-10).unsqueeze(1) / 3.0
albedoPreBatch = albedoPreBatch.view(bn, ch, nrow, ncol)
bn, ch, nrow, ncol = depthPreBatch.size()
depthPreBatch = depthPreBatch.view(bn, -1)
depthPreBatch = depthPreBatch / torch.clamp(torch.mean(depthPreBatch, dim=1), min=1e-10).unsqueeze(1) / 3.0
depthPreBatch = depthPreBatch.view(bn, ch, nrow, ncol)
########################################################
# Build the cascade network architecture #
if opt.cascadeLevel == 0:
inputBatch = imBatch
elif opt.cascadeLevel > 0:
inputBatch = torch.cat([imBatch, albedoPreBatch,
normalPreBatch, roughPreBatch, depthPreBatch,
diffusePreBatch, specularPreBatch ], dim=1)
# Initial Prediction
x1, x2, x3, x4, x5, x6 = encoder(inputBatch )
albedoPred = 0.5 * (albedoDecoder(imBatch, x1, x2, x3, x4, x5, x6) + 1)
normalPred = normalDecoder(imBatch, x1, x2, x3, x4, x5, x6)
roughPred = roughDecoder(imBatch, x1, x2, x3, x4, x5, x6)
depthPred = 0.5 * (depthDecoder(imBatch, x1, x2, x3, x4, x5, x6) + 1)
normalPred = F.interpolate(normalPred, [normalBatch.size(2), normalBatch.size(3)], mode='bilinear')
depthPred = F.interpolate(depthPred, [depthBatch.size(2), depthBatch.size(3)], mode='bilinear')
depthPred = models.LSregress(depthPred.detach() * segDepthBatch.expand_as(depthPred),
depthBatch * segDepthBatch.expand_as(depthBatch), depthPred)
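    # LSregress is assumed to solve for the single scalar s minimizing
    # ||s * depthPred - depthBatch||^2 over the masked pixels and return
    # depthPred scaled by s, removing the global scale ambiguity before
    # the log-depth error below.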
## Compute Errors
pixelAllNumNormal = (torch.sum(segNormalBatch ).cpu().data).item()
normalErr = torch.sum( (normalPred - normalBatch)
* (normalPred - normalBatch) * segNormalBatch.expand_as(normalBatch) ) / pixelAllNumNormal / 3.0
pixelAllNumDepth = (torch.sum(segDepthBatch ).cpu().data).item()
depthErr = torch.sum( (torch.log(depthPred + 0.1) - torch.log(depthBatch + 0.1 ) )
* ( torch.log(depthPred + 0.1) - torch.log(depthBatch + 0.1) ) * segDepthBatch.expand_as(depthBatch ) ) / pixelAllNumDepth
angleMean = torch.sum(torch.acos( torch.clamp(torch.sum(normalPred * normalBatch, dim=1).unsqueeze(1), -1, 1) ) / np.pi * 180 * segNormalBatch) / pixelAllNumNormal
normalPred_np = normalPred.data.cpu().numpy()
normalBatch_np = normalBatch.data.cpu().numpy()
segNormalBatch_np = segNormalBatch.cpu().numpy()
theta = np.arccos( np.clip(np.sum(normalPred_np * normalBatch_np, axis=1)[:, np.newaxis, :, :], -1, 1) ) / np.pi * 180
angleMean_np = (theta * segNormalBatch_np ) / pixelAllNumNormal
    return [albedoPred, None], [normalPred, normalErr, angleMean], \
        [roughPred, None ], [depthPred, depthErr], \
        [angleMean_np]  # assumed final element; the source line continuation was truncated here
|
{"hexsha": "9df712a61c104af137c3836ef28840763dfb0311", "size": 5919, "ext": "py", "lang": "Python", "max_stars_repo_path": "wrapperNYU.py", "max_stars_repo_name": "Z7Gao/InverseRenderingOfIndoorScene", "max_stars_repo_head_hexsha": "f245d20dcbe05b1de766c2e53af79fd489f58d74", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 171, "max_stars_repo_stars_event_min_datetime": "2020-06-28T04:03:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T08:50:20.000Z", "max_issues_repo_path": "wrapperNYU.py", "max_issues_repo_name": "Z7Gao/InverseRenderingOfIndoorScene", "max_issues_repo_head_hexsha": "f245d20dcbe05b1de766c2e53af79fd489f58d74", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-08-20T08:56:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-19T19:53:51.000Z", "max_forks_repo_path": "wrapperNYU.py", "max_forks_repo_name": "Z7Gao/InverseRenderingOfIndoorScene", "max_forks_repo_head_hexsha": "f245d20dcbe05b1de766c2e53af79fd489f58d74", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-06-23T11:49:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-22T01:49:26.000Z", "avg_line_length": 48.5163934426, "max_line_length": 167, "alphanum_fraction": 0.669369826, "include": true, "reason": "import numpy", "num_tokens": 1623}
|
conditionalhello{
if(*name!="Your Name"){
writeLine("stdout", "Hello *name!");
}
else { writeLine("stdout", "Hello world!"); }
}
INPUT *name="Your Name"
OUTPUT ruleExecOut, *name
|
{"hexsha": "4ec787f7456362b9f4e63675bbf6baa4edacc457", "size": 203, "ext": "r", "lang": "R", "max_stars_repo_path": "ExampleTrainings/iRODS-User-training/exampleRules/conditionalhello.r", "max_stars_repo_name": "chStaiger/B2SAFE-B2STAGE-Training", "max_stars_repo_head_hexsha": "ab1bc3e4f6b75b9c1501aa77e58233f1f7cc4da5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2016-03-17T13:50:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-16T09:01:54.000Z", "max_issues_repo_path": "ExampleTrainings/iRODS-User-training/exampleRules/conditionalhello.r", "max_issues_repo_name": "EUDAT-Training/B2SAFE-B2STAGE-Training", "max_issues_repo_head_hexsha": "ab1bc3e4f6b75b9c1501aa77e58233f1f7cc4da5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2016-04-26T06:25:07.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-26T07:13:09.000Z", "max_forks_repo_path": "ExampleTrainings/iRODS-User-training/exampleRules/conditionalhello.r", "max_forks_repo_name": "EUDAT-Training/B2SAFE-B2STAGE-Training", "max_forks_repo_head_hexsha": "ab1bc3e4f6b75b9c1501aa77e58233f1f7cc4da5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2016-03-08T08:18:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-24T16:00:45.000Z", "avg_line_length": 22.5555555556, "max_line_length": 49, "alphanum_fraction": 0.5960591133, "num_tokens": 53}
|
Require Import Coq.Lists.List.
Require Import MirrorCore.Reify.Reify.
Require Import MirrorCharge.Imp.Imp.
Require Import MirrorCharge.Imp.Syntax.
Reify Declare Patterns patterns_imp_typ := Syntax.typ.
Reify Declare Patterns patterns_imp := (ExprCore.expr Syntax.typ Syntax.func).
Reify Declare Syntax reify_imp_typ :=
{ (@Patterns.CPatterns Syntax.typ patterns_imp_typ) }.
Reify Declare Typed Table term_table : BinNums.positive => reify_imp_typ.
Let Ext x := @ExprCore.Inj Syntax.typ Syntax.func (inl (inl x)).
Reify Declare Syntax reify_imp :=
{ (@Patterns.CFirst _ ((@Patterns.CVar _ (@ExprCore.Var Syntax.typ Syntax.func)) ::
(@Patterns.CPatterns _ patterns_imp) ::
(@Patterns.CApp _ (@ExprCore.App Syntax.typ Syntax.func)) ::
(@Patterns.CAbs _ reify_imp_typ (@ExprCore.Abs Syntax.typ Syntax.func)) ::
(@Patterns.CTypedTable _ _ _ term_table Ext) :: nil))
}.
Let _Inj := @ExprCore.Inj Syntax.typ Syntax.func.
Local Notation "x @ y" := (@RApp x y) (only parsing, at level 30).
Local Notation "'!!' x" := (@RExact _ x) (only parsing, at level 25).
Local Notation "'?' n" := (@RGet n RIgnore) (only parsing, at level 25).
Local Notation "'?!' n" := (@RGet n RConst) (only parsing, at level 25).
Local Notation "'#'" := RIgnore (only parsing, at level 0).
Reify Pattern patterns_imp_typ += (@RImpl (?0) (?1)) => (fun (a b : function reify_imp_typ) => tyArr a b).
Reify Pattern patterns_imp_typ += (!! locals) => tyLocals.
Reify Pattern patterns_imp_typ += (!! lprop) => (tyArr tyLocals tyHProp).
Reify Pattern patterns_imp_typ += (!! icmd) => tyCmd.
Reify Pattern patterns_imp_typ += (!! SProp) => tySProp.
Reify Pattern patterns_imp_typ += (!! HProp) => tyHProp.
Reify Pattern patterns_imp_typ += (!! Prop) => tyProp.
Reify Pattern patterns_imp_typ += (!! var) => tyVariable.
Reify Pattern patterns_imp_typ += (!! function_name) => tyVariable.
Reify Pattern patterns_imp_typ += (!! iexpr) => tyExpr.
Reify Pattern patterns_imp_typ += (!! nat) => tyNat.
Reify Pattern patterns_imp_typ += (!! value) => tyNat.
Reify Pattern patterns_imp_typ += (!! Fun @ ?0 @ ?1) => (fun (a b : function reify_imp_typ) => tyArr a b).
Reify Pattern patterns_imp += (RHasType var (?!0)) => (fun (x : id Imp.var) => fVar x).
Reify Pattern patterns_imp += (RHasType String.string (?!0)) => (fun (x : id Imp.var) => fVar x).
Reify Pattern patterns_imp += (RHasType nat (?!0)) => (fun (x : id nat) => fConst x).
Reify Pattern patterns_imp += (!! (@eq) @ ?0) => (fun (x : function reify_imp_typ) => (fEq x)).
(** Commands **)
Reify Pattern patterns_imp += (!! Skip) => fSkip.
Reify Pattern patterns_imp += (!! Seq) => fSeq.
Reify Pattern patterns_imp += (!! Assign) => fAssign.
Reify Pattern patterns_imp += (!! Read) => fRead.
Reify Pattern patterns_imp += (!! Write) => fWrite.
(** TODO: Call **)
(** Expressions **)
Reify Pattern patterns_imp += (!! iConst) => fConst.
Reify Pattern patterns_imp += (!! iVar) => fVar.
(** Intuitionistic Operators **)
Reify Pattern patterns_imp += (!! @ILogic.lentails @ ?0 @ #) => (fun (x : function reify_imp_typ) => (_Inj (inr (ILogicFunc.ilf_entails x)))).
Reify Pattern patterns_imp += (!! @ILogic.ltrue @ ?0 @ #) => (fun (x : function reify_imp_typ) => (_Inj (inr (ILogicFunc.ilf_true x)))).
Reify Pattern patterns_imp += (!! @ILogic.lfalse @ ?0 @ #) => (fun (x : function reify_imp_typ) => (_Inj (inr (ILogicFunc.ilf_false x)))).
Reify Pattern patterns_imp += (!! @ILogic.land @ ?0 @ #) => (fun (x : function reify_imp_typ) => (_Inj (inr (ILogicFunc.ilf_and x)))).
Reify Pattern patterns_imp += (!! @ILogic.lor @ ?0 @ #) => (fun (x : function reify_imp_typ) => (_Inj (inr (ILogicFunc.ilf_or x)))).
Reify Pattern patterns_imp += (!! @ILogic.limpl @ ?0 @ #) => (fun (x : function reify_imp_typ) => (_Inj (inr (ILogicFunc.ilf_impl x)))).
Reify Pattern patterns_imp += (!! @ILogic.lexists @ ?0 @ # @ ?1) => (fun (x y : function reify_imp_typ) => (_Inj (inr (ILogicFunc.ilf_exists y x)))).
Reify Pattern patterns_imp += (!! @ILogic.lforall @ ?0 @ # @ ?1) => (fun (x y : function reify_imp_typ) => (_Inj (inr (ILogicFunc.ilf_forall y x)))).
(** Embedding Operators **)
Reify Pattern patterns_imp += (!! @ILEmbed.embed @ ?0 @ ?1 @ #) => (fun (x y : function reify_imp_typ) => (_Inj (inr (ILogicFunc.ilf_embed x y)))).
(** Special cases for Coq's primitives **)
Reify Pattern patterns_imp += (!! True) => (_Inj (inr (ILogicFunc.ilf_true tyProp))).
Reify Pattern patterns_imp += (!! False) => (_Inj (inr (ILogicFunc.ilf_false tyProp))).
Reify Pattern patterns_imp += (!! and) => (_Inj (inr (ILogicFunc.ilf_and tyProp))).
Reify Pattern patterns_imp += (!! or) => (_Inj (inr (ILogicFunc.ilf_or tyProp))).
Reify Pattern patterns_imp += (RPi (?0) (?1)) => (fun (x : function reify_imp_typ) (y : function reify_imp) =>
ExprCore.App (_Inj (inr (ILogicFunc.ilf_forall x tyProp )))
(ExprCore.Abs x y)).
Reify Pattern patterns_imp += (RImpl (?0) (?1)) => (fun (x y : function reify_imp) => ExprCore.App (ExprCore.App (_Inj (inr (ILogicFunc.ilf_impl tyProp))) x) y).
(** Separation Logic Operators **)
Reify Pattern patterns_imp += (!! @BILogic.sepSP @ ?0 @ #) => (fun (x : function reify_imp_typ) => (fStar x)).
Reify Pattern patterns_imp += (!! @BILogic.empSP @ ?0 @ #) => (fun (x : function reify_imp_typ) => (fEmp x)).
(** Program Logic **)
Reify Pattern patterns_imp += (!! triple) => fTriple.
Reify Pattern patterns_imp += (!! eval_iexpr) => feval_iexpr.
Reify Pattern patterns_imp += (!! locals_get) => flocals_get.
Reify Pattern patterns_imp += (!! locals_upd) => flocals_upd.
Reify Pattern patterns_imp += (!! PtsTo) => fPtsTo.
(** Applicative **)
Reify Pattern patterns_imp += (!! @Applicative.ap @ !! (Fun locals) @ # @ ?0 @ ?1) => (fun (x y : function reify_imp_typ) => (fAp x y)).
Reify Pattern patterns_imp += (!! @Applicative.pure @ !! (Fun locals) @ # @ ?0) => (fun (x : function reify_imp_typ) => (fPure x)).
(** Table Entries **)
Local Notation "a >> b" := (tyArr a b) (at level 31,right associativity).
Reify Seed Typed Table term_table += 1 => [ (tyLProp >> tyCmd >> tyLProp >> tySProp) , triple ].
Reify Seed Typed Table term_table += 2 => [ (tyCmd >> tyCmd >> tyCmd) , Seq ].
Reify Seed Typed Table term_table += 3 => [ (tyVariable >> tyExpr >> tyCmd) , Assign ].
Reify Seed Typed Table term_table += 4 => [ (tyVariable >> tyExpr >> tyCmd) , Read ].
Reify Seed Typed Table term_table += 5 => [ (tyExpr >> tyExpr >> tyCmd) , Write ].
Reify Seed Typed Table term_table += 6 => [ tyCmd , Skip ].
Reify Seed Typed Table term_table += 7 => [ (tyNat >> tyNat >> tyHProp) , PtsTo ].
Reify Seed Typed Table term_table += 8 => [ (tyVariable >> (tyNat >> tyLProp) >> (tyNat >> tyLProp) >> tySProp) , function_spec ].
Definition elem_ctor : forall x : Syntax.typ, (typD x) -> @SymEnv.function _ _ :=
@SymEnv.F _ _.
Ltac reify_imp e :=
let k fs e :=
pose e in
reify_expr reify_imp k
[ (fun (y : @mk_dvar_map _ _ _ _ term_table elem_ctor) => True) ]
[ e ].
Goal True.
reify_imp 1.
reify_imp Skip.
reify_imp (ILogic.lentails True True).
reify_imp ((True -> False) -> True).
reify_imp (forall G P Q, ILogic.lentails G (triple P Skip Q)).
generalize (String.EmptyString).
intro x.
exact I.
Defined.
|
{"author": "jesper-bengtson", "repo": "MirrorCharge", "sha": "cb0fe1da80be70ba4b744d4178a4e6e3afa38e62", "save_path": "github-repos/coq/jesper-bengtson-MirrorCharge", "path": "github-repos/coq/jesper-bengtson-MirrorCharge/MirrorCharge-cb0fe1da80be70ba4b744d4178a4e6e3afa38e62/MirrorCharge!/src/MirrorCharge/Imp/Reify.v"}
|
"""
By Dr Jie Zheng -Q, NAOC
v1 2019-04-27
"""
import numpy as np
from ..util import *
def add_distort():
pass
# pro add_distort, hdr, astr
#; NAME:
#; ADD_DISTORT
#; PURPOSE:
#; Add the distortion parameters in an astrometry structure to a FITS header.
#; EXPLANATION:
#; Called by PUTAST to add SIP (http://fits.gsfc.nasa.gov/registry/sip.html )
#; or TNX ( http://fits.gsfc.nasa.gov/registry/tnx.html ) distortion
#; parameters in an astrometry structure to a FITS header
#;
#; Prior to April 2012, PUTAST did not add distortion parameters so one
#; had to call ADD_DISTORT after PUTAST.
#;
#; IDL> putast,h ,astr0
#; IDL> add_distort,h,astr0
#;
#; CALLING SEQUENCE:
#; add_distort, hdr, astr
#;
#; INPUTS:
#; HDR - FITS header, string array. HDR will be updated to contain
#; the supplied astrometry.
#; ASTR - IDL structure containing values of the astrometry parameters
#; CDELT, CRPIX, CRVAL, CTYPE, LONGPOLE, PV2, and DISTORT
#; See EXTAST.PRO for more info about the structure definition
#;
#; PROCEDURES USED:
#; SXADDPAR, TAG_EXIST()
#; REVISION HISTORY:
#; Written by W. Landsman May 2005
#; Enforce i+j = n for ij coefficients of order n W. Landsman April 2012
#; Support IRAF TNX distortion M. Sullivan March 2014
#;;-
# npar = N_params()
#
# if ( npar LT 2 ) then begin ;Was header supplied?
# print,'Syntax: ADD_DISTORT, Hdr, astr'
# return
# endif
#
# add_distort = tag_exist(astr,'distort')
# IF(~ add_distort)THEN RETURN
#
# IF(astr.distort.name EQ 'SIP') then begin
#
# sxaddpar,hdr,'CTYPE1','RA---TAN-SIP'
# sxaddpar,hdr,'CTYPE2','DEC--TAN-SIP'
# distort = astr.distort
# a_dimen = size(distort.a,/dimen)
# b_dimen = size(distort.b,/dimen)
# ap_dimen = size(distort.ap,/dimen)
# bp_dimen = size(distort.bp,/dimen)
#
# if a_dimen[0] GT 0 then begin
# a_order = a_dimen[0]-1
# sxaddpar, hdr, 'A_ORDER', a_order, /savec, $
# 'polynomial order, axis 1, detector to sky '
# for i=0, a_order do begin
# for j = 0, a_order-i do begin
# aij = distort.a[i,j]
# if aij NE 0.0 then $
# sxaddpar, hdr, 'A_' + strtrim(i,2)+ '_' + strtrim(j,2), aij, $
# ' distortion coefficient', /savec
# endfor
# endfor
# endif
#
# if b_dimen[0] GT 0 then begin
# b_order = b_dimen[0]-1
# sxaddpar, hdr, 'B_ORDER', a_order, /savec , $
# 'polynomial order, axis 2, detector to sky'
# for i=0, b_order do begin
# for j = 0, b_order-i do begin
# bij = distort.b[i,j]
# if bij NE 0.0 then $
# sxaddpar, hdr, 'B_' + strtrim(i,2)+ '_' + strtrim(j,2), bij, $
# ' distortion coefficient', /savec
# endfor
# endfor
# endif
#
# if ap_dimen[0] GT 0 then begin
# ap_order = ap_dimen[0]-1
# sxaddpar, hdr, 'AP_ORDER', a_order, /savec, $
# ' polynomial order, axis 1, sky to detector '
# for i=0, ap_order do begin
# for j = 0, ap_order-i do begin
# apij = distort.ap[i,j]
# if apij NE 0.0 then $
# sxaddpar, hdr, 'AP_' + strtrim(i,2)+ '_' + strtrim(j,2), apij, $
# ' distortion coefficient', /savec
# endfor
# endfor
# endif
#
#
# if bp_dimen[0] GT 0 then begin
# bp_order = bp_dimen[0]-1
# sxaddpar, hdr, 'BP_ORDER', a_order, /savec, $
# ' polynomial order, axis 2, sky to detector '
# for i=0, bp_order do begin
# for j = 0, bp_order-i do begin
# bpij = distort.bp[i,j]
# if bpij NE 0.0 then $
# sxaddpar, hdr, 'BP_' + strtrim(i,2)+ '_' + strtrim(j,2), bpij, $
# ' distortion coefficient', /savec
# endfor
# endfor
# endif
#
# ENDIF ELSE IF(astr.distort.name EQ 'TNX')THEN BEGIN
#
# sxaddpar, hdr,'WAT0_001','system=image'
#
# string1='wtype=tnx axtype=ra lngcor = "3.'
# string1+= ' '+STRN(astr.distort.lngcor.xiorder,FORMAT='(F2.0)')
# string1+= ' '+STRN(astr.distort.lngcor.etaorder,FORMAT='(F2.0)')
# string1+= ' '+STRN(astr.distort.lngcor.xterms,FORMAT='(F2.0)')
# string1+= ' '+STRN(astr.distort.lngcor.ximin,FORMAT='(F19.16)')
# string1+= ' '+STRN(astr.distort.lngcor.ximax,FORMAT='(F19.16)')
# string1+= ' '+STRN(astr.distort.lngcor.etamin,FORMAT='(F19.16)')
# string1+= ' '+STRN(astr.distort.lngcor.etamax,FORMAT='(F19.16)')
# FOR i=0,N_ELEMENTS(astr.distort.lngcor.coeff)-1 DO BEGIN
# string1+=' '+STRN(astr.distort.lngcor.coeff[i],FORMAT='(F19.16)')
# ENDFOR
# string1+= '"'
#
# string2='wtype=tnx axtype=dec latcor = "3. '
# string2+= ' '+STRN(astr.distort.latcor.xiorder,FORMAT='(F2.0)')
# string2+= ' '+STRN(astr.distort.latcor.etaorder,FORMAT='(F2.0)')
# string2+= ' '+STRN(astr.distort.latcor.xterms,FORMAT='(F2.0)')
# string2+= ' '+STRN(astr.distort.latcor.ximin,FORMAT='(F19.16)')
# string2+= ' '+STRN(astr.distort.latcor.ximax,FORMAT='(F19.16)')
# string2+= ' '+STRN(astr.distort.latcor.etamin,FORMAT='(F19.16)')
# string2+= ' '+STRN(astr.distort.latcor.etamax,FORMAT='(F19.16)')
# FOR i=0,N_ELEMENTS(astr.distort.latcor.coeff)-1 DO BEGIN
# string2+= ' '+STRN(astr.distort.latcor.coeff[i],FORMAT='(F19.16)')
# ENDFOR
# string2+= '"'
#
# len1=STRLEN(string1)
# n1=len1/70
# IF(len1 MOD 68 GT 0)THEN n1++
# FOR i=0,n1-1 DO BEGIN
# s=STRMID(string1,i*68,68)
#; PRINT,'WAT1_'+STRN(i+1,FORMAT='(I3.3)'),' ',s
# sxaddpar, hdr,'WAT1_'+STRN(i+1,FORMAT='(I3.3)'),s
# ENDFOR
# len2=STRLEN(string2)
# n2=len2/70
# IF(len2 MOD 68 GT 0)THEN n2++
# FOR i=0,n2-1 DO BEGIN
# s=STRMID(string2,i*68,68)
#; PRINT,'WAT1_'+STRN(i+1,FORMAT='(I3.3)'),' ',s
# sxaddpar, hdr,'WAT2_'+STRN(i+1,FORMAT='(I3.3)'),s
# ENDFOR
#
# ENDIF
#
# return
# end
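# A minimal live sketch of the SIP branch documented above, assuming an
# astropy.io.fits Header `hdr` and a distortion object exposing 2-D numpy
# coefficient arrays `a`, `b`, `ap`, `bp` (the names follow the IDL source;
# apart from standard Header item assignment, this is illustrative and not
# the project's actual API).
def _add_sip_sketch(hdr, distort):
    hdr['CTYPE1'] = 'RA---TAN-SIP'
    hdr['CTYPE2'] = 'DEC--TAN-SIP'
    for name, coeff in (('A', distort.a), ('B', distort.b),
                        ('AP', distort.ap), ('BP', distort.bp)):
        coeff = np.atleast_2d(coeff)
        if coeff.size == 0:
            continue
        order = coeff.shape[0] - 1
        hdr[name + '_ORDER'] = order
        # write only the non-zero coefficients with i + j <= order
        for i in range(order + 1):
            for j in range(order + 1 - i):
                if coeff[i, j] != 0.0:
                    hdr['%s_%d_%d' % (name, i, j)] = float(coeff[i, j])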
|
{"hexsha": "df81e619d1bdbe7cebf2cf13b305e6b1fad1c4d5", "size": 6395, "ext": "py", "lang": "Python", "max_stars_repo_path": "idl2py/wcs/add_distort.py", "max_stars_repo_name": "RapidLzj/idl2py", "max_stars_repo_head_hexsha": "193051cd8d01db0d125b8975713b885ad521a992", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "idl2py/wcs/add_distort.py", "max_issues_repo_name": "RapidLzj/idl2py", "max_issues_repo_head_hexsha": "193051cd8d01db0d125b8975713b885ad521a992", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "idl2py/wcs/add_distort.py", "max_forks_repo_name": "RapidLzj/idl2py", "max_forks_repo_head_hexsha": "193051cd8d01db0d125b8975713b885ad521a992", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3352272727, "max_line_length": 82, "alphanum_fraction": 0.5444878812, "include": true, "reason": "import numpy", "num_tokens": 2274}
|
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
#
# Created on: Sat Nov 2 11:32:55 CET 2019
#
# Author(s): Francesco Urbani <https://urbanij.github.io>
#
# File wave_v0.py
# Description: Ex. Jan 27 201something
#
# ==========================================================
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.animation as animation
import scipy.constants
# from functions import *
from functions_py import *
### ******************
### constants
### ******************
π = np.pi
ε_0 = scipy.constants.epsilon_0
ζ_0 = scipy.constants.physical_constants['characteristic impedance of vacuum'][0]
### ******************
### data
### ******************
f = 1.8e9 # [Hz]
E_0 = 10.0 # [V]
ε_r_1 = 1.0
μ_r_1 = 1.0
σ_1 = 0.0 #[S/m]
ε_r_2 = 1.5
μ_r_2 = 1.0
σ_2 = 0.12 # [S/m]
wave = 'cosine'
### ******************
if __name__ == '__main__':
try:
while 1:
_ = float(input(f"Insert f [Hz]: (or enter to use default value {f:.4g}) "))
if _ > 0: f = _; break
    except Exception as e:
        print(f"Using default: f = {f:.4g}")  # UnboundLocalError is already covered by Exception
try:
while 1:
_ = float(input(f"Insert E_0 [V]: (or enter to use default value {E_0:.4g}) "))
if _ > 0: E_0 = _; break
except Exception as e:
print(f"Using default: E_0 = {E_0:.4g}")
try:
while 1:
_ = float(input(f"Insert ε_r_1: (or enter to use default value {ε_r_1:.4g}) "))
if _ >= 1: ε_r_1 = _; break
except Exception as e:
print(f"Using default: ε_r_1 = {ε_r_1:.4g}")
try:
while 1:
_ = float(input(f"Insert μ_r_1: (or enter to use default value {μ_r_1:.4g}) "))
if _ >= 1: μ_r_1 = _; break
except Exception as e:
print(f"Using default: μ_r_1 = {μ_r_1:.4g}")
try:
while 1:
_ = float(input(f"Insert σ_1: (or enter to use default value {σ_1:.4g}) "))
if _ >= 0: σ_1 = _; break
except Exception as e:
print(f"Using default: σ_1 = {σ_1:.4g}")
try:
while 1:
_ = float(input(f"Insert ε_r_2: (or enter to use default value {ε_r_2:.4g}) "))
if _ >= 1: ε_r_2 = _; break
except Exception as e:
print(f"Using default: ε_r_2 = {ε_r_2:.4g}")
try:
while 1:
_ = float(input(f"Insert μ_r_2: (or enter to use default value {μ_r_2:.4g}) "))
if _ >= 1: μ_r_2 = _; break
except Exception as e:
print(f"Using default: μ_r_2 = {μ_r_2:.4g}")
try:
while 1:
_ = float(input(f"Insert σ_2: (or enter to use default value {σ_2:.4g}) "))
if _ >= 0: σ_2 = _; break
except Exception as e:
print(f"Using default: σ_2 = {σ_2:.4g}")
try:
_ = input("Insert wave: cosine / gaussian / rectangular pulse ([c]/g/r)? ")
if _ == 'c': wave = "cosine"
elif _ == 'g': wave = "gaussian"
elif _ == 'r': wave = "rect"
except Exception as e:
print(f"Using default: cosine wave")
wave = "cosine"
### ******************
### relations
### ******************
ω = 2 * π * f # [rad/s]
### ******************
### computations
### ******************
U = σ_2 / (ω * ε_0 * ε_r_2)
material_type = 'Good conductor' if (U >= 1e2) else \
'Dielectric' if (U < 1e2 and U >= 1e-2) else \
'Insulator'
print("*"*20)
print(f"σ_2/(ω*ε_0*ε_r_2) = {U:.4g} ==> medium 2 is a(n) \033[92m{material_type}\x1b[0m")
ε_eq_1 = epsilon_eq(ε_r_1, ω, σ_1)
ε_eq_2 = epsilon_eq(ε_r_2, ω, σ_2)
μ_eq_1 = mu_eq(μ_r_1)
μ_eq_2 = mu_eq(μ_r_2)
ζ_1 = zeta(ε_eq_1, μ_eq_1)
ζ_2 = zeta(ε_eq_2, μ_eq_2)
k_1 = k(μ_eq_1, ε_eq_1, ω)
k_2 = k(μ_eq_2, ε_eq_2, ω)
Γ_e = gamma(ζ_2, ζ_1)
τ_e = 1 + Γ_e
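# The helpers imported from functions_py are assumed to implement the
# standard plane-wave relations (a sketch of the conventions, not the
# actual module):
#   epsilon_eq(ε_r, ω, σ) ≈ ε_0·ε_r - j·σ/ω          equivalent permittivity
#   zeta(ε_eq, μ_eq)      = sqrt(μ_eq / ε_eq)        intrinsic impedance
#   k(μ_eq, ε_eq, ω)      = ω·sqrt(μ_eq·ε_eq)        complex wavenumber
#   gamma(ζ_2, ζ_1)       = (ζ_2 - ζ_1)/(ζ_2 + ζ_1)  reflection coefficient
# so τ_e = 1 + Γ_e is the transmission coefficient at the interface.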
if material_type == 'Good conductor':
print("*"*3)
print("\tapprox results (good conductor material):")
δ = np.sqrt(2/(ω*μ_eq_2*σ_2))
k_2 = (1/δ)*(1-1j)
ζ_2 = (1/(σ_2*δ))*(1+1j)
print(f"\tδ = {δ:.4g}")
print(f"\tk_2 = {(1/δ):.4g}·(1-j)")
print(f"\tζ_2 = {(1/(σ_2*δ)):.4g}·(1+j)")
print("*"*3)
β = k_2.real #
α = k_2.imag # damping coefficient # k := β + j*α , (α < 0)
print(f"μ_eq_1 = {μ_eq_1:.4g}")
print(f"μ_eq_2 = {μ_eq_2:.4g}")
print(f"ε_eq_1 = {ε_eq_1:.4g}")
print(f"ε_eq_2 = {ε_eq_2:.4g}")
print(f"ζ_1 = {ζ_1:.4g}")
print(f"ζ_2 = ζ_0·({ζ_2/ζ_0:.4g}) = {ζ_2:.4g}")
print(f"k_1 = {k_1:.4g}")
print(f"k_2 = {k_2:.4g}")
print(f"Γ_e = {Γ_e:.4g} = {abs(Γ_e):.4g} ∠ {np.angle(Γ_e):.4g}")
print(f"τ_e = {τ_e:.4g} = {abs(τ_e):.4g} ∠ {np.angle(τ_e):.4g}")
d_neg = -3*Lambda(f, ε_eq_1, μ_eq_1) # show 3 wavelengths before the discontinuity
try:
δ = -1/α # skin depth
d_pos = 5*δ + 0.1 * -d_neg/3 # 5 delta + 10 % λ_1
except ZeroDivisionError as e:
print(e)
else:
print(f"δ = {δ:.4g}")
if abs(δ) == np.inf:
d_pos = -d_neg # λ_1
d_pos = d_pos.real # just to make sure...
λ_1 = Lambda(f, ε_eq_1, μ_eq_1)
v_1 = v(ε_eq_1, μ_eq_1)
print(f"λ_1 = {λ_1:.4g}")
print(f"v_1 = {v_1:.4g}")
z_neg = np.linspace(d_neg, 0, 400, endpoint=True)
z_pos = np.linspace(0, d_pos, 400, endpoint=True)
z = z_neg + z_pos
frames = 80
t1 = np.linspace(0, λ_1/v_1, frames)
t2 = np.linspace(-1e-9, 1.3e-9, frames)
t3 = np.linspace(-2e-9, 2e-9, frames)
# E1_i = lambda k, z, t: E_0 * np.exp(1j*k*z) * np.exp(1j*ω*t)
def cosine(k, z, t):
return E_0 * np.exp(1j*k*z) * np.exp(1j*ω*t)
def windowed_cosine(k, z, t):
window = 3
return cosine(k, z, t) * (np.heaviside(ω*t + k.real*z+window,1e-6) - np.heaviside(ω*t + k.real*z-window, 1e-6))
def gaussian(k, z, t, rms=1.20, A=E_0):
return A * 1/np.sqrt(2*π*rms**2) * np.exp(-((ω*t + k.real*z)**2)/(2 * rms**2)) * np.exp(-k.imag*z)
def rect(k, z, t):
window = 3
return (np.heaviside(ω*t + k.real*z+window,1e-6) - np.heaviside(ω*t + k.real*z-window, 1e-6)) * np.exp(-k.imag*z)
if wave == 'cosine':
E1_i = cosine
elif wave == 'gaussian':
E1_i = gaussian
elif wave == 'rect':
E1_i = rect
else:
raise Exception("Invalid wave")
t = t2 if E1_i == gaussian else t3 if E1_i == rect else t1
e1_i = lambda z, t: ( E1_i(k_1, -z, t)).real
e1_r = lambda z, t: (Γ_e * E1_i(k_1, +z, t)).real
e2_t = lambda z, t: (τ_e * E1_i(k_2, -z, t)).real
e1_tot = lambda z, t: e1_i(z,t) + e1_r(z, t)
S_i = 0.5 * 1/ζ_1 * abs(E_0)**2
S_t = S_i * (1-(abs(Γ_e))**2)
print(f"S_i = {S_i:.4g}")
print(f"S_t = {S_t:.4g} = {100*S_t/S_i:.4g}% S_i")
E_max_over_E_min = lambda E_max, E_min : E_max/E_min
if __name__ == '__main__':
if input("Want to plot? ([y]/n)? ") != 'n':
fig = plt.figure(figsize=(10,8))
fig.set_dpi(100)
def animate(i):
plt.clf()
plt.title("Traveling wave, interface @ z = 0" + \
"\nmedium 1: z<0, $\epsilon_r$=" + str(ε_r_1) + ", $\mu_r$=" + str(μ_r_1) + ", $\sigma$ =" + str(σ_1) + \
"\nmedium 2: z>0, $\epsilon_r$=" + str(ε_r_2) + ", $\mu_r$=" + str(μ_r_2) + ", $\sigma$ =" + str(σ_2)
)
plt.plot(z_neg, e1_i(z_neg, t[i]), "--", color='blue', label='$e_1^i(z,t)$', linewidth=1)
plt.plot(z_neg, e1_r(z_neg, t[i]), "-.", color='red', label='$e_1^r(z,t)$', linewidth=1)
plt.plot(z_neg, e1_tot(z_neg, t[i]), "-", color='green', label='$e_1^{tot}(z,t)$', linewidth=1.5)
plt.plot(z_pos, e2_t(z_pos, t[i]), "-", color='orange', label='$e_2^t(z,t)$', linewidth=1.5)
plt.xlabel('space (z)')
plt.ylabel('E [V/m]')
plt.legend(loc='upper right')
if wave == "gaussian":
peak_high=max(e1_tot(z, t[0]))
peak_low=min(e1_tot(z, t[40]))
elif wave == "cosine":
peak_high=max(e1_tot(z, t[0]))
peak_low=-peak_high
else:
peak_high=max(e1_tot(z, t[2]))
peak_low=min(e1_tot(z, t[40]))
plt.ylim([1.6*peak_low, 1.6*peak_high])
plt.xlim([d_neg, d_pos])
plt.grid(True)
anim = animation.FuncAnimation(fig, animate, frames=frames, interval=10)
if input("Want to save plot animation? (y/[n])? ") == 'y':
### Save animation
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
anim.save('media/traveling_wave_1.mp4', writer=writer, dpi=200)
else:
plt.show()
|
{"hexsha": "2c7a271b574386e43f2544ddf4b229dcd99bbe25", "size": 8568, "ext": "py", "lang": "Python", "max_stars_repo_path": "ef/1_waves/wave_v0.py", "max_stars_repo_name": "urbanij/misc-scripts", "max_stars_repo_head_hexsha": "c4e6ee881dfe84342ee6bd34ceea1efe7d222dce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ef/1_waves/wave_v0.py", "max_issues_repo_name": "urbanij/misc-scripts", "max_issues_repo_head_hexsha": "c4e6ee881dfe84342ee6bd34ceea1efe7d222dce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ef/1_waves/wave_v0.py", "max_forks_repo_name": "urbanij/misc-scripts", "max_forks_repo_head_hexsha": "c4e6ee881dfe84342ee6bd34ceea1efe7d222dce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.56, "max_line_length": 125, "alphanum_fraction": 0.5206582633, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3181}
|
"""
pylayers
=========
This file is adapted from the scikit-learn package
"""
import sys
__version__ = '0.12-git'
try:
    # This variable is injected in the __builtins__ by the build
    # process. It is used to enable importing subpackages of pylayers
    # when the binaries are not built
__PYLAYERS_SETUP__
except NameError:
__PYLAYERS_SETUP__ = False
if __PYLAYERS_SETUP__:
sys.stderr.write('Partial import of pylayers during the build process.\n')
    # We are not importing the rest of the package during the build
    # process, as it may not be compiled yet
else:
try:
from numpy.testing import nosetester
class _NoseTester(nosetester.NoseTester):
""" Subclass numpy's NoseTester to add doctests by default
"""
def test(self, label='fast', verbose=1, extra_argv=['--exe'],
doctests=True, coverage=False):
"""Run the full test suite
Examples
--------
This will run the test suite and stop at the first failing
example
>>> from pylayers import test
>>> test(extra_argv=['--exe', '-sx']) #doctest: +SKIP
"""
return super(_NoseTester, self).test(label=label, verbose=verbose,
extra_argv=extra_argv,
doctests=doctests, coverage=coverage)
try:
test = _NoseTester(raise_warnings="release").test
except TypeError:
# Older versions of numpy do not have a raise_warnings argument
test = _NoseTester().test
del nosetester
except:
pass
__all__ = ['gis', 'signal', 'antprop', 'simul','util']
|
{"hexsha": "402e42a456619510bfd3e244aafbc1d8578408f1", "size": 1786, "ext": "py", "lang": "Python", "max_stars_repo_path": "pylayers/gui/__init__.py", "max_stars_repo_name": "usmanwardag/pylayers", "max_stars_repo_head_hexsha": "2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 143, "max_stars_repo_stars_event_min_datetime": "2015-01-09T07:50:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T11:26:53.000Z", "max_issues_repo_path": "pylayers/gui/__init__.py", "max_issues_repo_name": "usmanwardag/pylayers", "max_issues_repo_head_hexsha": "2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 148, "max_issues_repo_issues_event_min_datetime": "2015-01-13T04:19:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:48:25.000Z", "max_forks_repo_path": "pylayers/gui/__init__.py", "max_forks_repo_name": "usmanwardag/pylayers", "max_forks_repo_head_hexsha": "2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 95, "max_forks_repo_forks_event_min_datetime": "2015-05-01T13:22:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T11:22:28.000Z", "avg_line_length": 31.3333333333, "max_line_length": 82, "alphanum_fraction": 0.5739081747, "include": true, "reason": "from numpy", "num_tokens": 396}
|
import numpy as np
import pandas as pd
'''
data: path to data directory organized as follows:
data
class_a
example1
example2
...
class_b
example3
...
class_c
example4
...
groups: number of groups of data to split into
returns names of examples in each group of data
'''
def stratified_split(data, groups):
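    # NOTE: hypothetical sketch -- the original file ends at the signature above.
    # Walk each class directory and deal its examples round-robin into `groups`
    # buckets, so every bucket roughly preserves the class proportions.
    import os
    splits = [[] for _ in range(groups)]
    for class_name in sorted(os.listdir(data)):
        class_dir = os.path.join(data, class_name)
        if not os.path.isdir(class_dir):
            continue
        for i, example in enumerate(sorted(os.listdir(class_dir))):
            splits[i % groups].append(example)
    return splits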
|
{"hexsha": "40b6694daa787b71e9bfb51ace0ccdc320115278", "size": 329, "ext": "py", "lang": "Python", "max_stars_repo_path": "fairtorch/preprocessing/balancedata.py", "max_stars_repo_name": "maxhirsch/fair-ML", "max_stars_repo_head_hexsha": "9616cab93fd7c016c71250ce687f9f6faf131015", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-01-07T05:14:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T13:07:17.000Z", "max_issues_repo_path": "fairtorch/preprocessing/balancedata.py", "max_issues_repo_name": "maxhirsch/fair-ML", "max_issues_repo_head_hexsha": "9616cab93fd7c016c71250ce687f9f6faf131015", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-30T22:18:50.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-31T02:01:36.000Z", "max_forks_repo_path": "fairtorch/preprocessing/balancedata.py", "max_forks_repo_name": "maxhirsch/fair-ML", "max_forks_repo_head_hexsha": "9616cab93fd7c016c71250ce687f9f6faf131015", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-25T03:30:18.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-25T03:30:18.000Z", "avg_line_length": 13.16, "max_line_length": 50, "alphanum_fraction": 0.7112462006, "include": true, "reason": "import numpy", "num_tokens": 92}
|
"""
TechMiner.RecordsDataFrame
==================================================================================================
"""
import pandas as pd
import math
import numpy as np
from sklearn.decomposition import PCA
from techminer.common import cut_text
from techminer.result import Result
import matplotlib.pyplot as plt
import networkx as nx
from collections import OrderedDict
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.optimize import minimize
from shapely.geometry import Point, LineString
import matplotlib.colors as colors
import matplotlib.cm as cm
#---------------------------------------------------------------------------------------------
def _minmax(data, minmax):
"""Selectet records among (minval, maxval) = minmax.
Arguments:
data : df
Returns:
techminer.
"""
if minmax is None:
return data
minval, maxval = minmax
data = data[ data[data.columns[-1]] >= minval ]
data = data[ data[data.columns[-1]] <= maxval ]
return data
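# e.g. _minmax(df, (2, 5)) keeps only the rows whose last column lies in [2, 5];
# _minmax(df, None) returns df unchanged.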
#------------------------------------------------------------------------------------------------------------
def _expand_column(data, column, sep):
if sep is None:
return data
data[column] = data[column].map(lambda x: x.split(sep) if x is not None else None)
data[column] = data[column].map(
lambda x: [z.strip() for z in x] if isinstance(x, list) else x
)
data = data.explode(column)
data.index = range(len(data))
return data
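# e.g. _expand_column(df, 'Authors', ',') splits a cell 'A, B' into ['A', 'B']
# and explodes it, repeating the record once per author.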
#---------------------------------------------------------------------------------------------
class RecordsDataFrame(pd.DataFrame):
"""Class to represent a dataframe of bibliographic records.
"""
#----------------------------------------------------------------------------------------------
@property
def _constructor_expanddim(self):
return self
#----------------------------------------------------------------------------------------------
def _add_documents_by_terms_to_label(self, result, column, sep):
count = self.documents_by_terms(column, sep)
count = {key : value for key, value in zip(count[count.columns[0]], count[count.columns[1]])}
result[column] = result[column].map(lambda x: cut_text(str(x) + ' [' + str(count[x]) + ']'))
return result
#----------------------------------------------------------------------------------------------
def _years_list(self):
df = self[['Year']].copy()
df['Year'] = df['Year'].map(lambda x: None if np.isnan(x) else x)
df = df.dropna()
df['Year'] = df['Year'].map(int)
minyear = min(df.Year)
maxyear = max(df.Year)
return pd.Series(0, index=range(minyear, maxyear+1), name='Year')
#----------------------------------------------------------------------------------------------
def _aduna_map(self, column, sep=None, top_n=None, figsize=(12,10), font_size=10):
"""
"""
# computes the number of documents by term
tdf_matrix = self.tdf(column, sep, top_n)
tdf_matrix.columns = [cut_text(w) for w in tdf_matrix.columns]
## figure properties
plt.figure(figsize=figsize)
## graph
graph = nx.Graph()
## adds nodes to graph
terms = list(set(tdf_matrix.columns.tolist()))
docs = [str(i) for i in range(len(tdf_matrix.index.tolist()))]
graph.add_nodes_from(terms)
graph.add_nodes_from(docs)
for col in terms:
for idx in tdf_matrix.index:
if tdf_matrix.at[idx, col] > 0:
graph.add_edge(col, str(idx))
## graph layout
path_length = nx.shortest_path_length(graph)
distances = pd.DataFrame(index=graph.nodes(), columns=graph.nodes())
for row, data in path_length:
for col, dist in data.items():
distances.loc[row,col] = dist
distances = distances.fillna(distances.max().max())
layout = nx.kamada_kawai_layout(graph, dist=distances.to_dict())
## draw terms nodes
nx.draw_networkx_nodes(
graph,
layout,
nodelist=terms,
node_size=300,
node_color='red')
nx.draw_networkx_nodes(
graph,
layout,
nodelist=docs,
node_size=200,
edgecolors='black',
node_color='lightgray')
x_left, x_right = plt.xlim()
y_left, y_right = plt.ylim()
delta_x = (x_right - x_left) * 0.01
delta_y = (y_right - y_left) * 0.01
# for node in terms:
# x_pos, y_pos = layout[node]
# plt.text(
# x_pos + delta_x,
# y_pos + delta_y,
# node,
# size=font_size,
# ha='left',
# va='bottom',
# bbox=dict(
# boxstyle="square",
# ec='gray',
# fc='white',
# ))
## edges
nx.draw_networkx_edges(
graph,
layout,
width=1
)
plt.axis('off')
#----------------------------------------------------------------------------------------------
def auto_corr(self, column, sep=None, top_n=20, cut_value=0):
"""Computes the autocorrelation among items in a column of the dataframe.
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.auto_corr(column='Authors', sep=',', top_n=5) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Authors (row) Authors (col) Autocorrelation ID
0 Wang J. [7] Wang J. [7] 1.0 [[*3*], [*10*], [*15*], [*80*], [*87*], [*128*]]
1 Zhang G. [4] Zhang G. [4] 1.0 [[*27*], [*78*], [*117*], [*119*]]
2 Hernandez G. [3] Hernandez G. [3] 1.0 [[*52*], [*94*], [*100*]]
3 Yan X. [3] Yan X. [3] 1.0 [[*13*], [*44*], [*85*]]
4 Tefas A. [3] Tefas A. [3] 1.0 [[*8*], [*110*], [*114*]]
5 Hernandez G. [3] Wang J. [7] 0.0 None
6 Tefas A. [3] Hernandez G. [3] 0.0 None
7 Tefas A. [3] Yan X. [3] 0.0 None
8 Tefas A. [3] Zhang G. [4] 0.0 None
9 Tefas A. [3] Wang J. [7] 0.0 None
10 Hernandez G. [3] Tefas A. [3] 0.0 None
11 Wang J. [7] Yan X. [3] 0.0 None
12 Hernandez G. [3] Yan X. [3] 0.0 None
13 Hernandez G. [3] Zhang G. [4] 0.0 None
14 Wang J. [7] Tefas A. [3] 0.0 None
15 Zhang G. [4] Wang J. [7] 0.0 None
16 Yan X. [3] Hernandez G. [3] 0.0 None
17 Wang J. [7] Zhang G. [4] 0.0 None
18 Yan X. [3] Zhang G. [4] 0.0 None
19 Yan X. [3] Wang J. [7] 0.0 None
20 Zhang G. [4] Tefas A. [3] 0.0 None
21 Zhang G. [4] Hernandez G. [3] 0.0 None
22 Zhang G. [4] Yan X. [3] 0.0 None
23 Wang J. [7] Hernandez G. [3] 0.0 None
24 Yan X. [3] Tefas A. [3] 0.0 None
"""
result = self.cross_corr(
column_r=column, column_c=column, sep_r=sep, sep_c=sep, top_n=top_n, cut_value=cut_value)
result._call = 'auto_corr'
return result
#----------------------------------------------------------------------------------------------
def citations_by_terms(self, column, sep=None, top_n=None, minmax=None):
"""Computes the number of citations by item in a column.
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.citations_by_terms(column='Authors', sep=',', top_n=10)
Authors Cited by ID
0 Yeh W.-C. [1] 188.0 [[*140*]]
1 Hsieh T.-J. [1] 188.0 [[*140*]]
2 Hsiao H.-F. [1] 188.0 [[*140*]]
3 Hussain A.J. [2] 52.0 [[*125*], [*139*]]
4 Krauss C. [1] 49.0 [[*62*]]
5 Fischer T. [1] 49.0 [[*62*]]
6 Wang J. [7] 46.0 [[*80*], [*87*], [*128*], [*128*]]
7 Liatsis P. [1] 42.0 [[*139*]]
8 Ghazali R. [1] 42.0 [[*139*]]
9 Yoshihara A. [1] 37.0 [[*124*]]
>>> rdf.citations_by_terms(column='Authors', sep=',', minmax=(30,50))
Authors Cited by ID
0 Krauss C. [1] 49.0 [[*62*]]
1 Fischer T. [1] 49.0 [[*62*]]
2 Wang J. [7] 46.0 [[*80*], [*87*], [*128*], [*128*]]
3 Liatsis P. [1] 42.0 [[*139*]]
4 Ghazali R. [1] 42.0 [[*139*]]
5 Yoshihara A. [1] 37.0 [[*124*]]
6 Matsubara T. [1] 37.0 [[*124*]]
7 Akita R. [1] 37.0 [[*124*]]
8 Uehara K. [1] 37.0 [[*124*]]
9 Passalis N. [3] 31.0 [[*110*], [*114*]]
10 Gabbouj M. [3] 31.0 [[*110*], [*114*]]
11 Kanniainen J. [3] 31.0 [[*110*], [*114*]]
12 Tefas A. [3] 31.0 [[*110*], [*114*]]
13 Tsantekidis A. [2] 31.0 [[*110*], [*114*]]
14 Iosifidis A. [3] 31.0 [[*110*], [*114*]]
"""
data = self[[column, 'Cited by', 'ID']].dropna()
data = _expand_column(data, column, sep)
numcitations = data.groupby([column], as_index=True).agg({
'Cited by': np.sum
})
result = pd.DataFrame({
column : numcitations.index,
'Cited by' : numcitations['Cited by'].tolist()
})
result = result.sort_values(by='Cited by', ascending=False)
result.index = result[column]
if top_n is not None and len(result) > top_n:
result = result.head(top_n)
result = _minmax(result, minmax)
result['ID'] = None
for current_term in result[result.columns[0]]:
selected_IDs = data[data[column] == current_term]['ID']
if len(selected_IDs):
result.at[current_term,'ID'] = selected_IDs.tolist()
## counts the number of documents --------------------------------------------------------
        result = self._add_documents_by_terms_to_label(result, column, sep)
## end -----------------------------------------------------------------------------------
result.index = list(range(len(result)))
return Result(result, call='citations_by_terms')
#----------------------------------------------------------------------------------------------
def citations_by_terms_by_year(self, column, sep=None, top_n=None, minmax=None):
"""Computes the number of citations by term by year in a column.
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.citations_by_terms_by_year('Authors', sep=',', top_n=5)
Authors Year Cited by ID
0 Hsiao H.-F. [1] 2011 [2] 188.0 [[*140*]]
1 Hsieh T.-J. [1] 2011 [2] 188.0 [[*140*]]
2 Hussain A.J. [2] 2011 [2] 42.0 [[*139*]]
3 Hussain A.J. [2] 2016 [5] 10.0 [[*125*]]
4 Krauss C. [1] 2018 [52] 49.0 [[*62*]]
5 Yeh W.-C. [1] 2011 [2] 188.0 [[*140*]]
"""
data = self[[column, 'Cited by', 'Year', 'ID']].dropna()
data['Year'] = data['Year'].map(int)
data = _expand_column(data, column, sep)
numcitations = data.groupby(by=[column, 'Year'], as_index=True).agg({
'Cited by': np.sum
})
## results dataframe
a = [t for t,_ in numcitations.index]
b = [t for _,t in numcitations.index]
result = pd.DataFrame({
column : a,
'Year' : b,
'Cited by' : numcitations['Cited by'].tolist()
})
## rows
top = self.citations_by_terms(column, sep)
if top_n is not None and len(top) > top_n:
top = top[0:top_n][column].tolist()
top = [u[0:u.find('[')].strip() for u in top]
selected = [True if row[0] in top else False for idx, row in result.iterrows()]
result = result[selected]
result = _minmax(result, minmax)
result['ID'] = None
for idx, row in result.iterrows():
selected_IDs = data[(data[column] == row[0]) & (data['Year'] == row[1])]['ID']
if len(selected_IDs):
result.at[idx, 'ID'] = selected_IDs.tolist()
        ## counts the number of documents only in the results matrix -----------------------
        result = self._add_documents_by_terms_to_label(result, column, sep)
        result = self._add_documents_by_terms_to_label(result, 'Year', sep=None)
result.index = list(range(len(result)))
return Result(result, call='citations_by_terms_by_year')
#----------------------------------------------------------------------------------------------
def citations_by_year(self, cumulative=False):
"""Computes the number of citations by year.
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.citations_by_year().head()
Year Cited by ID
0 2010 [3] 21.0 [[*142*], [*143*]]
1 2011 [2] 230.0 [[*139*], [*140*]]
2 2012 [2] 16.0 [[*137*], [*138*]]
3 2013 [4] 36.0 [[*133*], [*134*], [*135*], [*136*]]
4 2014 [2] 23.0 [[*131*], [*132*]]
"""
## computes number of citations
data = self[['Year', 'Cited by', 'ID']].dropna()
data['Year'] = data['Year'].map(int)
citations = data.groupby(['Year'], as_index=True).agg({
'Cited by': np.sum
})
result = self._years_list()
result = result.to_frame()
result['Year'] = result.index
result['Cited by'] = 0
result.at[citations.index, 'Cited by'] = citations['Cited by'].tolist()
result.index = list(range(len(result)))
## IDs ---------------------------------------------------------------------------------
result['ID'] = None
for idx, row in result.iterrows():
selected_IDs = data[(data['Year'] == row[0]) & (data['Cited by'] > 0)]['ID']
if len(selected_IDs):
result.at[idx, 'ID'] = selected_IDs.tolist()
## end ----------------------------------------------------------------------------------
if cumulative is True:
result['Cited by'] = result['Cited by'].cumsum()
## counts the number of documents --------------------------------------------------------
count = self.documents_by_year(cumulative=cumulative)
count = {key : value for key, value in zip(count[count.columns[0]], count[count.columns[1]])}
result['Year'] = result['Year'].map(lambda x: cut_text(str(x) + ' [' + str(count[x]) + ']'))
## end -----------------------------------------------------------------------------------
return Result(result, call='citations_by_year')
#----------------------------------------------------------------------------------------------
def co_ocurrence(self, column_r, column_c, sep_r=None, sep_c=None, top_n=None, minmax=None):
"""Computes the number of documents containing two given items in different columns.
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.co_ocurrence(column_r='Authors', sep_r=',', column_c='Document Type', top_n=5)
Authors (row) Document Type (col) Num Documents ID
0 Hernandez G. [3] Conference Paper [12] 3 [[*52*], [*94*], [*100*]]
1 Tefas A. [3] Conference Paper [12] 3 [[*8*], [*110*], [*114*]]
2 Wang J. [7] Article [8] 5 [[*3*], [*10*], [*80*], [*128*], [*128*]]
3 Wang J. [7] Conference Paper [12] 2 [[*15*], [*87*]]
4 Yan X. [3] Article [8] 1 [[*44*]]
5 Yan X. [3] Conference Paper [12] 2 [[*13*], [*85*]]
6 Zhang G. [4] Article [8] 2 [[*27*], [*117*]]
7 Zhang G. [4] Conference Paper [12] 2 [[*78*], [*119*]]
"""
## computes the number of documents by term by term
data = self[[column_r, column_c, 'ID']].dropna()
top_r = self.documents_by_terms(column_r, sep_r)
top_c = self.documents_by_terms(column_c, sep_c)
data.columns = [column_r + ' (row)', column_c + ' (col)', 'ID' ]
column_r += ' (row)'
column_c += ' (col)'
data = _expand_column(data, column_r, sep_r)
data = _expand_column(data, column_c, sep_c)
## number of documents
numdocs = data.groupby(by=[column_r, column_c]).size()
## results dataframe
a = [t for t,_ in numdocs.index]
b = [t for _,t in numdocs.index]
result = pd.DataFrame({
column_r : a,
column_c : b,
'Num Documents' : numdocs.tolist()
})
## compute top_n terms
if top_n is not None:
## rows
# top = self.documents_by_terms(column_r, sep_r)
if len(top_r) > top_n:
top_r = top_r[0:top_n][top_r.columns[0]].tolist()
selected = [True if row[0] in top_r else False for idx, row in result.iterrows()]
result = result[selected]
## cols
# top = self.documents_by_terms(column_c, sep_c)
if len(top_c) > top_n:
top_c = top_c[0:top_n][top_c.columns[0]].tolist()
selected = [True if row[1] in top_c else False for idx, row in result.iterrows()]
result = result[selected]
result = _minmax(result, minmax)
## collects the references
result['ID'] = None
for idx, row in result.iterrows():
term0 = row[0]
term1 = row[1]
selected_IDs = data[(data[column_r] == term0) & (data[column_c] == term1)]['ID']
if len(selected_IDs):
result.at[idx, 'ID'] = selected_IDs.tolist()
result.index = list(range(len(result)))
        ## counts the number of documents only in the results matrix -----------------------
        result = Result(result, call='co_ocurrence')
result._add_count_to_label(column_r)
result._add_count_to_label(column_c)
return result
#----------------------------------------------------------------------------------------------
def coverage(self):
"""Counts the number of None values per column.
Returns:
Pandas DataFrame.
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.coverage()
Field Number of items Coverage (%)
0 Authors 144 100.00%
1 Author(s) ID 144 100.00%
2 Title 144 100.00%
3 Year 144 100.00%
4 Source title 144 100.00%
5 Volume 97 67.36%
6 Issue 27 18.75%
7 Art. No. 49 34.03%
8 Page start 119 82.64%
9 Page end 119 82.64%
10 Page count 0 0.00%
11 Cited by 68 47.22%
12 DOI 133 92.36%
13 Affiliations 143 99.31%
14 Document Type 144 100.00%
15 Access Type 16 11.11%
16 Source 144 100.00%
17 EID 144 100.00%
18 Abstract 144 100.00%
19 Author Keywords 124 86.11%
20 Index Keywords 123 85.42%
21 References 137 95.14%
22 keywords 144 100.00%
23 CONF 144 100.00%
24 keywords (cleaned) 144 100.00%
25 SELECTED 144 100.00%
26 ID 144 100.00%
"""
result = pd.DataFrame({
'Field': self.columns,
'Number of items': [len(self) - self[col].isnull().sum() for col in self.columns],
'Coverage (%)': [ '{:5.2%}'.format((len(self) - self[col].isnull().sum()) / len(self)) for col in self.columns]
})
return result
#----------------------------------------------------------------------------------------------
def cross_corr(self, column_r, column_c=None, sep_r=None, sep_c=None, top_n=20, cut_value=0):
"""Computes the crosscorrelation among items in two different columns of the dataframe.
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.cross_corr(column_r='Authors', sep_r=',', column_c='Author Keywords', sep_c=';', top_n=5)
Authors Author Keywords Crosscorrelation ID
0 Yan X. [3] Financial time series [7] 0.218218 [[*13*]]
1 Hernandez G. [3] Deep learning [34] 0.198030 [[*94*], [*100*]]
2 Zhang G. [4] Financial time series [7] 0.188982 [[*119*]]
3 Zhang G. [4] Deep learning [34] 0.171499 [[*27*], [*117*]]
4 Zhang G. [4] Deep Learning [10] 0.158114 [[*78*]]
5 Wang J. [7] Deep learning [34] 0.140028 [[*3*], [*87*]]
6 Wang J. [7] Deep Learning [10] 0.129099 [[*15*]]
7 Yan X. [3] Deep learning [34] 0.099015 [[*13*]]
8 Hernandez G. [3] LSTM [18] 0.000000 None
9 Tefas A. [3] Recurrent neural network [8] 0.000000 None
10 Tefas A. [3] Deep Learning [10] 0.000000 None
11 Tefas A. [3] LSTM [18] 0.000000 None
12 Tefas A. [3] Deep learning [34] 0.000000 None
13 Hernandez G. [3] Financial time series [7] 0.000000 None
14 Hernandez G. [3] Recurrent neural network [8] 0.000000 None
15 Hernandez G. [3] Deep Learning [10] 0.000000 None
16 Yan X. [3] Deep Learning [10] 0.000000 None
17 Yan X. [3] Recurrent neural network [8] 0.000000 None
18 Wang J. [7] LSTM [18] 0.000000 None
19 Yan X. [3] LSTM [18] 0.000000 None
20 Zhang G. [4] Recurrent neural network [8] 0.000000 None
21 Zhang G. [4] LSTM [18] 0.000000 None
22 Wang J. [7] Financial time series [7] 0.000000 None
23 Wang J. [7] Recurrent neural network [8] 0.000000 None
24 Tefas A. [3] Financial time series [7] 0.000000 None
"""
if column_r == column_c:
sep_c = None
column_c = None
tdf_r = self.tdf(column_r, sep_r, top_n)
if column_c is not None:
tdf_c = self.tdf(column_c, sep_c, top_n)
else:
tdf_c = tdf_r.copy()
if column_c is not None:
col0 = column_r
col1 = column_c
col2 = 'Crosscorrelation'
else:
col0 = column_r + ' (row)'
col1 = column_r + ' (col)'
col2 = 'Autocorrelation'
terms_r = tdf_r.columns.tolist()
terms_c = tdf_c.columns.tolist()
result = pd.DataFrame({
col0 : [None] * (len(terms_r) * len(terms_c)),
col1 : [None] * (len(terms_r) * len(terms_c)),
col2 : [0.0] * (len(terms_r) * len(terms_c))
})
result['ID'] = None
idx = 0
for a in terms_r:
for b in terms_c:
s1 = tdf_r[a]
s2 = tdf_c[b]
num = np.sum((s1 * s2))
den = np.sqrt(np.sum(s1**2) * np.sum(s2**2))
value = num / den
result.at[idx, col0] = a
result.at[idx, col1] = b
result.at[idx, col2] = value
selected_IDs = self[(s1 > 0) & (s2 > 0)]['ID']
if len(selected_IDs):
result.at[idx, 'ID'] = selected_IDs.tolist()
idx += 1
#result = result.style.format('{0:.4}')
## cluster computation -------------------------------------------------------------------
## number of clusters
mtx = Result(result.copy(), call='cross_corr')
mtx = mtx.tomatrix()
mtx = mtx.applymap(lambda x: 1 if x > 0 else 0)
mtx = mtx.transpose()
mtx = mtx.drop_duplicates()
mtx = mtx.transpose()
clusters = mtx.columns
## dataframe with relationships among items
map_cluster = []
map_from = []
map_to = []
map_similariry = []
map_color = []
norm = colors.Normalize(vmin=0, vmax=len(clusters))
cmap = cm.get_cmap('tab20')
## similarity computation
for idx_cluster, cluster_term in enumerate(clusters):
## terms in r selected in the current cluster
cluster_index = mtx.index[mtx[cluster_term] > 0]
for idx0_r, term0_r in enumerate(cluster_index):
for idx1_r, term1_r in enumerate(cluster_index):
if idx1_r <= idx0_r:
continue
## docs related to term0 and term1
idx = (tdf_r[term0_r] > 0) | (tdf_r[term1_r] > 0)
tdf_similarity = tdf_c[ (idx) & (tdf_c[cluster_term] > 0)]
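                    ## Average Jaccard similarity over pairs of documents:
                    ## J(A, B) = |A ∩ B| / (|A| + |B| - |A ∩ B|), computed on the
                    ## binary term vectors of each document pair below.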
jaccard = 0.0
n_jaccard = 0
for idx_i, doc_i in tdf_similarity.iterrows():
for idx_j, doc_j in tdf_similarity.iterrows():
if idx_i == idx_j:
break
terms_i = doc_i.tolist()
terms_j = doc_j.tolist()
intersection = [i*j for i, j in zip(terms_i, terms_j)]
len_i = sum(terms_i)
len_j = sum(terms_j)
len_c = sum(intersection)
jaccard += float(len_c) / (len_i + len_j - len_c)
n_jaccard += 1
if n_jaccard == 0:
jaccard = 1.0
else:
jaccard = jaccard / n_jaccard
map_cluster += [cluster_term]
map_from += [term0_r]
map_to += [term1_r]
map_similariry += [jaccard]
map_color += [cmap(norm(idx_cluster))]
map_data = pd.DataFrame({
'cluster' : map_cluster,
'from_node' : map_from,
'to_node' : map_to,
'similarity' : map_similariry,
'color' : map_color
})
map_data = map_data.drop_duplicates(subset=['from_node', 'to_node'])
## end -----------------------------------------------------------------------------------
## line style for diagrams ---------------------------------------------------------------
map_data['linewidth'] = None
map_data['linestyle'] = None
for idx, row in map_data.iterrows():
if row[3] >= 0.75:
map_data.at[idx, 'linewidth'] = 4
map_data.at[idx, 'linestyle'] = '-'
elif row[3] >= 0.50:
map_data.at[idx, 'linewidth'] = 2
map_data.at[idx, 'linestyle'] = '-'
elif row[3] >= 0.25:
map_data.at[idx, 'linewidth'] = 2
map_data.at[idx, 'linestyle'] = '--'
elif row[3] < 0.25:
map_data.at[idx, 'linewidth'] = 1
map_data.at[idx, 'linestyle'] = ':'
else:
map_data.at[idx, 'linewidth'] = 0
map_data.at[idx, 'linestyle'] = '-'
## end -----------------------------------------------------------------------------------
## adds number of records to columns -----------------------------------------------------
num = self.documents_by_terms(column_r, sep_r)
new_names = {}
for idx, row in num.iterrows():
old_name = row[0]
new_name = old_name + ' [' + str(row[1]) + ']'
new_names[old_name] = new_name
result[col0] = result[col0].map(lambda x: new_names[x])
## >>> adds number of records to cluster nodes ------------------------------------------------
map_data['from_node'] = map_data['from_node'].map(lambda x: new_names[x])
map_data['to_node'] = map_data['to_node'].map(lambda x: new_names[x])
## <<< end ------------------------------------------------------------------------------------
if column_c is not None:
num = self.documents_by_terms(column_c, sep_c)
new_names = {}
for idx, row in num.iterrows():
old_name = row[0]
new_name = old_name + ' [' + str(row[1]) + ']'
new_names[old_name] = new_name
result[col1] = result[col1].map(lambda x: new_names[x])
## end ------------------------------------------------------------------------------------
result = result.sort_values(col2, ascending=False)
result.index = list(range(len(result)))
return Result(result, call='cross_corr', cluster_data=map_data)
#----------------------------------------------------------------------------------------------
def documents_by_terms(self, column, sep=None, top_n=None, minmax=None):
"""Computes the number of documents per term in a given column.
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.documents_by_terms('Authors', sep=',').head(5)
Authors Num Documents ID
0 Wang J. 7 [[*3*], [*10*], [*15*], [*80*], [*87*], [*128*...
1 Zhang G. 4 [[*27*], [*78*], [*117*], [*119*]]
2 Yan X. 3 [[*13*], [*44*], [*85*]]
3 Hernandez G. 3 [[*52*], [*94*], [*100*]]
4 Tefas A. 3 [[*8*], [*110*], [*114*]]
"""
# computes the number of documents by term
data = self[[column, 'ID']].dropna()
data = _expand_column(data, column, sep)
        numdocs = data.groupby(column).size()
## dataframe with results
result = pd.DataFrame({
column : numdocs.index,
'Num Documents' : numdocs.tolist()
})
result = result.sort_values(by='Num Documents', ascending=False)
result.index = result[column]
## compute top_n terms
if top_n is not None and len(result) > top_n:
result = result.head(top_n)
result = _minmax(result, minmax)
result['ID'] = None
for current_term in result[result.columns[0]]:
selected_IDs = data[data[column] == current_term]['ID']
if len(selected_IDs):
result.at[current_term,'ID'] = selected_IDs.tolist()
result.index = list(range(len(result)))
return Result(result, call='documents_by_terms')
#----------------------------------------------------------------------------------------------
def documents_by_year(self, cumulative=False):
"""Computes the number of documents per year.
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.documents_by_year().head()
Year Num Documents ID
0 2010 3 [[*141*], [*142*], [*143*]]
1 2011 2 [[*139*], [*140*]]
2 2012 2 [[*137*], [*138*]]
3 2013 4 [[*133*], [*134*], [*135*], [*136*]]
4 2014 2 [[*131*], [*132*]]
>>> rdf.documents_by_year(cumulative=True).head()
Year Num Documents ID
0 2010 3 [[*141*], [*142*], [*143*]]
1 2011 5 [[*139*], [*140*]]
2 2012 7 [[*137*], [*138*]]
3 2013 11 [[*133*], [*134*], [*135*], [*136*]]
4 2014 13 [[*131*], [*132*]]
"""
## number of documents by year
numdocs = self.groupby('Year')[['Year']].count()
## dataframe with results
result = self._years_list()
result = result.to_frame()
result['Year'] = result.index
result['Num Documents'] = 0
result.at[numdocs.index.tolist(), 'Num Documents'] = numdocs['Year'].tolist()
result.index = result['Year']
if cumulative is True:
result['Num Documents'] = result['Num Documents'].cumsum()
result['ID'] = None
for current_term in result['Year']:
selected_IDs = self[self['Year'] == current_term]['ID']
if len(selected_IDs):
result.at[current_term,'ID'] = selected_IDs.tolist()
result.index = list(range(len(result)))
return Result(result, call='documents_by_year')
#----------------------------------------------------------------------------------------------
def factor_analysis(self, column, sep=None, n_components=None, top_n=10):
"""Computes the matrix of factors for terms in a given column.
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.factor_analysis(
... column='Authors',
... sep=',',
... n_components=5,
... top_n=40).tomatrix().head(5) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
F0 F1 F2 F3 F4
Wang J. [7] -0.025355 -0.040096 -0.695298 0.624399 0.223693
Zhang G. [4] -0.017487 -0.028221 0.452656 0.559642 -0.167425
Yan X. [3] -0.010094 -0.013940 -0.057740 -0.006361 -0.018068
Hernandez G. [3] -0.029542 0.489821 0.002071 0.023022 0.012202
Tefas A. [3] 0.425717 0.023641 0.001237 0.014415 0.007825
"""
tdf = self.tdf(column, sep, top_n)
terms = tdf.columns.tolist()
if n_components is None:
n_components = int(math.sqrt(len(set(terms))))
pca = PCA(n_components=n_components)
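        ## pca.components_ has shape (n_components, n_terms); the transpose below
        ## yields one row of factor loadings per term.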
values = np.transpose(pca.fit(X=tdf.values).components_)
cols = [['F'+str(i) for i in range(n_components)] for k in range(len(terms))]
rows = [[t for n in range(n_components) ] for t in terms]
values = [values[i,j] for i in range(len(terms)) for j in range(n_components)]
cols = [e for row in cols for e in row]
rows = [e for row in rows for e in row]
result = pd.DataFrame({
column : rows,
'Factor' : cols,
'value' : values})
## cluster computation -------------------------------------------------------------------
tdf_r = self.tdf(column, sep, top_n)
tdf_c = tdf_r
## number of clusters
mtx = Result(result.copy(), call='factor_analysis')
mtx = mtx.tomatrix()
mtx = mtx.applymap(lambda x: 1 if x > 0 else 0)
mtx = mtx.transpose()
mtx = mtx.drop_duplicates()
mtx = mtx.transpose()
clusters = mtx.columns
## dataframe with relationships among items
map_cluster = []
map_from = []
map_to = []
map_similariry = []
map_color = []
norm = colors.Normalize(vmin=0, vmax=len(clusters))
cmap = cm.get_cmap('tab20')
## similarity computation
for idx_cluster, cluster_term in enumerate(clusters):
## terms in r selected in the current cluster
cluster_index = mtx.index[mtx[cluster_term] > 0]
for idx0_r, term0_r in enumerate(cluster_index):
for idx1_r, term1_r in enumerate(cluster_index):
if idx1_r <= idx0_r:
continue
## docs related to term0 and term1
idx = (tdf_r[term0_r] > 0) | (tdf_r[term1_r] > 0)
tdf_similarity = tdf_c[idx]
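                    ## Average Jaccard similarity over pairs of documents:
                    ## J(A, B) = |A ∩ B| / (|A| + |B| - |A ∩ B|), computed on the
                    ## binary term vectors of each document pair below.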
jaccard = 0.0
n_jaccard = 0
for idx_i, doc_i in tdf_similarity.iterrows():
for idx_j, doc_j in tdf_similarity.iterrows():
if idx_i == idx_j:
break
terms_i = doc_i.tolist()
terms_j = doc_j.tolist()
intersection = [i*j for i, j in zip(terms_i, terms_j)]
len_i = sum(terms_i)
len_j = sum(terms_j)
len_c = sum(intersection)
jaccard += float(len_c) / (len_i + len_j - len_c)
n_jaccard += 1
if n_jaccard == 0:
jaccard = 1.0
else:
jaccard = jaccard / n_jaccard
map_cluster += [cluster_term]
map_from += [term0_r]
map_to += [term1_r]
map_similariry += [jaccard]
map_color += [cmap(norm(idx_cluster))]
map_data = pd.DataFrame({
'cluster' : map_cluster,
'from_node' : map_from,
'to_node' : map_to,
'similarity' : map_similariry,
'color' : map_color
})
map_data = map_data.drop_duplicates(subset=['from_node', 'to_node'])
## end -----------------------------------------------------------------------------------
## line style for diagrams ---------------------------------------------------------------
map_data['linewidth'] = None
map_data['linestyle'] = None
for idx, row in map_data.iterrows():
if row[3] >= 0.75:
map_data.at[idx, 'linewidth'] = 4
map_data.at[idx, 'linestyle'] = '-'
elif row[3] >= 0.50:
map_data.at[idx, 'linewidth'] = 2
map_data.at[idx, 'linestyle'] = '-'
elif row[3] >= 0.25:
map_data.at[idx, 'linewidth'] = 2
map_data.at[idx, 'linestyle'] = '--'
elif row[3] < 0.25:
map_data.at[idx, 'linewidth'] = 1
map_data.at[idx, 'linestyle'] = ':'
else:
map_data.at[idx, 'linewidth'] = 0
map_data.at[idx, 'linestyle'] = '-'
## end -----------------------------------------------------------------------------------
## adds number of records to columns
num = self.documents_by_terms(column, sep)
new_names = {}
for idx, row in num.iterrows():
old_name = row[0]
new_name = old_name + ' [' + str(row[1]) + ']'
new_names[old_name] = new_name
result[column] = result[column].map(lambda x: new_names[x])
## end
## >>> adds number of records to cluster nodes ------------------------------------------------
map_data['from_node'] = map_data['from_node'].map(lambda x: new_names[x])
map_data['to_node'] = map_data['to_node'].map(lambda x: new_names[x])
## <<< end ------------------------------------------------------------------------------------
return Result(result, call='factor_analysis', cluster_data=map_data)
#----------------------------------------------------------------------------------------------
def generate_ID(self):
"""Generates a unique ID for each document.
"""
self['ID'] = [ '[*'+str(x)+ '*]' for x in range(len(self))]
self.index = self['ID']
return self
#----------------------------------------------------------------------------------------------
def get_records_by_IDs(self, IDs):
"""Extracts records using the ID number.
"""
result = None
        for ID in IDs:
            rdf = self[self['ID'] == ID].copy()
            if result is None:
                result = rdf
            else:
                result = pd.concat([result, rdf])
return result
#----------------------------------------------------------------------------------------------
def most_cited_documents(self, top_n=10, minmax=None):
""" Returns the top N most cited documents and citations > min_value .
Args:
top_n (int) : number of documents to be returned.
            minmax ((int, int) tuple) : minimal and maximal number of citations.
Results:
pandas.DataFrame
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.most_cited_documents(top_n=5)[['Authors', 'Title']]
Authors Title
140 Hsieh T.-J., Hsiao H.-F., Yeh W.-C. Forecasting stock markets using wavelet transf...
62 Fischer T., Krauss C. Deep learning with long short-term memory netw...
139 Ghazali R., Hussain A.J., Liatsis P. Dynamic Ridge Polynomial Neural Network: Forec...
124 Akita R., Yoshihara A., Matsubara T., Uehara K. Deep learning for stock prediction using numer...
134 Sharma V., Srinivasan D. A hybrid intelligent model based on recurrent ...
"""
result = self.sort_values(by='Cited by', ascending=False)
if top_n is not None and len(result) > top_n:
result = result[0:top_n]
if minmax is not None:
minval, maxval = minmax
result = result[ result['Cited by'] >= minval ]
result = result[ result['Cited by'] <= maxval ]
return result[['Title', 'Authors', 'Year', 'Cited by', 'ID']]
#----------------------------------------------------------------------------------------------
@property
def num_of_sources(self):
"""Returns the number of source titles in the dataframe.
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.num_of_sources
103
"""
return len(self['Source title'].unique())
#----------------------------------------------------------------------------------------------
def ocurrence(self, column, sep=None, top_n=None, minmax=None):
"""
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.ocurrence(column='Authors', sep=',', top_n=10)
Authors (row) Authors (col) Num Documents ID
        0     Arévalo A. [9]     Arévalo A. [9]              3  [[*52*], [*94*], [*100*]]
        1     Arévalo A. [9]   Hernandez G. [9]              3  [[*52*], [*94*], [*100*]]
        2     Arévalo A. [9]    Sandoval J. [9]              3  [[*52*], [*94*], [*100*]]
        3   Hernandez G. [9]     Arévalo A. [9]              3  [[*52*], [*94*], [*100*]]
4 Hernandez G. [9] Hernandez G. [9] 3 [[*52*], [*94*], [*100*]]
5 Hernandez G. [9] Sandoval J. [9] 3 [[*52*], [*94*], [*100*]]
6 Iosifidis A. [6] Iosifidis A. [6] 3 [[*8*], [*110*], [*114*]]
7 Iosifidis A. [6] Tefas A. [6] 3 [[*8*], [*110*], [*114*]]
        8    Sandoval J. [9]     Arévalo A. [9]              3  [[*52*], [*94*], [*100*]]
9 Sandoval J. [9] Hernandez G. [9] 3 [[*52*], [*94*], [*100*]]
10 Sandoval J. [9] Sandoval J. [9] 3 [[*52*], [*94*], [*100*]]
11 Tefas A. [6] Iosifidis A. [6] 3 [[*8*], [*110*], [*114*]]
12 Tefas A. [6] Tefas A. [6] 3 [[*8*], [*110*], [*114*]]
13 Wang J. [9] Wang J. [9] 9 [[*3*], [*10*], [*15*], [*80*], [*87*], [*128*...
14 Wu J. [3] Wu J. [3] 3 [[*34*], [*66*], [*115*]]
15 Yan X. [3] Yan X. [3] 3 [[*13*], [*44*], [*85*]]
16 Zhang G. [4] Zhang G. [4] 4 [[*27*], [*78*], [*117*], [*119*]]
17 Zhang Y. [3] Zhang Y. [3] 3 [[*4*], [*6*], [*109*]]
"""
## computes the number of documents by term by term
data = self[[column, 'ID']].dropna()
data.columns = [column + ' (row)', 'ID']
data[column + ' (col)'] = data[column + ' (row)'].copy()
top = self.documents_by_terms(column, sep)
column_r = column + ' (row)'
column_c = column + ' (col)'
data = data[[column_r, column_c, 'ID']]
data = _expand_column(data, column_r, sep)
data = _expand_column(data, column_c, sep)
## number of documents
numdocs = data.groupby(by=[column_r, column_c]).size()
## results dataframe
a = [t for t,_ in numdocs.index]
b = [t for _,t in numdocs.index]
result = pd.DataFrame({
column_r : a,
column_c : b,
'Num Documents' : numdocs.tolist()
})
## compute top_n terms
if top_n is not None:
## rows
# top = self.documents_by_terms(column_r, sep_r)
if len(top) > top_n:
top = top[0:top_n][top.columns[0]].tolist()
selected = [True if row[0] in top else False for idx, row in result.iterrows()]
result = result[selected]
selected = [True if row[1] in top else False for idx, row in result.iterrows()]
result = result[selected]
result = _minmax(result, minmax)
## collects the references
result['ID'] = None
for idx, row in result.iterrows():
term0 = row[0]
term1 = row[1]
selected_IDs = data[(data[column_r] == term0) & (data[column_c] == term1)]['ID']
if len(selected_IDs):
result.at[idx, 'ID'] = selected_IDs.tolist()
result.index = list(range(len(result)))
        ## counts the number of documents only in the results matrix -----------------------
result = Result(result, call='ocurrence')
result._add_count_to_label(column_r)
result._add_count_to_label(column_c)
return result
#----------------------------------------------------------------------------------------------
def tdf(self, column, sep, top_n=20):
"""
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.tdf('Authors', sep=',', top_n=5).head() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Wang J. Zhang G. Yan X. Hernandez G. Tefas A.
0 0.0 0.0 0.0 0.0 0.0
1 0.0 0.0 0.0 0.0 0.0
2 0.0 0.0 0.0 0.0 0.0
3 1.0 0.0 0.0 0.0 0.0
4 0.0 0.0 0.0 0.0 0.0
"""
        ## computes the N most frequent terms
x = self.documents_by_terms(column, sep=sep)
terms = x[x.columns[0]].tolist()
if top_n is not None and len(terms) > top_n:
terms = terms[0:top_n]
tdf = pd.DataFrame(
data = np.zeros((len(self), len(terms))),
columns = terms,
index = self.index)
for idx in self.index:
txt = self.loc[idx, column]
if txt is not None:
if sep is not None:
txt = [t.strip() for t in txt.split(sep)]
else:
txt = [txt.strip()]
for t in txt:
if t in terms:
tdf.at[idx, t] = 1
return tdf
#----------------------------------------------------------------------------------------------
def terms_by_terms_by_year(self, column_r, column_c, sep_r=None, sep_c=None, top_n=None, minmax=None):
"""
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.terms_by_terms_by_year(column_r='Authors', sep_r=',', column_c='Author Keywords', sep_c=';', top_n=5)
Authors Author Keywords Year Num Documents ID
519 Hernandez G. [2] Deep learning [7] 2018 [4] 2 [[*94*], [*100*]]
1582 Wang J. [3] Deep Learning [2] 2019 [5] 1 [[*15*]]
1583 Wang J. [3] Deep learning [7] 2018 [4] 1 [[*87*]]
1584 Wang J. [3] Deep learning [7] 2019 [5] 1 [[*3*]]
1741 Yan X. [2] Deep learning [7] 2019 [5] 1 [[*13*]]
1745 Yan X. [2] Financial time series [2] 2019 [5] 1 [[*13*]]
1853 Zhang G. [4] Deep Learning [2] 2018 [4] 1 [[*78*]]
1854 Zhang G. [4] Deep learning [7] 2017 [2] 1 [[*117*]]
1855 Zhang G. [4] Deep learning [7] 2019 [5] 1 [[*27*]]
1856 Zhang G. [4] Financial time series [2] 2017 [2] 1 [[*119*]]
"""
## computes the number of documents by term by term
data = self[[column_r, column_c, 'Year', 'ID']].dropna()
data = _expand_column(data, column_r, sep_r)
data = _expand_column(data, column_c, sep_c)
numdocs = data.groupby(by=[column_r, column_c, 'Year']).size()
## results dataframe
a = [t for t,_,_ in numdocs.index]
b = [t for _,t,_ in numdocs.index]
y = [t for _,_,t in numdocs.index]
result = pd.DataFrame({
column_r : a,
column_c : b,
'Year' : y,
'Num Documents' : numdocs.tolist()
})
## compute top_n terms
if top_n is not None:
## rows
top = self.documents_by_terms(column_r, sep_r)
if len(top) > top_n:
top = top[0:top_n][column_r].tolist()
selected = [True if row[0] in top else False for idx, row in result.iterrows()]
result = result[selected]
## cols
top = self.documents_by_terms(column_c, sep_c)
if len(top) > top_n:
top = top[0:top_n][column_c].tolist()
selected = [True if row[1] in top else False for idx, row in result.iterrows()]
result = result[selected]
result = _minmax(result, minmax)
result['ID'] = None
for idx, row in result.iterrows():
term0 = row[0]
term1 = row[1]
term2 = row[2]
selected_IDs = data[
(data[column_r] == term0) & (data[column_c] == term1) & (data['Year'] == term2)
]['ID']
if len(selected_IDs):
result.at[idx, 'ID'] = selected_IDs.tolist()
        ## counts the number of documents only in the results matrix -----------------------
result = Result(result, call='terms_by_terms_by_year')
result._add_count_to_label(column_r)
result._add_count_to_label(column_c)
result._add_count_to_label('Year')
return result
#----------------------------------------------------------------------------------------------
def terms_by_year(self, column, sep=None, top_n=None, minmax=None):
"""
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.terms_by_year(column='Author Keywords', sep=';', top_n=5).head()
Author Keywords Year Num Documents ID
0 Deep Learning [10] 2018 [37] 6 [[*54*], [*78*], [*79*], [*86*], [*95*], [*97*]]
1 Deep Learning [10] 2019 [27] 4 [[*15*], [*23*], [*26*], [*36*]]
2 Deep learning [34] 2013 [3] 1 [[*134*]]
3 Deep learning [34] 2016 [2] 1 [[*125*]]
4 Deep learning [34] 2017 [7] 2 [[*117*], [*120*]]
>>> rdf.terms_by_year('Author Keywords', minmax=(2,3), sep=';').head()
Author Keywords Year Num Documents ID
0 ARIMA [2] 2017 [13] 2 [[*115*], [*122*]]
1 CNN [4] 2018 [47] 2 [[*72*], [*89*]]
2 CNN [4] 2019 [33] 2 [[*18*], [*50*]]
3 Convolutional Neural Networks [2] 2018 [47] 2 [[*78*], [*79*]]
4 Convolutional neural network [4] 2018 [47] 2 [[*64*], [*77*]]
>>> rdf.terms_by_year('Author Keywords', top_n=3, minmax=(1,3), sep=';').head()
Author Keywords Year Num Documents ID
0 Deep learning [4] 2013 [3] 1 [[*134*]]
1 Deep learning [4] 2016 [1] 1 [[*125*]]
2 Deep learning [4] 2017 [5] 2 [[*117*], [*120*]]
3 LSTM [6] 2013 [3] 2 [[*133*], [*135*]]
4 LSTM [6] 2015 [1] 1 [[*130*]]
"""
## computes the number of documents by year
data = self[[column, 'Year', 'ID']].dropna()
data = _expand_column(data, column, sep)
        numdocs = data.groupby(by=[column, 'Year']).size()
## dataframe with results
idx_term = [t for t,_ in numdocs.index]
idx_year = [t for _,t in numdocs.index]
result = pd.DataFrame({
column : idx_term,
'Year' : idx_year,
'Num Documents' : numdocs.tolist()
})
## compute top_n terms
if top_n is not None:
top = self.documents_by_terms(column, sep)
if len(top) > top_n:
top = top[0:top_n][column].tolist()
selected = [True if row[0] in top else False for idx, row in result.iterrows()]
result = result[selected]
result = _minmax(result, minmax)
result['ID'] = None
for idx, row in result.iterrows():
current_term = row[0]
year = row[1]
selected_IDs = data[(data[column] == current_term) & (data['Year'] == year)]['ID']
if len(selected_IDs):
result.at[idx, 'ID'] = selected_IDs.tolist()
result.index = list(range(len(result)))
## adds the number of documents to text ---------------------------------------------------
result = Result(result, call='terms_by_year')
result._add_count_to_label(column)
result._add_count_to_label('Year')
return result
|
{"hexsha": "6f2068bb92add8742eb2b1ff72d33d359e0c2bcf", "size": 60709, "ext": "py", "lang": "Python", "max_stars_repo_path": "techminer/dataframe.py", "max_stars_repo_name": "jdvelasq/tech-miner", "max_stars_repo_head_hexsha": "85735b3b94b9d56784eafce73c7f9bee37d8c6ed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-03T11:10:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T11:10:35.000Z", "max_issues_repo_path": "techminer/dataframe.py", "max_issues_repo_name": "jdvelasq/tech-miner", "max_issues_repo_head_hexsha": "85735b3b94b9d56784eafce73c7f9bee37d8c6ed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "techminer/dataframe.py", "max_forks_repo_name": "jdvelasq/tech-miner", "max_forks_repo_head_hexsha": "85735b3b94b9d56784eafce73c7f9bee37d8c6ed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.4103877103, "max_line_length": 123, "alphanum_fraction": 0.4179446211, "include": true, "reason": "import numpy,from scipy,import networkx", "num_tokens": 14653}
|
[STATEMENT]
lemma wf_juncts:
"wf idx \<phi> \<longleftrightarrow> (\<forall>\<psi> \<in> disjuncts \<phi>. wf idx \<psi>)"
"wf idx \<phi> \<longleftrightarrow> (\<forall>\<psi> \<in> conjuncts \<phi>. wf idx \<psi>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. local.wf idx \<phi> = (\<forall>\<psi>\<in>disjuncts \<phi>. local.wf idx \<psi>) &&& local.wf idx \<phi> = (\<forall>\<psi>\<in>conjuncts \<phi>. local.wf idx \<psi>)
[PROOF STEP]
by (induct \<phi>) auto
|
{"llama_tokens": 189, "file": "Formula_Derivatives_Abstract_Formula", "length": 1}
|
"""
One-moment bulk microphysics scheme, which includes:
- terminal velocity of precipitation
- condensation and evaporation of cloud liquid water and
deposition and sublimation of cloud ice (relaxation to equilibrium)
- autoconversion of cloud liquid water into rain and of cloud ice into snow
- accretion due to collisions between categories of condensed species
- evaporation and sublimation of hydrometeors
- melting of snow into rain
"""
module Microphysics_1M
import SpecialFunctions
import Thermodynamics
import CloudMicrophysics
import CLIMAParameters
const SF = SpecialFunctions
const TD = Thermodynamics
const CO = CloudMicrophysics.Common
const CP = CLIMAParameters
const CP_planet = CLIMAParameters.Planet
const CP_micro = CLIMAParameters.Atmos.Microphysics
const APS = CP.AbstractParameterSet
# Additional type hierarchy to dispatch over for some microphysics parameters
"""
AbstractCloudType
The top-level super-type for cloud liquid water and cloud ice types
"""
abstract type AbstractCloudType end
"""
AbstractPrecipType
The top-level super-type for precipitation types (rain and snow)
"""
abstract type AbstractPrecipType end
"""
LiquidType
The type for cloud liquid water condensate
"""
struct LiquidType <: AbstractCloudType end
"""
IceType
The type for cloud ice condensate
"""
struct IceType <: AbstractCloudType end
"""
RainType
The type for rain
"""
struct RainType <: AbstractPrecipType end
"""
SnowType
The type for snow
"""
struct SnowType <: AbstractPrecipType end
E(param_set::APS, ::LiquidType, ::RainType) = CP_micro.E_liq_rai(param_set::APS)
E(param_set::APS, ::LiquidType, ::SnowType) = CP_micro.E_liq_sno(param_set::APS)
E(param_set::APS, ::IceType, ::RainType) = CP_micro.E_ice_rai(param_set::APS)
E(param_set::APS, ::IceType, ::SnowType) = CP_micro.E_ice_sno(param_set::APS)
E(param_set::APS, ::RainType, ::SnowType) = CP_micro.E_rai_sno(param_set::APS)
E(param_set::APS, ::SnowType, ::RainType) = CP_micro.E_rai_sno(param_set::APS)
export τ_relax
export terminal_velocity
export conv_q_vap_to_q_liq_ice
export conv_q_liq_to_q_rai
export conv_q_ice_to_q_sno
export accretion
export accretion_rain_sink
export accretion_snow_rain
export evaporation_sublimation
export snow_melt
"""
v0_rai(param_set, ρ)
- `param_set` - abstract set with Earth parameters
- `ρ` - air density
Returns the proportionality coefficient v0 in the terminal velocity
parameterization v(r) = v0 (r / r0)^ve.
"""
function v0_rai(param_set::APS, ρ::FT) where {FT <: Real}
_ρ_cloud_liq::FT = CP_planet.ρ_cloud_liq(param_set)
_C_drag::FT = CP_micro.C_drag(param_set)
_grav::FT = CP_planet.grav(param_set)
_r0_rai::FT = CP_micro.r0_rai(param_set)
return sqrt(
FT(8 / 3) / _C_drag * (_ρ_cloud_liq / ρ - FT(1)) * _grav * _r0_rai,
)
end
"""
n0_sno(param_set, q_sno, ρ)
- `param_set` - abstract set with Earth parameters
- `q_sno` - snow specific humidity
- `ρ` - air density
Returns the intercept parameter of the assumed Marshall-Palmer distribution of
snow particles.
"""
function n0_sno(param_set::APS, q_sno::FT, ρ::FT) where {FT <: Real}
_ν_sno::FT = CP_micro.ν_sno(param_set)
_μ_sno::FT = CP_micro.μ_sno(param_set)
# TODO this max should be replaced by
# limiting inside a PhasePartition struct for
# precipitation (once it is implemented)
    return _μ_sno * (ρ * max(FT(0), q_sno))^_ν_sno
end
"""
unpack_params(param_set, micro, ρ, q_)
- `param_set` - abstract set with Earth parameters
- `micro` - type for cloud ice, rain or snow
- `q_` - specific humidity
- `ρ` - air density
Utility function that unpacks microphysics parameters.
"""
function unpack_params(
param_set::APS,
ice::IceType,
ρ::FT,
q_ice::FT,
) where {FT <: Real}
#TODO - make ρ and q_ice optional
_n0_ice::FT = CP_micro.n0_ice(param_set)
_r0_ice::FT = CP_micro.r0_ice(param_set)
_m0_ice::FT = CP_micro.m0_ice(param_set)
_me_ice::FT = CP_micro.me_ice(param_set)
_χm_ice::FT = CP_micro.χm_ice(param_set)
_Δm_ice::FT = CP_micro.Δm_ice(param_set)
return (_n0_ice, _r0_ice, _m0_ice, _me_ice, _χm_ice, _Δm_ice)
end
function unpack_params(
param_set::APS,
rain::RainType,
ρ::FT,
q_rai::FT,
) where {FT <: Real}
#TODO - make q_rai optional
_n0_rai::FT = CP_micro.n0_rai(param_set)
_r0_rai::FT = CP_micro.r0_rai(param_set)
_m0_rai::FT = CP_micro.m0_rai(param_set)
_me_rai::FT = CP_micro.me_rai(param_set)
_a0_rai::FT = CP_micro.a0_rai(param_set)
_ae_rai::FT = CP_micro.ae_rai(param_set)
_v0_rai::FT = v0_rai(param_set, ρ)
_ve_rai::FT = CP_micro.ve_rai(param_set)
_χm_rai::FT = CP_micro.χm_rai(param_set)
_Δm_rai::FT = CP_micro.Δm_rai(param_set)
_χa_rai::FT = CP_micro.χa_rai(param_set)
_Δa_rai::FT = CP_micro.Δa_rai(param_set)
_χv_rai::FT = CP_micro.χv_rai(param_set)
_Δv_rai::FT = CP_micro.Δv_rai(param_set)
return (
_n0_rai,
_r0_rai,
_m0_rai,
_me_rai,
_χm_rai,
_Δm_rai,
_a0_rai,
_ae_rai,
_χa_rai,
_Δa_rai,
_v0_rai,
_ve_rai,
_χv_rai,
_Δv_rai,
)
end
function unpack_params(
param_set::APS,
snow::SnowType,
ρ::FT,
q_sno::FT,
) where {FT <: Real}
_n0_sno::FT = n0_sno(param_set, q_sno, ρ)
_r0_sno::FT = CP_micro.r0_sno(param_set)
_m0_sno::FT = CP_micro.m0_sno(param_set)
_me_sno::FT = CP_micro.me_sno(param_set)
_a0_sno::FT = CP_micro.a0_sno(param_set)
_ae_sno::FT = CP_micro.ae_sno(param_set)
_v0_sno::FT = CP_micro.v0_sno(param_set)
_ve_sno::FT = CP_micro.ve_sno(param_set)
_χm_sno::FT = CP_micro.χm_sno(param_set)
_Δm_sno::FT = CP_micro.Δm_sno(param_set)
_χa_sno::FT = CP_micro.χa_sno(param_set)
_Δa_sno::FT = CP_micro.Δa_sno(param_set)
_χv_sno::FT = CP_micro.χv_sno(param_set)
_Δv_sno::FT = CP_micro.Δv_sno(param_set)
return (
_n0_sno,
_r0_sno,
_m0_sno,
_me_sno,
_χm_sno,
_Δm_sno,
_a0_sno,
_ae_sno,
_χa_sno,
_Δa_sno,
_v0_sno,
_ve_sno,
_χv_sno,
_Δv_sno,
)
end
"""
lambda(q, ρ, n0, m0, me, r0, χm, Δm)
- `q` - specific humidity of rain, ice or snow
- `ρ` - air density
- `n0` - size distribution parameter
- `m0`, `me`, `χm`, `Δm`, `r0` - mass(radius) parameters
Returns the rate parameter of the assumed size distribution of
particles (rain drops, ice crystals, snow crystals).
"""
function lambda(
q::FT,
ρ::FT,
n0::FT,
m0::FT,
me::FT,
r0::FT,
χm::FT,
Δm::FT,
) where {FT <: Real}
λ::FT = FT(0)
if q > FT(0)
λ =
(
χm * m0 * n0 * SF.gamma(me + Δm + FT(1)) / ρ / q / r0^(me + Δm)
)^FT(1 / (me + Δm + 1))
end
return λ
end
"""
τ_relax(param_set, liquid)
τ_relax(param_set, ice)
- `param_set` - abstract set with Earth parameters
- `liquid` - a type for cloud liquid water
- `ice` - a type for cloud ice
Returns the relaxation timescale for condensation and evaporation of
cloud liquid water or the relaxation timescale for sublimation and
deposition of cloud ice.
"""
function τ_relax(param_set::APS, liquid::LiquidType)
_τ_relax = CP_micro.τ_cond_evap(param_set)
return _τ_relax
end
function τ_relax(param_set::APS, ice::IceType)
_τ_relax = CP_micro.τ_sub_dep(param_set)
return _τ_relax
end
"""
terminal_velocity(param_set, precip, ρ, q_)
- `param_set` - abstract set with Earth parameters
- `precip` - a type for rain or snow
- `ρ` - air density
- `q_` - rain or snow specific humidity
Returns the mass weighted average terminal velocity assuming
a Marshall-Palmer (1948) distribution of rain drops and snow crystals.
"""
function terminal_velocity(
param_set::APS,
precip::AbstractPrecipType,
ρ::FT,
q_::FT,
) where {FT <: Real}
fall_w = FT(0)
if q_ > FT(0)
(_n0, _r0, _m0, _me, _χm, _Δm, _a0, _ae, _χa, _Δa, _v0, _ve, _χv, _Δv) =
unpack_params(param_set, precip, ρ, q_)
_λ::FT = lambda(q_, ρ, _n0, _m0, _me, _r0, _χm, _Δm)
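        # Mass-weighted mean fall speed:
        #   ∫ v(r) m(r) n(r) dr / ∫ m(r) n(r) dr
        # with v(r) = χv v0 (r / r0)^(ve + Δv) over the exponential size
        # distribution; both integrals reduce to the gamma functions below.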
fall_w =
_χv *
_v0 *
(_λ * _r0)^(-_ve - _Δv) *
SF.gamma(_me + _ve + _Δm + _Δv + FT(1)) /
SF.gamma(_me + _Δm + FT(1))
end
return fall_w
end
"""
conv_q_vap_to_q_liq_ice(param_set, liquid, q_sat, q)
conv_q_vap_to_q_liq_ice(param_set, ice, q_sat, q)
- `param_set` - abstract set with Earth parameters
- `liquid` - a type for cloud water
- `ice` - a type for cloud ice
- `q_sat` - PhasePartition at equilibrium
- `q` - current PhasePartition
Returns the cloud water tendency due to condensation and evaporation
or cloud ice tendency due to sublimation and vapor deposition.
The tendency is obtained assuming a relaxation to equilibrium with
a constant timescale.
"""
function conv_q_vap_to_q_liq_ice(
param_set::APS,
liquid::LiquidType,
q_sat::TD.PhasePartition{FT},
q::TD.PhasePartition{FT},
) where {FT <: Real}
_τ_cond_evap::FT = τ_relax(param_set, liquid)
return (q_sat.liq - q.liq) / _τ_cond_evap
end
function conv_q_vap_to_q_liq_ice(
param_set::APS,
ice::IceType,
q_sat::TD.PhasePartition{FT},
q::TD.PhasePartition{FT},
) where {FT <: Real}
_τ_sub_dep::FT = τ_relax(param_set, ice)
return (q_sat.ice - q.ice) / _τ_sub_dep
end
"""
conv_q_liq_to_q_rai(param_set, q_liq)
- `param_set` - abstract set with Earth parameters
- `q_liq` - liquid water specific humidity
Returns the q_rai tendency due to collisions between cloud droplets
(autoconversion), parametrized following Kessler (1995).
"""
function conv_q_liq_to_q_rai(param_set::APS, q_liq::FT) where {FT <: Real}
_τ_acnv_rai::FT = CP_micro.τ_acnv_rai(param_set)
_q_liq_threshold::FT = CP_micro.q_liq_threshold(param_set)
    return max(FT(0), q_liq - _q_liq_threshold) / _τ_acnv_rai
end
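# Worked example with hypothetical values: for q_liq = 1e-3 kg/kg and a
# threshold of 5e-4 kg/kg, the tendency is (1e-3 - 5e-4) / _τ_acnv_rai, i.e.
# the excess liquid decays toward the threshold on the autoconversion timescale.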
"""
conv_q_ice_to_q_sno_no_supersat(param_set, q_ice)
- `param_set` - abstract set with Earth parameters
- `q_ice` - cloud ice specific humidity
Returns the q_sno tendency due to autoconversion from ice.
This is a simplified version of a snow autoconversion rate that can be used in
simulations where there is no supersaturation
(for example in TC.jl when using saturation adjustment).
"""
function conv_q_ice_to_q_sno_no_supersat(
param_set::APS,
q_ice::FT,
) where {FT <: Real}
_τ_acnv_sno::FT = CP_micro.τ_acnv_sno(param_set)
_q_ice_threshold::FT = CP_micro.q_ice_threshold(param_set)
    return max(FT(0), q_ice - _q_ice_threshold) / _τ_acnv_sno
end
"""
conv_q_ice_to_q_sno(param_set, q, ρ, T)
- `param_set` - abstract set with Earth parameters
- `q` - phase partition
- `ρ` - air density
- `T` - air temperature
Returns the q_sno tendency due to autoconversion from ice.
Parameterized following Harrington et al. (1996) and Kaul et al. (2015).
"""
function conv_q_ice_to_q_sno(
param_set::APS,
q::TD.PhasePartition{FT},
ρ::FT,
T::FT,
) where {FT <: Real}
acnv_rate = FT(0)
_S::FT = TD.supersaturation(param_set, q, ρ, T, TD.Ice())
if (q.ice > FT(0) && _S > FT(0))
_G::FT = CO.G_func(param_set, T, TD.Ice())
_r_ice_snow::FT = CP_micro.r_ice_snow(param_set)
(_n0, _r0, _m0, _me, _χm, _Δm) =
unpack_params(param_set, IceType(), ρ, q.ice)
_λ::FT = lambda(q.ice, ρ, _n0, _m0, _me, _r0, _χm, _Δm)
acnv_rate =
4 * FT(π) * _S * _G * _n0 / ρ *
exp(-_λ * _r_ice_snow) *
(
_r_ice_snow^FT(2) / (_me + _Δm) +
(_r_ice_snow * _λ + FT(1)) / _λ^FT(2)
)
end
return acnv_rate
end
"""
accretion(param_set, cloud, precip, q_clo, q_pre, ρ)
- `param_set` - abstract set with Earth parameters
- `cloud` - type for cloud water or cloud ice
- `precip` - type for rain or snow
- `q_clo` - cloud water or cloud ice specific humidity
- `q_pre` - rain water or snow specific humidity
 - `ρ` - air density
Returns the source of precipitating water (rain or snow)
due to collisions with cloud water (liquid or ice).
"""
function accretion(
param_set::APS,
cloud::AbstractCloudType,
precip::AbstractPrecipType,
q_clo::FT,
q_pre::FT,
ρ::FT,
) where {FT <: Real}
accr_rate = FT(0)
if (q_clo > FT(0) && q_pre > FT(0))
(_n0, _r0, _m0, _me, _χm, _Δm, _a0, _ae, _χa, _Δa, _v0, _ve, _χv, _Δv) =
unpack_params(param_set, precip, ρ, q_pre)
_λ::FT = lambda(q_pre, ρ, _n0, _m0, _me, _r0, _χm, _Δm)
_E::FT = E(param_set, cloud, precip)
accr_rate =
q_clo * _E * _n0 * _a0 * _v0 * _χa * _χv / _λ *
SF.gamma(_ae + _ve + _Δa + _Δv + FT(1)) /
(_λ * _r0)^(_ae + _ve + _Δa + _Δv)
end
return accr_rate
end
"""
accretion_rain_sink(param_set, q_ice, q_rai, ρ)
- `param_set` - abstract set with Earth parameters
- `q_ice` - cloud ice specific humidity
- `q_rai` - rain water specific humidity
- `ρ` - air density
Returns the sink of rain water (partial source of snow) due to collisions
with cloud ice.
"""
function accretion_rain_sink(
param_set::APS,
q_ice::FT,
q_rai::FT,
ρ::FT,
) where {FT <: Real}
accr_rate = FT(0)
if (q_ice > FT(0) && q_rai > FT(0))
(_n0_ice, _r0_ice, _m0_ice, _me_ice, _χm_ice, _Δm_ice) =
unpack_params(param_set, IceType(), ρ, q_ice)
(
_n0_rai,
_r0_rai,
_m0_rai,
_me_rai,
_χm_rai,
_Δm_rai,
_a0_rai,
_ae_rai,
_χa_rai,
_Δa_rai,
_v0_rai,
_ve_rai,
_χv_rai,
_Δv_rai,
) = unpack_params(param_set, RainType(), ρ, q_rai)
_E::FT = E(param_set, IceType(), RainType())
_λ_rai::FT = lambda(
q_rai,
ρ,
_n0_rai,
_m0_rai,
_me_rai,
_r0_rai,
_χm_rai,
_Δm_rai,
)
_λ_ice::FT = lambda(
q_ice,
ρ,
_n0_ice,
_m0_ice,
_me_ice,
_r0_ice,
_χm_ice,
_Δm_ice,
)
accr_rate =
_E / ρ *
_n0_rai *
_n0_ice *
_m0_rai *
_a0_rai *
_v0_rai *
_χm_rai *
_χa_rai *
_χv_rai / _λ_ice / _λ_rai * SF.gamma(
_me_rai +
_ae_rai +
_ve_rai +
_Δm_rai +
_Δa_rai +
_Δv_rai +
FT(1),
) /
(
_r0_rai * _λ_rai
)^(_me_rai + _ae_rai + _ve_rai + _Δm_rai + _Δa_rai + _Δv_rai)
end
return accr_rate
end
"""
accretion_snow_rain(param_set, type_i, type_j, q_i, q_j, ρ)
- `i` - snow for temperatures below freezing
or rain for temperatures above freezing
- `j` - rain for temperatures below freezing
         or snow for temperatures above freezing
- `param_set` - abstract set with Earth parameters
- `type_i`, `type_j` - a type for snow or rain
 - `q_i`, `q_j` - specific humidity of snow or rain
- `ρ` - air density
Returns the accretion rate between rain and snow.
Collisions between rain and snow result in
snow at temperatures below freezing and in rain at temperatures above freezing.
"""
function accretion_snow_rain(
param_set::APS,
type_i::AbstractPrecipType,
type_j::AbstractPrecipType,
q_i::FT,
q_j::FT,
ρ::FT,
) where {FT <: Real}
accr_rate = FT(0)
if (q_i > FT(0) && q_j > FT(0))
(
_n0_i,
_r0_i,
_m0_i,
_me_i,
_χm_i,
_Δm_i,
_a0_i,
_ae_i,
_χa_i,
_Δa_i,
_v0_i,
_ve_i,
_χv_i,
_Δv_i,
) = unpack_params(param_set, type_i, ρ, q_i)
(
_n0_j,
_r0_j,
_m0_j,
_me_j,
_χm_j,
_Δm_j,
_a0_j,
_ae_j,
_χa_j,
_Δa_j,
_v0_j,
_ve_j,
_χv_j,
_Δv_j,
) = unpack_params(param_set, type_j, ρ, q_j)
_E_ij::FT = E(param_set, type_i, type_j)
_λ_i::FT = lambda(q_i, ρ, _n0_i, _m0_i, _me_i, _r0_i, _χm_i, _Δm_i)
_λ_j::FT = lambda(q_j, ρ, _n0_j, _m0_j, _me_j, _r0_j, _χm_j, _Δm_j)
_v_ti = terminal_velocity(param_set, type_i, ρ, q_i)
_v_tj = terminal_velocity(param_set, type_j, ρ, q_j)
accr_rate =
FT(π) / ρ *
_n0_i *
_n0_j *
_m0_j *
_χm_j *
_E_ij *
abs(_v_ti - _v_tj) / _r0_j^(_me_j + _Δm_j) * (
FT(2) * SF.gamma(_me_j + _Δm_j + FT(1)) / _λ_i^FT(3) /
_λ_j^(_me_j + _Δm_j + FT(1)) +
FT(2) * SF.gamma(_me_j + _Δm_j + FT(2)) / _λ_i^FT(2) /
_λ_j^(_me_j + _Δm_j + FT(2)) +
SF.gamma(_me_j + _Δm_j + FT(3)) / _λ_i /
_λ_j^(_me_j + _Δm_j + FT(3))
)
end
return accr_rate
end
"""
evaporation_sublimation(param_set, rain, q, q_rai, ρ, T)
evaporation_sublimation(param_set, snow, q, q_sno, ρ, T)
- `param_set` - abstract set with Earth parameters
- `rain` - a type for rain
- `snow` - a type for snow
- `q` - phase partition
- `q_rai` - rain specific humidity
- `q_sno` - snow specific humidity
- `ρ` - air density
- `T` - air temperature
Returns the tendency due to rain evaporation or snow sublimation.
"""
function evaporation_sublimation(
param_set::APS,
rain::RainType,
q::TD.PhasePartition{FT},
q_rai::FT,
ρ::FT,
T::FT,
) where {FT <: Real}
evap_subl_rate = FT(0)
_S::FT = TD.supersaturation(param_set, q, ρ, T, TD.Liquid())
if (q_rai > FT(0) && _S < FT(0))
_a_vent::FT = CP_micro.a_vent_rai(param_set)
_b_vent::FT = CP_micro.b_vent_rai(param_set)
_ν_air::FT = CP_micro.ν_air(param_set)
_D_vapor::FT = CP_micro.D_vapor(param_set)
_G::FT = CO.G_func(param_set, T, TD.Liquid())
(_n0, _r0, _m0, _me, _χm, _Δm, _a0, _ae, _χa, _Δa, _v0, _ve, _χv, _Δv) =
unpack_params(param_set, rain, ρ, q_rai)
_λ::FT = lambda(q_rai, ρ, _n0, _m0, _me, _r0, _χm, _Δm)
evap_subl_rate =
4 * FT(π) * _n0 / ρ * _S * _G / _λ^FT(2) * (
_a_vent +
_b_vent * (_ν_air / _D_vapor)^FT(1 / 3) /
(_r0 * _λ)^((_ve + _Δv) / FT(2)) *
(FT(2) * _v0 * _χv / _ν_air / _λ)^FT(1 / 2) *
SF.gamma((_ve + _Δv + FT(5)) / FT(2))
)
end
# only evaporation is considered for rain
return min(0, evap_subl_rate)
end
function evaporation_sublimation(
param_set::APS,
snow::SnowType,
q::TD.PhasePartition{FT},
q_sno::FT,
ρ::FT,
T::FT,
) where {FT <: Real}
evap_subl_rate = FT(0)
if q_sno > FT(0)
_a_vent::FT = CP_micro.a_vent_sno(param_set)
_b_vent::FT = CP_micro.b_vent_sno(param_set)
_ν_air::FT = CP_micro.ν_air(param_set)
_D_vapor::FT = CP_micro.D_vapor(param_set)
_S::FT = TD.supersaturation(param_set, q, ρ, T, TD.Ice())
_G::FT = CO.G_func(param_set, T, TD.Ice())
(_n0, _r0, _m0, _me, _χm, _Δm, _a0, _ae, _χa, _Δa, _v0, _ve, _χv, _Δv) =
unpack_params(param_set, snow, ρ, q_sno)
_λ::FT = lambda(q_sno, ρ, _n0, _m0, _me, _r0, _χm, _Δm)
evap_subl_rate =
4 * FT(π) * _n0 / ρ * _S * _G / _λ^FT(2) * (
_a_vent +
_b_vent * (_ν_air / _D_vapor)^FT(1 / 3) /
(_r0 * _λ)^((_ve + _Δv) / FT(2)) *
(FT(2) * _v0 * _χv / _ν_air / _λ)^FT(1 / 2) *
SF.gamma((_ve + _Δv + FT(5)) / FT(2))
)
end
return evap_subl_rate
end
"""
snow_melt(param_set, q_sno, ρ, T)
- `param_set` - abstract set with Earth parameters
- `q_sno` - snow water specific humidity
- `ρ` - air density
- `T` - air temperature
Returns the tendency due to snow melt.
"""
function snow_melt(param_set::APS, q_sno::FT, ρ::FT, T::FT) where {FT <: Real}
snow_melt_rate = FT(0)
_T_freeze = CP_planet.T_freeze(param_set)
if (q_sno > FT(0) && T > _T_freeze)
_a_vent::FT = CP_micro.a_vent_sno(param_set)
_b_vent::FT = CP_micro.b_vent_sno(param_set)
_ν_air::FT = CP_micro.ν_air(param_set)
_D_vapor::FT = CP_micro.D_vapor(param_set)
_K_therm::FT = CP_micro.K_therm(param_set)
L = TD.latent_heat_fusion(param_set, T)
(_n0, _r0, _m0, _me, _χm, _Δm, _a0, _ae, _χa, _Δa, _v0, _ve, _χv, _Δv) =
unpack_params(param_set, SnowType(), ρ, q_sno)
_λ::FT = lambda(q_sno, ρ, _n0, _m0, _me, _r0, _χm, _Δm)
snow_melt_rate =
4 * FT(π) * _n0 / ρ * _K_therm / L * (T - _T_freeze) / _λ^FT(2) * (
_a_vent +
_b_vent * (_ν_air / _D_vapor)^FT(1 / 3) /
(_r0 * _λ)^((_ve + _Δv) / FT(2)) *
(FT(2) * _v0 * _χv / _ν_air / _λ)^FT(1 / 2) *
SF.gamma((_ve + _Δv + FT(5)) / FT(2))
)
end
return snow_melt_rate
end
end # module Microphysics_1M
|
{"hexsha": "b99bf3da2365aad1f19b9c95df9b2194c270d876", "size": 21496, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Microphysics_1M.jl", "max_stars_repo_name": "CliMA/CloudMicrophysics.jl", "max_stars_repo_head_hexsha": "951148546761bf2208066c3890a274ec85fbf1c7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-08-03T12:09:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T07:09:59.000Z", "max_issues_repo_path": "src/Microphysics_1M.jl", "max_issues_repo_name": "CliMA/CloudMicrophysics.jl", "max_issues_repo_head_hexsha": "951148546761bf2208066c3890a274ec85fbf1c7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 50, "max_issues_repo_issues_event_min_datetime": "2021-07-26T15:48:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T23:41:25.000Z", "max_forks_repo_path": "src/Microphysics_1M.jl", "max_forks_repo_name": "CliMA/CloudMicrophysics.jl", "max_forks_repo_head_hexsha": "951148546761bf2208066c3890a274ec85fbf1c7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-01T20:36:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-01T20:36:02.000Z", "avg_line_length": 26.7696139477, "max_line_length": 80, "alphanum_fraction": 0.6025307034, "num_tokens": 7335}
|
include("Include.jl")
# extra:
using Flux
using Flux: @epochs
using BSON: @save
# load the data -
path_data_file_TF = joinpath(_PATH_TO_DATA, "Training-Thrombin-TF.csv")
full_data_table_TF = load(path_data_file_TF)
path_data_file_TF_TM = joinpath(_PATH_TO_DATA, "Training-Thrombin-TF-TM.csv")
full_data_table_TF_TM = load(path_data_file_TF_TM)
full_training_data_frame = vcat(full_data_table_TF, full_data_table_TF_TM)
# filter - keep visits 2 and 3 (the TM column is passed to the predicate but not used) -
has_TM_flag = 0 # note: defined but unused below
experimental_data_table = filter([:visitid, :TM] => (x, y) -> (x == 2 || x == 3), full_training_data_frame)
# get input and output data -
input_data = convert.(Float32, Matrix(experimental_data_table[!, 3:14]))
output_data = convert.(Float32, Matrix(experimental_data_table[!, 15:19]))
training_data = [(transpose(input_data), transpose(output_data))]
# build a model architecture -
deep_coag_model = Chain(Dense(12, 12, σ), Dense(12, 5));
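# The architecture maps the 12 input features (columns 3:14 above) through a
# 12-unit sigmoid hidden layer to the 5 output targets (columns 15:19).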
# setup a loss function -
loss(x, y) = Flux.Losses.mae(deep_coag_model(x), y; agg = mean)
# pointer to params -
ps = Flux.params(deep_coag_model)
# # use old school gradient descent with momentum -
opt = Momentum(0.25, 0.95)
# # train -
@epochs 10000 Flux.train!(loss, ps, training_data, opt)
# save -
model_file_path = joinpath(_PATH_TO_MODELS, "deep_coag_model.bson")
@save model_file_path deep_coag_model
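# Hypothetical follow-up (not in the original script): the saved model can be
# reloaded later for inference, e.g.
#   using BSON: @load
#   @load model_file_path deep_coag_model
#   ŷ = deep_coag_model(transpose(input_data))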
|
{"hexsha": "0d34cfb1adcb7c1b31aacba92909bcbc0aacc06c", "size": 1296, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "build_ann_model.jl", "max_stars_repo_name": "varnerlab/UVM-TopDown-LegacyModel", "max_stars_repo_head_hexsha": "eaab26053ce720b3affc8f1d27aaa0a4b624ca89", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "build_ann_model.jl", "max_issues_repo_name": "varnerlab/UVM-TopDown-LegacyModel", "max_issues_repo_head_hexsha": "eaab26053ce720b3affc8f1d27aaa0a4b624ca89", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "build_ann_model.jl", "max_forks_repo_name": "varnerlab/UVM-TopDown-LegacyModel", "max_forks_repo_head_hexsha": "eaab26053ce720b3affc8f1d27aaa0a4b624ca89", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0, "max_line_length": 107, "alphanum_fraction": 0.7453703704, "num_tokens": 379}
|
#!/usr/bin/env python3
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('filename', help='Data file to process')
args = parser.parse_args()
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import os
plt.rcParams.update({'font.size': 10})
filename = args.filename
time, temp, rh, wspd, clear = [], [], [], [], []
with open(filename) as f:
    data = [line.strip() for line in f.readlines()[1:]]
for line in data:
line = line.split(',')
time.append(datetime.strptime(line[1], '%Y-%m-%d %H:%M'))
temp.append(float(line[4]) if line[4] != 'M' else np.nan)
rh.append(float(line[6]) if line[6] != 'M' else np.nan)
wspd.append(float(line[8]) if line[8] != 'M' else np.nan)
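    # Sky condition (assumed field layout): False if any cloud-group code
    # (SCT/BKN/OVC/VV) appears in fields 14-17, NaN if all four fields are
    # missing, True otherwise.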
clear.append(False if any([x in line[14:18] for x in ['SCT', 'BKN', 'OVC', 'VV']])\
else np.nan if all([x == 'M' for x in line[14:18]])\
else True)
fig = plt.figure(figsize=(8, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), colspan=1, rowspan=1)
ax2 = plt.subplot2grid((3, 1), (1, 0), colspan=1, rowspan=1)
ax3 = plt.subplot2grid((3, 1), (2, 0), colspan=1, rowspan=1)
ax1.plot(time, temp, 'k.', ms=1)
ax2.plot(time, rh, 'k.', ms=1)
ax3.plot(time, wspd, 'k.', ms=1)
ax1.set_ylabel('Temperature [F]')
ax2.set_ylabel('Relative humidity [%]')
ax3.set_ylabel('Wind speed [kts]')
ax1.set_title('MIA')
plt.tight_layout()
plt.savefig(os.path.basename(filename)[:-3] + 'png', dpi=300)
plt.savefig(os.path.basename(filename)[:-3] + 'svg')
plt.close(fig)
|
{"hexsha": "8148460e4c2b669d8cea48e18e0674d905e7da61", "size": 1490, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/fortify/mf/generic-procedures/data/plot_station_data.py", "max_stars_repo_name": "wilsonify/c-consumer", "max_stars_repo_head_hexsha": "e19a1baf4efb68436bbec50395f2a3a2c6e3c078", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2019-01-05T21:15:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T07:30:25.000Z", "max_issues_repo_path": "src/fortify/mf/generic-procedures/data/plot_station_data.py", "max_issues_repo_name": "wilsonify/c-consumer", "max_issues_repo_head_hexsha": "e19a1baf4efb68436bbec50395f2a3a2c6e3c078", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/fortify/mf/generic-procedures/data/plot_station_data.py", "max_forks_repo_name": "wilsonify/c-consumer", "max_forks_repo_head_hexsha": "e19a1baf4efb68436bbec50395f2a3a2c6e3c078", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-10T08:50:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-13T21:36:14.000Z", "avg_line_length": 33.1111111111, "max_line_length": 87, "alphanum_fraction": 0.6429530201, "include": true, "reason": "import numpy", "num_tokens": 470}
|
### A Pluto.jl notebook ###
# v0.16.1
using Markdown
using InteractiveUtils
# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error).
macro bind(def, element)
quote
local el = $(esc(element))
global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : missing
el
end
end
# ╔═╡ 94f8e29e-ef91-11eb-1ae9-29bc46fa505a
begin
using Pkg
Pkg.activate(joinpath(Pkg.devdir(), "MLCourse"))
using Plots, DataFrames, Random, CSV, MLJ, MLJLinearModels
end
# ╔═╡ 12d5824c-0873-49a8-a5d8-f93c73b633ae
using PlutoUI; PlutoUI.TableOfContents()
# ╔═╡ 20c5c7bc-664f-4c04-8215-8f3a9a2095c9
begin
using MLCourse
MLCourse.list_notebooks(@__FILE__)
end
# ╔═╡ 8217895b-b120-4b08-b18f-d921dfdddf10
md"# Linear Regression
## Wind speed prediction with one predictor
"
# ╔═╡ 9f84bcc5-e5ab-4935-9076-c19e9bd668e8
weather = CSV.read(joinpath(@__DIR__, "..", "data", "weather2015-2018.csv"), DataFrame);
# ╔═╡ 34e527f2-ef80-4cb6-be3a-bee055eca125
begin
training_set1 = (X = (LUZ_pressure = weather.LUZ_pressure[1:end-5],),
y = weather.LUZ_wind_peak[6:end])
m1 = machine(LinearRegressor(), training_set1.X, training_set1.y) |> fit!
end;
# ╔═╡ 006fc1eb-50d5-4206-8c87-53e873f158f4
begin
scatter(training_set1.X.LUZ_pressure[1:10:end],
training_set1.y[1:10:end], label = "data")
plot!(fitted_linear_func(m1), label = "linear fit", w = 3)
end
# ╔═╡ e4712ebe-f395-418b-abcc-e10ada4b05c2
md"Let us inspect the results.
First we look at the fitted parameters.
We find that there is a negative correlation between the pressure in Luzern
and the wind speed 5 hours later in Luzern.
"
# ╔═╡ 8c9ae8f7-81b2-4d60-a8eb-ded5364fe0cc
fitted_params(m1)
# ╔═╡ f4f890b6-0ad4-4155-9321-15d673e15489
md"Next we predict the mean for different pressure values the distribution of
wind speeds. In the probabilistic interpretation of supervised learning,
standard linear regression finds conditional normal distributions with
input-dependent mean ``\mu = \hat y = \theta_0 + \theta_1 x`` and
constant standard deviation σ.
"
# ╔═╡ 9b62c374-c26e-4990-8ffc-790928e62e88
predict(m1, (LUZ_pressure = [930., 960., 990.],))
# ╔═╡ 7923a0a8-3033-4dde-91e8-22bf540c9866
md"We use the root-mean-squared-error (`rmse`) = ``\sqrt{\frac1n\sum_{i=1}^n(y_i - \hat y_i)^2}`` to evaluate our training error. To compute the test error, we use hourly data from 2019 to 2020.
"
# ╔═╡ 57f352dc-55ee-4e14-b68d-698938a97d92
rmse(predict(m1, training_set1.X), training_set1.y)
# ╔═╡ b0de002f-3f39-4378-8d68-5c4606e488b7
begin
weather_test = CSV.read(joinpath(@__DIR__, "..", "data", "weather2019-2020.csv"), DataFrame);
test_set1 = (X = (LUZ_pressure = weather_test.LUZ_pressure[1:end-5],),
y = weather_test.LUZ_wind_peak[6:end])
end
# ╔═╡ ce536f60-68b3-4901-bd6a-c96378054b12
rmse(predict(m1, test_set1.X), test_set1.y)
# ╔═╡ c65b81bd-395f-4461-a73b-3535903cb2d7
md"## Multiple Linear Regression
In multiple linear regression there are multiple predictors ``x_1, x_2, \ldots, x_p``
and the response is ``\hat y = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \cdots + \beta_p x_p
`` (we use here ``\beta`` instead of ``\theta`` for the parameters).
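Collecting the predictors of all data points in a matrix ``X`` (with a leading column of ones for the intercept), the model can be written compactly as ``\hat y = X\beta``.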
With ``p = 2`` predictors we can visualize linear regression as the plane
that is closest to the data. Use the sliders below to get a feeling for the
parameters. You can also change the viewing angle by clicking and dragging in
the figure."
# ╔═╡ 51c9ea74-3110-4536-a4af-7cc73b45a4a6
md"β₀ = $(@bind β₀ Slider(-1:.02:1, default = .4, show_value = true))
β₁ = $(@bind β₁ Slider(-1:.02:1, default = .2, show_value = true))
β₂ = $(@bind β₂ Slider(-1:.02:1, default = -.6, show_value = true))
"
# ╔═╡ 0f544053-1b7a-48d6-b18b-72092b124305
begin
Random.seed!(3)
X = DataFrame(X1 = randn(20), X2 = randn(20))
f0(X1, X2, β₀, β₁, β₂) = β₀ + β₁ * X1 + β₂ * X2
f0(β₀, β₁, β₂) = (X1, X2) -> f0(X1, X2, β₀, β₁, β₂)
data_generator(X1, X2; β₀, β₁, β₂, σ = 0.8) = f0(X1, X2, β₀, β₁, β₂) + σ * randn()
y = data_generator.(X.X1, X.X2, β₀ = .4, β₁ = .5, β₂ = -.6)
end;
# ╔═╡ d541a8cd-5aa4-4c2d-bfdf-5e6297bb65a8
begin
plotly()
p1 = scatter3d(X.X1, X.X2, y, markersize = 1,
xlims = (-3, 3), xlabel = "X1",
ylims = (-3, 3), ylabel = "X2",
zlims = (-4, 4), zlabel = "y", label = "data")
wireframe!(-3:.1:3, -3:.1:3, f0(β₀, β₁, β₂),
label = "function", title = "data & function", color = :green)
plot_residuals!(X.X1, X.X2, y, f0(β₀, β₁, β₂))
p2 = contour(-1:.1:1, -1:.1:1, (β₀, β₁) -> mean((β₀ .+ β₁ .* X.X1 .+ β₂ .* X.X2 .- y).^2), levels = 100, ylabel = "β₁", cbar = false, title = "loss")
scatter!([β₀], [β₁], label = nothing)
p3 = contour(-1:.1:1, -1:.1:1, (β₀, β₂) -> mean((β₀ .+ β₁ .* X.X1 .+ β₂ .* X.X2 .- y).^2), levels = 100, xlabel = "β₀", ylabel = "β₂")
scatter!([β₀], [β₂], label = "current loss")
plot(p1, plot(p2, p3, layout = (2, 1)), layout = (1, 2),
size = (700, 400), legend = false)
end
# ╔═╡ da6462d8-3343-41d8-82dd-48770176d4ba
md"## Wind speed prediction with multiple predictors"
# ╔═╡ 753ec309-1363-485d-a2bd-b9fa100d9058
m2 = machine(LinearRegressor(), select(weather[1:end-5,:], Not([:LUZ_wind_peak, :time])),
weather.LUZ_wind_peak[6:end]) |> fit!;
# ╔═╡ 618ef3c7-0fda-4970-88e8-1dac195545de
sort!(DataFrame(predictor = names(select(weather, Not([:LUZ_wind_peak, :time]))),
value = fitted_params(m2).coefs), :value)
# ╔═╡ 2d25fbb6-dc9b-40ad-bdce-4c952cdad077
rmse(predict(m2, select(weather[1:end-5,:], Not([:LUZ_wind_peak, :time]))),
weather.LUZ_wind_peak[6:end])
# ╔═╡ c9f10ace-3299-45fb-b98d-023a35dd405a
rmse(predict(m2, select(weather_test[1:end-5,:], Not([:LUZ_wind_peak, :time]))),
weather_test.LUZ_wind_peak[6:end])
# ╔═╡ 99a371b2-5158-4c42-8f50-329352b6c1f2
md"# Error Decomposition
"
# ╔═╡ f10b7cad-eda3-4ec9-99ee-d43ed013a057
begin
f(x) = sin(2x) + 2*(x - .5)^3 - .5x
conditional_generator(x; n = 50) = f.(x) .+ .2*randn(n)
end;
# ╔═╡ 05354df5-a803-422f-87a3-1c56a34e8a48
f̂(x) = 0.1 + x
# ╔═╡ 9e61b4c3-1a9f-41a7-9882-25ed797a7b8d
expected_error(f, x) = mean((conditional_generator(x, n = 10^6) .- f(x)).^2);
# ╔═╡ c6a59b85-d031-4ad4-9e24-691494d08cde
expected_error(f̂, .1)
# ╔═╡ e50b8196-e804-473a-b3b5-e22fdb9d2f45
(f(.1) - f̂(.1))^2
# ╔═╡ f413ea94-36ca-4afc-8ca8-9a7e88101980
expected_error(f, .1)
# ╔═╡ dbf7fc72-bfd0-4c57-a1a9-fb5881e16e7e
let x = rand(100), grid = 0:.05:1
gr()
p1 = scatter(x, vcat(conditional_generator.(x, n = 1)...), label = "samples")
plot!(f, label = "f")
plot!(f̂, label = "f̂")
p2 = plot(grid, expected_error.(f̂, grid), label = "expected error f̂", w = 3)
plot!(grid, (f.(grid) .- f̂.(grid)).^2, label = "reducible error", w = 3)
hline!([.2^2], label = "irreducible error", ylims = (0, .15), w = 3, xlabel = "x")
plot(p1, p2, layout = (2, 1), legend = :right, ylabel = "y")
end
# ╔═╡ ad5b293d-c0f4-4693-84f4-88308639a501
md"# Logistic Regression
## Preparing the spam data
The text in our spam data set is already preprocessed. But we do not yet have
a format similar to our weather prediction data set with a fixed number ``p`` of
predictors for each email. In this section we create a very simple feature
representation of our emails:
1. We create a lexicon of words that are neither very frequent nor very rare.
2. For each email we count how often every word in this lexicon appears.
3. Our feature matrix will consist of ``n`` rows (one for each email) and ``p``
predictors (one for each word in the lexicon) with ``x_{ij}`` measuring how
often word ``j`` appears in document ``i``, normalized by the number of
lexicon words in each email (such that the elements in every row sum to 1).
"
# ╔═╡ 210b977d-7136-407f-a1c9-eeea869d0312
begin
spamdata = CSV.read(joinpath(@__DIR__, "..", "data", "spam.csv"), DataFrame)
dropmissing!(spamdata) # remove entries without any text (missing values).
end
# ╔═╡ 72969aca-b203-4d83-8923-74e523aa1c01
import TextAnalysis: Corpus, StringDocument, DocumentTermMatrix, lexicon,
update_lexicon!, tf
# ╔═╡ 4cbb3057-01f4-4e80-9029-4e80d6c9e5e6
md"In the next cell we create the full lexicon of words appearing in the first
2000 emails. Each lexicon entry is of the form `\"word\" => count`."
# ╔═╡ c50c529f-d393-4854-b5ed-91e90d557d12
begin
crps = Corpus(StringDocument.(spamdata.text[1:2000]))
update_lexicon!(crps)
lexicon(crps)
end
# ╔═╡ 72b50cee-d436-42ce-add9-07b0c012cb31
md"Now we select only those words of the full lexicon that appear at least 100
times and at most 10^3 times. These numbers are pulled out of thin air (like
all the design choices of this very crude feature engineering).
"
# ╔═╡ bf4110a9-31a4-48a3-bd6d-85c404d0e72d
begin
small_lex = Dict(k => lexicon(crps)[k]
for k in findall(x -> 100 <= last(x) <= 10^3, lexicon(crps)))
m = DocumentTermMatrix(crps, small_lex)
end
# ╔═╡ 534681d5-71d8-402a-b455-f491cfbb353e
begin
spam_or_ham = coerce(String.(spamdata.label[1:2000]), Binary)
normalized_word_counts = float.(DataFrame(tf(m), :auto))
end
# ╔═╡ ec1c2ea5-29ce-4371-be49-08798305ff50
Markdown.parse("Here we go: now we have a matrix of size
$(join(size(normalized_word_counts), " x ")) as input and a vector of binary labels as
output. We will be able to use this as input in multiple logistic regression.
For future usage we save this preprocessed representation of the spam data to
a file.")
# ╔═╡ 681cb7b9-f041-4aea-907e-4d85135c005a
CSV.write(joinpath(dirname(pathof(MLCourse)), "..", "data", "spam_preprocessed.csv"),
[normalized_word_counts DataFrame(spam_or_ham = spam_or_ham)])
# ╔═╡ f7117513-283f-4e32-a2a1-3594c794c94d
md"## Multiple Logistic Regression
In the top row of the figure below we see in two different ways (once as a 3D
plot and once as a contour plot) the probability of class A for the selected
parameter values. The bottom row shows samples (large points, red = class A)
obtained with this probability distribution and predictions (small points)
at decision threshold 0.5.
Play with the parameters to get a feeling for how they affect the probability
and the samples.
θ₀ = $(@bind θ₀ Slider(-3:3, default = 0, show_value = true))
θ₁ = $(@bind θ₁ Slider(-8:8, default = 3, show_value = true))
θ₂ = $(@bind θ₂ Slider(-8:8, default = 0, show_value = true))
"
# ╔═╡ fd4165dc-c3e3-4c4c-9605-167b5b4416da
md"## Confusion Matrix, ROC and AUC"
# ╔═╡ 7738c156-8e1b-4723-9818-fba364822171
md"s = $(@bind s Slider(-4:.1:4, default = 0, show_value = true))
seed = $(@bind seed Slider(1:100, show_value = true))
threshold = $(@bind threshold Slider(.01:.01:.99, default = 0.5, show_value = true))
"
# ╔═╡ 0fcfd7d2-6ea3-4c75-bad3-7d0fdd6fde11
begin
logistic(x) = 1/(1 + exp(-x))
logodds(p) = log(p/(1-p))
    function error_rates(x, y, t)
        P = sum(y)                         # number of positive samples
        N = length(y) - P                  # number of negative samples
        pos_pred = y[(logistic.(x) .> t)]  # true labels of samples predicted positive at threshold t
        TP = sum(pos_pred)                 # true positives
        FP = sum(1 .- pos_pred)            # false positives
        FP/N, TP/P                         # (false positive rate, true positive rate)
    end
end;
# ╔═╡ 4f89ceab-297f-4c2c-9029-8d2d7fad084f
let f(x1, x2) = logistic(θ₀ + θ₁ * x1 + θ₂ * x2)
p1 = wireframe(-3:.1:3, -3:.1:3, f, zlims = (0, 1))
p2 = contour(-3:.1:3, -3:.1:3, f, contour_labels = true, levels = 20, cbar = false)
plotly()
samples = (X1 = 6 * rand(200) .- 3, X2 = 6 * rand(200) .- 3)
labels = f.(samples.X1, samples.X2) .> rand(200)
xgrid = MLCourse.grid(-3:.2:3, -3:.2:3, names = (:X1, :X2))
scatter(xgrid.X1, xgrid.X2, color = (f.(xgrid.X1, xgrid.X2) .> .5) .+ 1,
markersize = 2, markerstrokewidth = 0, label = nothing)
p3 = scatter!(samples.X1, samples.X2, color = labels .+ 1, xlabel = "X1")
plot(p1, p2, plot(), p3, layout = (2, 2), size = (700, 600),
ylabel = "X2", legend = false)
end
# ╔═╡ 285c6bfc-5f29-46e0-a2c1-8abbec74501b
begin
Random.seed!(seed)
auc_samples_x = 2 * randn(200)
end;
# ╔═╡ c98524b5-d6b3-469c-82a1-7d231cc792d6
begin
auc_samples_y = logistic.(2.0^s * auc_samples_x) .> rand(200)
auc = [error_rates(2.0^s * auc_samples_x, auc_samples_y, t)
for t in .01:.01:.99]
push!(auc, (0., 0.))
prepend!(auc, [(1., 1.)])
end;
# ╔═╡ 3336ab15-9e9b-44af-a7d5-1d6472241e62
let
gr()
p1 = scatter(auc_samples_x, auc_samples_y, markershape = :vline, label = nothing, color = :black)
plot!(x -> logistic(2.0^s * x), color = :blue, label = nothing, xlims = (-8, 8))
vline!([1/(2.0^s) * logodds(threshold)], w = 3, color = :red,
label = nothing, xlabel = "x", ylabel = "y")
p2 = plot(first.(auc), last.(auc), title = "ROC", label = nothing)
fp, tp = auc[floor(Int, threshold * 100)]
scatter!([fp], [tp], color = :red, xlims = (-.01, 1.01), ylims = (-.01, 1.01),
labels = nothing, ylabel = "true positive rate",
xlabel = "false positive rate")
plot(p1, p2, size = (700, 400))
end
# ╔═╡ 62ad57e5-1366-4635-859b-ccdab2efd3b8
md"## Multiple Logistic Regression on the spam data"
# ╔═╡ 29e1d9ff-4375-455a-a69b-8dd0c2cac57d
m3 = fit!(machine(LogisticClassifier(penalty = :none),
normalized_word_counts,
spam_or_ham));
# ╔═╡ 1d1a24c6-c166-49a2-aa21-7acf50b55a66
predict(m3, normalized_word_counts)
# ╔═╡ 32bafa9e-a35e-4f54-9857-d269b47f95c3
confusion_matrix(predict_mode(m3, normalized_word_counts), spam_or_ham)
# ╔═╡ 4e4f4adf-364f-49b9-9391-5050a4c1286a
md"With our simple features, logistic regression can classify the training data
almost always correctly. Let us see how well this works for test data.
"
# ╔═╡ 50c035e6-b892-4157-a52f-824578366977
begin
test_crps = Corpus(StringDocument.(spamdata.text[2001:4000]))
test_input = float.(DataFrame(tf(DocumentTermMatrix(test_crps, small_lex)), :auto))
test_labels = coerce(String.(spamdata.label[2001:4000]), Binary)
confusion_matrix(predict_mode(m3, test_input), test_labels)
end
# ╔═╡ 21b66582-3fda-401c-9421-73ae2f455a75
predict_mode(m3, normalized_word_counts)
# ╔═╡ ba4b5683-5932-415e-8772-8b3eef5eb63d
md"We save also the test data for future usage."
# ╔═╡ a30578dd-aecb-46eb-b947-f009282cf2fc
md"Let us evaluate the fit in terms of commonly used losses for binary classification."
# ╔═╡ 8ed39cdc-e99e-48ff-9973-66df41aa0f78
function losses(machine, input, response)
(loglikelihood = -sum(log_loss(predict(machine, input), response)),
misclassification_rate = mean(predict_mode(machine, input) .!= response),
accuracy = accuracy(predict_mode(machine, input), response),
auc = MLJ.auc(predict(machine, input), response)
)
end;
# ╔═╡ dd463687-b73d-4e70-b2cf-97a56a0ad409
losses(m3, normalized_word_counts, spam_or_ham)
# ╔═╡ 57dcadc0-2da2-4521-aeaf-6fd01f4bd82b
spam_or_ham
# ╔═╡ 935adbcd-48ab-4a6f-907c-b04137ca3abe
losses(m3, test_input, test_labels)
# ╔═╡ 8b0451bf-59b0-4e71-be84-549e23b5bfe7
md"""# Exercises
## Conceptual
1. Suppose we have a data set with three predictors, ``X_1`` = Final Grade, ``X_2`` = IQ, ``X_3`` = Level (1 for College and 0 for High School). The response is starting salary after graduation (in thousands of dollars). Suppose we use least squares to fit the model, and get ``\hat\beta_0 = 25, \hat\beta_1 = 2, \hat\beta_2 = 0.07, \hat\beta_3 = 15``.
- Which answer is correct, and why?
- For a fixed value of IQ and Final Grade, high school graduates earn more, on average, than college graduates.
- For a fixed value of IQ and Final Grade, college graduates earn more, on average, than high school graduates.
- Predict the salary of a college graduate with IQ of 110 and a Final Grade of 4.0.
2. Suppose we collect data for a group of students in a machine learning class with variables ``X_1 =`` hours studied, ``X_2 =`` grade in statistics class, and ``Y =`` receive a 6 in the machine learning class. We fit a logistic regression and produce estimated coefficients, ``\hat{\beta}_0 = -6``, ``\hat{\beta}_1 = 0.025``, ``\hat{\beta}_2 = 1``.
- Estimate the probability that a student who studies for 75 hours and had a 4 in the statistics class gets a 6 in the machine learning class.
    - How many hours would the above student need to study to have a 50% chance of getting a 6 in the machine learning class?
3. In this exercise we will derive the loss function implicitly defined by maximum likelihood estimation of the parameters in a classification setting with multiple classes. Remember that the input ``f(x)`` of the softmax function ``s`` is a vector-valued function. Here we assume a linear function ``f`` and write the ``i``th component of this function as ``f_i(x) = \theta_{i0} + \theta_{i1}x_1 + \cdots + \theta_{ip}x_p``. Note that each component ``i`` has now its own parameters ``\theta_{i0}`` to ``\theta_{ip}``. Using matrix multiplication we can also write ``f(x) = \theta x`` where ``\theta = \left(\begin{array}{ccc}\theta_{10} & \cdots & \theta_{1p}\\\vdots & \ddots & \cdots\\\theta_{K0} & \cdots & \theta_{Kp}\end{array}\right)`` is a ``K\times(p+1)`` dimensional matrix and ``x = (1, x_1, x_2, \ldots, x_p)`` is a column vector of length ``p+1``.
    - Write the log-likelihood function for a classification problem with ``K`` classes. *Hint*: to simplify the notation we can use the convention ``s_y(f(x)) = P(y|x)`` to write the conditional probability of class ``y`` given input ``x``. This convention makes sense when the classes are identified by the integers ``1, 2, \ldots, K``; in this case ``s_y(f(x))`` is the ``y``th component of ``s(f(x))``. Otherwise we could specify a mapping from classes ``C_1, C_2, \ldots, C_K`` to the integers ``1, 2, \ldots, K`` for this convention to make sense.
- Assume now ``K = 3`` and ``p = 2``. Explicitly write the loss function for the training set ``\mathcal D = ((x_1 = (0, 0), y_1 = C), (x_2 = (3, 0), y_2 = A), (x_3 = (0, 2), y_3 = B))``.
- Assume ``K = 2`` and ``p = 1`` and set ``\theta_{20} = 0`` and ``\theta_{21} = 0``. Show that we recover standard logistic regression in this case. *Hint*: show that ``s_1(f(x)) = \sigma(f_1(x))`` and ``s_2(f(x)) = 1 - \sigma(f_1(x))``, where ``s`` is the softmax function and ``\sigma(x) = 1/(1 + e^{-x})`` is the logistic function.
    - Show that one can always set ``\theta_{K0}, \theta_{K1}, \ldots, \theta_{Kp}`` to zero. *Hint*: show that the softmax function with the transformed parameters ``\tilde\theta_{ij}=\theta_{ij} - \theta_{Kj}`` has the same value as the softmax function in the original parameters.
## Applied
1. In the multiple linear regression of the weather data set above we used all
available predictors. We do not know if all of them are relevant. In this exercise our aim is to find models with fewer predictors and quantify the loss in prediction accuracy.
- Systematically search for the model with at most 2 predictors that has the lowest test rmse. *Hint* write a function `train_and_evaluate` that takes the training and the test data as input as well as an array of two predictors; remember that `data[:, [\"A\", \"B\"]]` returns a sub-dataframe with columns \"A\" and \"B\". This function should fit a `LinearRegressor` on the training set with those two predictors and return the test rmse for the two predictors. To get a list of all pairs of predictors you can use something like `predictors = setdiff(names(train), ["time", "LUZ_wind_peak"]); predictor_pairs = [[p1, p2] for p1 in predictors, p2 in predictors if p1 != p2 && p1 > p2]`
- How much higher is the test error compared to the fit with all available predictors?
- How many models did you have to fit to find your result above?
- How many models would you have to fit to find the best model with at most 5 predictors? *Hint* the function `binomial` may be useful.
2. In this exercise we perform linear classification of the MNIST handwritten digits
dataset.
- Load the MNIST data set with `using OpenML; mnist = OpenML.load(554) |> DataFrame; dropmissing!(mnist);`
- Usually the first 60'000 images are taken as training set, but for this exercise I recommend to use fewer rows, e.g. the first 5000.
    - Scale the input values to the interval [0, 1] with `mnist[:, 1:784] ./= 255`
- Fit a `MLJLinearModels.MultinomialClassifier(penalty = :none)` to the data. Be patient! This can take a few minutes.
- Compute the misclassification rate and the confusion matrix on the training set.
- Use as test data rows 60001 to 70000 and compute the misclassification rate
and the confusion matrix on this test set.
- Plot some of the wrongly classified training and test images.
Are they also difficult for you to classify?
"""
# ╔═╡ 7f08fcaa-000d-422d-80b4-e58a2f489d74
MLCourse.footer()
# ╔═╡ Cell order:
# ╟─12d5824c-0873-49a8-a5d8-f93c73b633ae
# ╠═94f8e29e-ef91-11eb-1ae9-29bc46fa505a
# ╟─8217895b-b120-4b08-b18f-d921dfdddf10
# ╠═9f84bcc5-e5ab-4935-9076-c19e9bd668e8
# ╠═34e527f2-ef80-4cb6-be3a-bee055eca125
# ╠═006fc1eb-50d5-4206-8c87-53e873f158f4
# ╟─e4712ebe-f395-418b-abcc-e10ada4b05c2
# ╠═8c9ae8f7-81b2-4d60-a8eb-ded5364fe0cc
# ╟─f4f890b6-0ad4-4155-9321-15d673e15489
# ╠═9b62c374-c26e-4990-8ffc-790928e62e88
# ╟─7923a0a8-3033-4dde-91e8-22bf540c9866
# ╠═57f352dc-55ee-4e14-b68d-698938a97d92
# ╠═b0de002f-3f39-4378-8d68-5c4606e488b7
# ╠═ce536f60-68b3-4901-bd6a-c96378054b12
# ╟─c65b81bd-395f-4461-a73b-3535903cb2d7
# ╟─51c9ea74-3110-4536-a4af-7cc73b45a4a6
# ╟─d541a8cd-5aa4-4c2d-bfdf-5e6297bb65a8
# ╟─0f544053-1b7a-48d6-b18b-72092b124305
# ╟─da6462d8-3343-41d8-82dd-48770176d4ba
# ╠═753ec309-1363-485d-a2bd-b9fa100d9058
# ╠═618ef3c7-0fda-4970-88e8-1dac195545de
# ╠═2d25fbb6-dc9b-40ad-bdce-4c952cdad077
# ╠═c9f10ace-3299-45fb-b98d-023a35dd405a
# ╟─99a371b2-5158-4c42-8f50-329352b6c1f2
# ╠═f10b7cad-eda3-4ec9-99ee-d43ed013a057
# ╠═05354df5-a803-422f-87a3-1c56a34e8a48
# ╠═9e61b4c3-1a9f-41a7-9882-25ed797a7b8d
# ╠═c6a59b85-d031-4ad4-9e24-691494d08cde
# ╠═e50b8196-e804-473a-b3b5-e22fdb9d2f45
# ╠═f413ea94-36ca-4afc-8ca8-9a7e88101980
# ╟─dbf7fc72-bfd0-4c57-a1a9-fb5881e16e7e
# ╟─ad5b293d-c0f4-4693-84f4-88308639a501
# ╠═210b977d-7136-407f-a1c9-eeea869d0312
# ╠═72969aca-b203-4d83-8923-74e523aa1c01
# ╟─4cbb3057-01f4-4e80-9029-4e80d6c9e5e6
# ╠═c50c529f-d393-4854-b5ed-91e90d557d12
# ╟─72b50cee-d436-42ce-add9-07b0c012cb31
# ╠═bf4110a9-31a4-48a3-bd6d-85c404d0e72d
# ╠═534681d5-71d8-402a-b455-f491cfbb353e
# ╟─ec1c2ea5-29ce-4371-be49-08798305ff50
# ╠═681cb7b9-f041-4aea-907e-4d85135c005a
# ╟─f7117513-283f-4e32-a2a1-3594c794c94d
# ╟─4f89ceab-297f-4c2c-9029-8d2d7fad084f
# ╟─fd4165dc-c3e3-4c4c-9605-167b5b4416da
# ╟─7738c156-8e1b-4723-9818-fba364822171
# ╠═0fcfd7d2-6ea3-4c75-bad3-7d0fdd6fde11
# ╟─3336ab15-9e9b-44af-a7d5-1d6472241e62
# ╟─285c6bfc-5f29-46e0-a2c1-8abbec74501b
# ╟─c98524b5-d6b3-469c-82a1-7d231cc792d6
# ╟─62ad57e5-1366-4635-859b-ccdab2efd3b8
# ╠═29e1d9ff-4375-455a-a69b-8dd0c2cac57d
# ╠═1d1a24c6-c166-49a2-aa21-7acf50b55a66
# ╠═32bafa9e-a35e-4f54-9857-d269b47f95c3
# ╟─4e4f4adf-364f-49b9-9391-5050a4c1286a
# ╠═50c035e6-b892-4157-a52f-824578366977
# ╠═21b66582-3fda-401c-9421-73ae2f455a75
# ╟─ba4b5683-5932-415e-8772-8b3eef5eb63d
# ╟─a30578dd-aecb-46eb-b947-f009282cf2fc
# ╠═8ed39cdc-e99e-48ff-9973-66df41aa0f78
# ╠═dd463687-b73d-4e70-b2cf-97a56a0ad409
# ╠═57dcadc0-2da2-4521-aeaf-6fd01f4bd82b
# ╠═935adbcd-48ab-4a6f-907c-b04137ca3abe
# ╟─8b0451bf-59b0-4e71-be84-549e23b5bfe7
# ╟─20c5c7bc-664f-4c04-8215-8f3a9a2095c9
# ╟─7f08fcaa-000d-422d-80b4-e58a2f489d74
|
{"hexsha": "f3ef373c4d4a14eb94095fe13a9bfcded869992b", "size": 23579, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "notebooks/generalized_linear_regression.jl", "max_stars_repo_name": "jbrea/MLCourse", "max_stars_repo_head_hexsha": "68b120f3be86a8f6140b272ceecc1d5746fabc6e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2021-09-02T07:40:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T01:08:27.000Z", "max_issues_repo_path": "notebooks/generalized_linear_regression.jl", "max_issues_repo_name": "davibarreira/MLCourse", "max_issues_repo_head_hexsha": "68b120f3be86a8f6140b272ceecc1d5746fabc6e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-10-04T12:54:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-04T12:54:30.000Z", "max_forks_repo_path": "notebooks/generalized_linear_regression.jl", "max_forks_repo_name": "davibarreira/MLCourse", "max_forks_repo_head_hexsha": "68b120f3be86a8f6140b272ceecc1d5746fabc6e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2021-09-29T22:20:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T20:49:25.000Z", "avg_line_length": 45.5193050193, "max_line_length": 861, "alphanum_fraction": 0.688366767, "num_tokens": 9194}
|
using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
using StatsPlots
"""
Simple and quick experiment to precompile all needed functions before timing measurements
"""
D = Iterate(mod1_dynamic(x->2*x+0.5*x*(1-x)), 4)
B = Ulam(8)
Q = DiscretizedOperator(B, D)
norms = powernormbounds(B, D; Q=Q)
B_fine = Ulam(64)
Q_fine = DiscretizedOperator(B_fine, D)
norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
w_fine = invariant_vector(B_fine, Q_fine)
error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
D = Mod1Dynamic(x -> 4*x + 0.01*InvariantMeasures.sinpi(8*x))
B = Hat(64)
Q = DiscretizedOperator(B, D)
norms = powernormbounds(B, D; Q=Q)
B_fine = Hat(128)
Q_fine = DiscretizedOperator(B_fine, D)
norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
w_fine = invariant_vector(B_fine, Q_fine)
error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
|
{"hexsha": "50a517088aace949b8f79ff921dcacf702ef0282", "size": 959, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/warmup.jl", "max_stars_repo_name": "orkolorko/InvariantMeasures.jl", "max_stars_repo_head_hexsha": "2ade4708a5b61b3e5eca50d182736687d5a7a856", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-03-03T14:17:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T22:54:02.000Z", "max_issues_repo_path": "examples/warmup.jl", "max_issues_repo_name": "orkolorko/InvariantMeasures.jl", "max_issues_repo_head_hexsha": "2ade4708a5b61b3e5eca50d182736687d5a7a856", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-03-31T14:28:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-06T23:40:12.000Z", "max_forks_repo_path": "examples/warmup.jl", "max_forks_repo_name": "orkolorko/InvariantMeasures.jl", "max_forks_repo_head_hexsha": "2ade4708a5b61b3e5eca50d182736687d5a7a856", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9189189189, "max_line_length": 89, "alphanum_fraction": 0.7580813347, "num_tokens": 332}
|
# -*- coding: utf-8 -*-
from essentia.streaming import *
import essentia.standard as es
import essentia
import librosa
import librosa.display
import numpy as np
def melspectrogram(audio, sampleRate=44100, frameSize=2048, hopSize=1024,
window='blackmanharris62', zeroPadding=0, center=True,
numberBands=[128, 96, 48, 32, 24, 16, 8],
lowFrequencyBound=0, highFrequencyBound=None,
weighting='linear', warpingFormula='slaneyMel', normalize='unit_tri'):
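    # Computes power Mel bands at several band resolutions for each frame and
    # returns an essentia Pool with both a dB version ('mel_<n>_db') and a
    # log10(1 + 10000*x) version ('mel_<n>_log1+10kx') per resolution.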
if highFrequencyBound is None:
highFrequencyBound = sampleRate/2
windowing = es.Windowing(type=window, normalized=False, zeroPadding=zeroPadding)
spectrum = es.Spectrum()
melbands = {}
for nBands in numberBands:
melbands[nBands] = es.MelBands(numberBands=nBands,
sampleRate=sampleRate,
lowFrequencyBound=lowFrequencyBound,
highFrequencyBound=highFrequencyBound,
inputSize=(frameSize+zeroPadding)//2+1,
weighting=weighting,
normalize=normalize,
warpingFormula=warpingFormula,
type='power')
norm10k = es.UnaryOperator(type='identity', shift=1, scale=10000)
log10 = es.UnaryOperator(type='log10')
amp2db = es.UnaryOperator(type='lin2db', scale=2)
results = essentia.Pool()
for frame in es.FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize,
startFromZero=not center):
spectrumFrame = spectrum(windowing(frame))
for nBands in numberBands:
melFrame = melbands[nBands](spectrumFrame)
results.add('mel_' + str(nBands)+'_db', amp2db(melFrame))
results.add('mel_' + str(nBands)+'_log1+10kx', log10(norm10k(melFrame)))
return results
def cut_audio(filename, sampleRate=44100, segment_duration=None):
audio = es.MonoLoader(filename=filename, sampleRate=sampleRate)()
if segment_duration:
segment_duration = round(segment_duration*sampleRate)
segment_start = (len(audio) - segment_duration) // 2
segment_end = segment_start + segment_duration
else:
segment_start = 0
segment_end = len(audio)
if segment_start < 0 or segment_end > len(audio):
raise ValueError('Segment duration is larger than the input audio duration')
return audio[segment_start:segment_end]
def analyze_mel(filename, segment_duration=None, maxFrequency=11025, replaygain=True):
lowlevelFrameSize=2048
lowlevelHopSize=1024
# Compute replay gain and duration on the entire file, then load the
# segment that is centered in time with replaygain applied
audio = es.MonoLoader(filename=filename)()
if replaygain:
replaygain = es.ReplayGain()(audio)
else:
replaygain = -6 # Default replaygain value in EasyLoader
if segment_duration:
segment_start = (len(audio) / 44100 - segment_duration) / 2
segment_end = segment_start + segment_duration
else:
segment_start = 0
segment_end = len(audio)/44100
if segment_start < 0 or segment_end > len(audio)/44100:
raise ValueError('Segment duration is larger than the input audio duration')
loader_mel = EasyLoader(filename=filename, replayGain=replaygain,
startTime=segment_start, endTime=segment_end)
# Processing for Mel bands
framecutter_mel = FrameCutter(frameSize=lowlevelFrameSize,
hopSize=lowlevelHopSize)
window_mel = Windowing(type='blackmanharris62', zeroPadding=lowlevelFrameSize)
spectrum_mel = Spectrum()
melbands128 = MelBands(numberBands=128,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
melbands96 = MelBands(numberBands=96,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
melbands48 = MelBands(numberBands=48,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
melbands32 = MelBands(numberBands=32,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
melbands24 = MelBands(numberBands=24,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
melbands16 = MelBands(numberBands=16,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
melbands8 = MelBands(numberBands=8,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
# Normalize Mel bands: log10(1+x*10000)
norm128 = UnaryOperator(type='identity', shift=1, scale=10000)
log10128 = UnaryOperator(type='log10')
norm96 = UnaryOperator(type='identity', shift=1, scale=10000)
log1096 = UnaryOperator(type='log10')
norm48 = UnaryOperator(type='identity', shift=1, scale=10000)
log1048 = UnaryOperator(type='log10')
norm32 = UnaryOperator(type='identity', shift=1, scale=10000)
log1032 = UnaryOperator(type='log10')
norm24 = UnaryOperator(type='identity', shift=1, scale=10000)
log1024 = UnaryOperator(type='log10')
norm16 = UnaryOperator(type='identity', shift=1, scale=10000)
log1016 = UnaryOperator(type='log10')
norm8 = UnaryOperator(type='identity', shift=1, scale=10000)
log108 = UnaryOperator(type='log10')
p = essentia.Pool()
loader_mel.audio >> framecutter_mel.signal
framecutter_mel.frame >> window_mel.frame >> spectrum_mel.frame
spectrum_mel.spectrum >> melbands128.spectrum
spectrum_mel.spectrum >> melbands96.spectrum
spectrum_mel.spectrum >> melbands48.spectrum
spectrum_mel.spectrum >> melbands32.spectrum
spectrum_mel.spectrum >> melbands24.spectrum
spectrum_mel.spectrum >> melbands16.spectrum
spectrum_mel.spectrum >> melbands8.spectrum
melbands128.bands >> norm128.array >> log10128.array >> (p, 'mel128')
melbands96.bands >> norm96.array >> log1096.array >> (p, 'mel96')
melbands48.bands >> norm48.array >> log1048.array >> (p, 'mel48')
melbands32.bands >> norm32.array >> log1032.array >> (p, 'mel32')
melbands24.bands >> norm24.array >> log1024.array >> (p, 'mel24')
melbands16.bands >> norm16.array >> log1016.array >> (p, 'mel16')
melbands8.bands >> norm8.array >> log108.array >> (p, 'mel8')
essentia.run(loader_mel)
return p
def analyze(filename, segment_duration=20):
lowlevelFrameSize=2048
lowlevelHopSize=1024
tonalFrameSize=4096
tonalHopSize=1024
# Compute replay gain and duration on the entire file, then load the
# segment that is centered in time with replaygain applied
audio = es.MonoLoader(filename=filename)()
replaygain = es.ReplayGain()(audio)
segment_start = (len(audio) / 44100 - segment_duration) / 2
segment_end = segment_start + segment_duration
if segment_start < 0 or segment_end > len(audio)/44100:
raise ValueError('Segment duration is larger than the input audio duration')
# TODO
# There's a bug in streaming mode Python wrapper: running both Mel and HPCP
# in the same network with the same loader will result in a memory error.
# This does not happen in C++. As a workaround, compute Mel and HPCP in
# two separate networks with two separate loaders.
loader_mel = EasyLoader(filename=filename, replayGain=replaygain,
startTime=segment_start, endTime=segment_end)
loader_hpcp = EasyLoader(filename=filename, replayGain=replaygain,
startTime=segment_start, endTime=segment_end)
# Processing for Mel bands
framecutter_mel = FrameCutter(frameSize=lowlevelFrameSize,
hopSize=lowlevelHopSize)
window_mel = Windowing(type='blackmanharris62')
spectrum_mel = Spectrum()
melbands = MelBands(numberBands=96,
lowFrequencyBound=0,
highFrequencyBound=11025)
# Processing for HPCPs
framecutter_hpcp = FrameCutter(frameSize=tonalFrameSize,
hopSize=tonalHopSize)
window_hpcp = Windowing(type='blackmanharris62')
spectrum_hpcp = Spectrum()
speaks = SpectralPeaks(maxPeaks=60,
magnitudeThreshold=0.00001,
minFrequency=20.0,
maxFrequency=3500.0,
orderBy='magnitude')
# Normalize Mel bands: log10(1+x*10000)
norm = UnaryOperator(type='identity', shift=1, scale=10000)
log10 = UnaryOperator(type='log10')
hpcp = HPCP(size=12,
bandPreset=False,
minFrequency=20.0,
maxFrequency=3500.0,
weightType='cosine',
windowSize=1.)
p = essentia.Pool()
loader_mel.audio >> framecutter_mel.signal
framecutter_mel.frame >> window_mel.frame >> spectrum_mel.frame
spectrum_mel.spectrum >> melbands.spectrum
melbands.bands >> norm.array >> log10.array >> (p, 'melbands')
essentia.run(loader_mel)
loader_hpcp.audio >> framecutter_hpcp.signal
framecutter_hpcp.frame >> window_hpcp.frame >> spectrum_hpcp.frame
spectrum_hpcp.spectrum >> speaks.spectrum
speaks.frequencies >> hpcp.frequencies
speaks.magnitudes >> hpcp.magnitudes
hpcp.hpcp >> (p, 'hpcp')
essentia.run(loader_hpcp)
return p
def analyze_misc(filename, segment_duration=20):
# Compute replay gain and duration on the entire file, then load the
# segment that is centered in time with replaygain applied
audio = es.MonoLoader(filename=filename)()
replaygain = es.ReplayGain()(audio)
segment_start = (len(audio) / 44100 - segment_duration) / 2
segment_end = segment_start + segment_duration
if segment_start < 0 or segment_end > len(audio)/44100:
raise ValueError('Segment duration is larger than the input audio duration')
loader = es.EasyLoader(filename=filename, replayGain=replaygain,
startTime=segment_start, endTime=segment_end)
windowing = es.Windowing(type='blackmanharris62')
spectrum = es.Spectrum()
powerspectrum = es.PowerSpectrum()
centroid = es.Centroid()
zcr = es.ZeroCrossingRate()
rms = es.RMS()
hfc = es.HFC()
pool = essentia.Pool()
audio = loader()
for frame in es.FrameGenerator(audio, frameSize=2048, hopSize=1024):
frame_spectrum = spectrum(windowing(frame))
pool.add('rms', rms(frame))
pool.add('rms_spectrum', rms(frame_spectrum))
pool.add('hfc', hfc(frame_spectrum))
pool.add('spectral_centroid', centroid(frame_spectrum))
pool.add('zcr', zcr(frame))
audio_st, sr, _, _, _, _ = es.AudioLoader(filename=filename)()
# Ugly hack because we don't have a StereoResample
left, right = es.StereoDemuxer()(audio_st)
resampler = es.Resample(inputSampleRate=sr, outputSampleRate=44100)
left = resampler(left)
right = resampler(right)
audio_st = es.StereoMuxer()(left, right)
audio_st = es.StereoTrimmer(startTime=segment_start, endTime=segment_end)(audio_st)
ebu_momentary, _, _, _ = es.LoudnessEBUR128(hopSize=1024/44100, startAtZero=True)(audio_st)
pool.set('ebu_momentary', ebu_momentary)
return pool
def analyze_hp(filename, segment_duration=20):
lowlevelFrameSize=2048
lowlevelHopSize=1024
tonalFrameSize=4096
tonalHopSize=1024
# Compute replay gain and duration on the entire file, then load the
# segment that is centered in time with replaygain applied
audio = es.MonoLoader(filename=filename)()
replaygain = es.ReplayGain()(audio)
segment_start = (len(audio) / 44100 - segment_duration) / 2
segment_end = segment_start + segment_duration
if segment_start < 0 or segment_end > len(audio)/44100:
raise ValueError('Segment duration is larger than the input audio duration')
loader = es.EasyLoader(filename=filename, replayGain=replaygain,
startTime=segment_start, endTime=segment_end)
window = es.Windowing(type='blackmanharris62')
fft = es.FFT()
stft = []
audio = loader()
for frame in es.FrameGenerator(audio, frameSize=lowlevelFrameSize, hopSize=lowlevelHopSize):
stft.append(fft(window(frame)))
# Librosa requires bins x frames format
stft = np.array(stft).T
D_harmonic, D_percussive = librosa.decompose.hpss(stft, margin=8)
D_percussive_magnitude, _ = librosa.magphase(D_percussive)
D_harmonic_magnitude, _ = librosa.magphase(D_harmonic)
# Convert back to Essentia format (frames x bins)
spectrum_harmonic = D_harmonic_magnitude.T
specturm_percussive = D_percussive_magnitude.T
# Processing for Mel bands
melbands = es.MelBands(numberBands=96,
lowFrequencyBound=0,
highFrequencyBound=11025)
# Normalize Mel bands: log10(1+x*10000)
norm = es.UnaryOperator(type='identity', shift=1, scale=10000)
log10 = es.UnaryOperator(type='log10')
p = essentia.Pool()
for spectrum_frame in spectrum_harmonic:
p.add('melbands_harmonic', log10(norm(melbands(spectrum_frame))))
for spectrum_frame in specturm_percussive:
p.add('melbands_percussive', log10(norm(melbands(spectrum_frame))))
return p
|
{"hexsha": "d3a5ca16baf0449c36760b536b9378d2dd5ad405", "size": 14101, "ext": "py", "lang": "Python", "max_stars_repo_path": "audio_extract.py", "max_stars_repo_name": "andrebola/icassp2021", "max_stars_repo_head_hexsha": "bc551a5453f17728dacafdc74971337d4ee92e44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-02-03T07:03:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-04T06:56:30.000Z", "max_issues_repo_path": "audio_extract.py", "max_issues_repo_name": "andrebola/icassp2021", "max_issues_repo_head_hexsha": "bc551a5453f17728dacafdc74971337d4ee92e44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "audio_extract.py", "max_forks_repo_name": "andrebola/icassp2021", "max_forks_repo_head_hexsha": "bc551a5453f17728dacafdc74971337d4ee92e44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5026595745, "max_line_length": 96, "alphanum_fraction": 0.6501666548, "include": true, "reason": "import numpy", "num_tokens": 3477}
|
<figure>
<IMG SRC="gfx/Logo_norsk_pos.png" WIDTH=100 ALIGN="right">
</figure>
# Particle in a box
*Roberto Di Remigio*, *Luca Frediani*
The [particle in a box] model is among the simplest, exactly solvable models in quantum mechanics.
In the one-dimensional case, we assume a particle of mass $m$ to be confined into a box of length $L$.
The confinement is achieved by means of a potential energy operator that is zero inside the box and infinite outside, as in the Figure below.
<figure>
<IMG SRC="gfx/Infinite_potential_well-en.svg">
</figure>
In practice, this means that the particle **cannot** escape the box: a result we
would also obtain from classical mechanics.
How does the quantum particle behave? We need to find the **eigenfunctions** and **eigenvalues** of the **Hamiltonian operator**, that is we have to solve the following ordinary differential equation:
\begin{equation}
-\frac{\hbar^2}{2m}\frac{\mathrm{d}^2}{\mathrm{d}x^2} \psi_n(x) = E_n\psi_n(x)
\end{equation}
with **boundary conditions**:
\begin{equation}
\begin{aligned}
\psi_n(0) &= 0 \\
\psi_n(L) &= 0
\end{aligned}
\end{equation}
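To see where the solutions come from, note that the general solution of the differential equation inside the box is
\begin{equation}
\psi(x) = A\sin(kx) + B\cos(kx), \quad\quad k = \frac{\sqrt{2mE}}{\hbar}
\end{equation}
The condition $\psi(0) = 0$ forces $B = 0$, while $\psi(L) = 0$ requires $kL = n\pi$ with $n$ a nonzero integer.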
Thus acceptable solutions are of the form:
\begin{equation}
\psi_n(x) = A\sin\left(\frac{n\pi x}{L}\right) \quad\quad \forall n \neq 0
\end{equation}
with energies:
\begin{equation}
E_n = \frac{h^2n^2}{8mL^2} \quad\quad \forall n \neq 0
\end{equation}
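As a quick order-of-magnitude check (the numbers are chosen for illustration): for an electron ($m \approx 9.11\times 10^{-31}\,\,\mathrm{kg}$) in a box of length $L = 10\,\,\mathrm{nm}$, the ground-state energy is
\begin{equation}
E_1 = \frac{h^2}{8mL^2} \approx 6.0\times 10^{-22}\,\,\mathrm{J} \approx 3.8\,\,\mathrm{meV}
\end{equation}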
[particle in a box]: https://en.wikipedia.org/wiki/Particle_in_a_box
## Exercise 1: Normalization
The wavefunction(s) given above as solution to the particle in a box problem are not normalized:
\begin{equation}
\left\langle \psi_n | \psi_n \right\rangle = \int_0^L \mathrm{d}x\, \psi_n^*(x)\psi_n(x) = |A|^2 \int_0^L \mathrm{d}x\, \sin^2\left(\frac{n\pi x}{L}\right) \neq 1
\end{equation}
Find the normalization constant.
## Exercise 2: Ground-state and probabilities
Given the **normalized** ground-state wavefunction for a particle confined to a one-dimensional box of length $L$, suppose the box is $10.0\,\,\mathrm{nm}$ long. What is the probability that the particle is:
1. Between $a = 4.95\,\,\mathrm{nm}$ and $b=5.05\,\,\mathrm{nm}$
2. Between $a = 1.95\,\,\mathrm{nm}$ and $b=2.05\,\,\mathrm{nm}$
3. Between $a = 9.90\,\,\mathrm{nm}$ and $b=10.0\,\,\mathrm{nm}$
4. In the right half of the box
5. In the central third of the box
How can we generalize to the excited states?
## Exercise 3: Eigenfunctions and probability distributions
Write a Python function to plot the eigenfunctions of the particle in a box model. The function should accept the quantum number $n$, the length of the box $L$ and a NumPy array of $x$ values as arguments:
```Python
def eigenfunction1D(n, L, x):
""" Normalized eigenfunction for the 1D particle in a box.
n -- the quantum number
L -- the size of the box
x -- the NumPy array with the x values
"""
```
Once this function is defined, we can obtain the respective probability distribution by taking its square:
```Python
x = np.linspace(0, 10.0, 1000)
eig = eigenfunction1D(1, 10.0, x)
prob = eigenfunction1D(1, 10.0, x)**2
```
and plot both of them with:
```Python
plt.plot(x, eig)
plt.plot(x, prob)
```
```python
import numpy as np
import matplotlib.pyplot as plt
# make sure we see it on this notebook
%matplotlib inline
def eigenfunction1D(n, L, x):
""" Normalized eigenfunction for the 1D particle in a box.
n -- the quantum number
L -- the size of the box
x -- the NumPy array with the x values
"""
raise NotImplementedError('You need to write this function!')
x = np.linspace(0, 10.0, 1000)
eig = eigenfunction1D(1, 10.0, x)
prob = eigenfunction1D(1, 10.0, x)**2
plt.plot(x, eig)
plt.plot(x, prob)
```
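A possible reference implementation is sketched below. It assumes the box spans $[0, L]$ and uses the normalization constant $\sqrt{2/L}$ from Exercise 1:
```python
import numpy as np

def eigenfunction1D(n, L, x):
    """ Normalized eigenfunction for the 1D particle in a box.
    n -- the quantum number
    L -- the size of the box
    x -- the NumPy array with the x values
    """
    # psi_n(x) = sqrt(2/L) * sin(n*pi*x/L) inside the box
    return np.sqrt(2.0 / L) * np.sin(n * np.pi * x / L)
```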
## Exercise 4: Normalization of linear combinations of eigenfunctions
A linear combination is defined as:
\begin{equation}
\Psi(x) = \sum_{i=1}^{N} c_i \psi_i(x)
\end{equation}
where $c_i$ are the coefficients and $\psi_i(x)$ are the eigenfunctions of the particle in a box.
Is $\Psi(x)$ normalized? If not, how can we normalize it? Write a Python function that returns the normalization constant given a vector of coefficients $c_i$ in the linear combination.
```Python
def normalize(coeffs):
""" Normalization constant for a linear combination of 1D particle in a box eigenfunctions
coeffs -- a NumPy array with the coefficients
"""
```
```python
import numpy as np
import matplotlib.pyplot as plt
# make sure we see it on this notebook
%matplotlib inline
def normalize(coeffs):
""" Normalization constant for a linear combination of 1D particle in a box eigenfunctions
coeffs -- a NumPy array with the coefficients
"""
raise NotImplementedError('You need to write this function!')
coeffs = np.array([0.5, 0.5])
normalize(coeffs)
```
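One possible solution sketch, relying on the orthonormality of the eigenfunctions, so that $\langle\Psi|\Psi\rangle = \sum_i |c_i|^2$:
```python
import numpy as np

def normalize(coeffs):
    """ Normalization constant for a linear combination of 1D particle in a box eigenfunctions
    coeffs -- a NumPy array with the coefficients
    """
    # <Psi|Psi> = sum_i |c_i|^2 for orthonormal eigenfunctions,
    # so dividing by its square root normalizes the combination
    return 1.0 / np.sqrt(np.sum(np.abs(coeffs)**2))
```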
## Exercise 5: Energy of linear combinations
The **expectation value** of an operator $O$ on the wavefunction $\Psi$ is defined as:
\begin{equation}
\bar{O} = \frac{\langle\Psi|O|\Psi\rangle}{\langle\Psi|\Psi\rangle} = \langle\Psi|O|\Psi\rangle
\end{equation}
where the last equality holds only if $\Psi$ is normalized.
The energy is the expectation value of the Hamiltonian operator:
\begin{equation}
\bar{H} = E = \langle\Psi|H|\Psi\rangle
\end{equation}
Given $\Psi$, a linear combination of particle in a box eigenfunctions, calculate its energy.
**Hint**: remember that $H\psi_i(x) = E_i\psi_i(x)$ and that the $\psi_i(x)$ are orthonormal. You can also assume $\Psi$ to be already normalized.
Write a Python function to calculate the energy of such linear combinations. The function should accept the coefficients $c_i$ of the linear combination, the mass $M$ of the particle and the length $L$ of the box as arguments.
```Python
def energy(coeffs, M, L):
""" Return energy of a linear combination of 1D particle in a box eigenfunctions.
coeffs -- the coefficients of the linear combination
M -- the mass of the particle
L -- the size of the box
"""
```
```python
def energy(coeffs, M, L):
""" Return energy of a linear combination of 1D particle in a box eigenfunctions.
coeffs -- the coefficients of the linear combination
M -- the mass of the particle
L -- the size of the box
"""
raise NotImplementedError('You have to write this function')
coeffs = np.array([0.5, 0.5])
energy(coeffs, 1.0, 10.0)
```
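A possible sketch, working in units where $h = 1$ so that $E_i = i^2/(8ML^2)$ (with SI units you would reinstate Planck's constant explicitly); dividing by $\langle\Psi|\Psi\rangle$ makes it robust to unnormalized coefficients:
```python
import numpy as np

def energy(coeffs, M, L):
    """ Return energy of a linear combination of 1D particle in a box eigenfunctions.
    coeffs -- the coefficients of the linear combination
    M -- the mass of the particle
    L -- the size of the box
    """
    # quantum numbers start at n = 1
    n = np.arange(1, len(coeffs) + 1)
    # E_n = h^2 n^2 / (8 M L^2), with h = 1 by assumption
    E = n**2 / (8.0 * M * L**2)
    w = np.abs(coeffs)**2
    # <H> = sum_i |c_i|^2 E_i / sum_i |c_i|^2; the denominator handles
    # coefficients that are not yet normalized
    return np.sum(w * E) / np.sum(w)
```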
## Exercise 6: Linear combinations of eigenfunctions
We can now try to plot linear combinations of eigenfunctions and their corresponding probability densities.
Define the appropriate function to do so. Your function should use your previous implementation of `eigenfunction1D` to achieve its purpose.
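A minimal sketch of such a function (the name `linear_combination1D` is our choice, not prescribed by the exercise; it reuses `eigenfunction1D` and `normalize` from above):
```python
def linear_combination1D(coeffs, L, x):
    """ Normalized linear combination of 1D particle in a box eigenfunctions.
    coeffs -- the coefficients of the linear combination
    L -- the size of the box
    x -- the NumPy array with the x values
    """
    psi = sum(c * eigenfunction1D(i + 1, L, x) for i, c in enumerate(coeffs))
    return normalize(coeffs) * psi
```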
|
{"hexsha": "2dc3c10197ca5658e17f06f8b7dba3672de433ae", "size": 16274, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "particle_in_a_box.ipynb", "max_stars_repo_name": "ilfreddy/KJE1005", "max_stars_repo_head_hexsha": "be919140e830ed741b04e982183fdd2f14cc8b77", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "particle_in_a_box.ipynb", "max_issues_repo_name": "ilfreddy/KJE1005", "max_issues_repo_head_hexsha": "be919140e830ed741b04e982183fdd2f14cc8b77", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "particle_in_a_box.ipynb", "max_forks_repo_name": "ilfreddy/KJE1005", "max_forks_repo_head_hexsha": "be919140e830ed741b04e982183fdd2f14cc8b77", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.6666666667, "max_line_length": 1214, "alphanum_fraction": 0.614907214, "converted": true, "num_tokens": 1823}
|
from math import pi
import numpy as np
## 3.2 Tension Members
def sec3_2(An, FY):
'''Design Tensile Strength. Eq 3.2-1.
Parameters
----------
An: float,
net area of the section.
FY: float,
yield stress per Table A1 - ASCE 8.
Returns
-------
fiTn: float,
design tensile strength.
midC: dict,
values of fi and Tn.
Raises
------
none
Tests
-----
>>> fiTn, _ = sec3_2(An=1.551, FY=50)
>>> round(fiTn, 2)
65.92
'''
fi = 0.85
Tn = An*FY
midC = {'Tension_Tn': Tn, 'Tension_fi': fi}
return Tn*fi, midC
## 3.3 Flexural Members
## 3.3.1 Strength for Bending Only
def sec3_3_1_1(FY, Se, procedure = 'PI', comp_flange = 'UNSTIFF', localDistorsion= False):
'''Strength for Bending Only. Nominal Section Strength.
Parameters
----------
FY: float,
yield stress per Table A1 - ASCE 8.
Se: float,
effective elastic section modulus, computed with the extreme fiber at f=Fyc or f=Fyt, whichever yields first.
procedure: string,
selects the procedure to apply (options: PI - PII - LD).
comp_flange: string,
whether the compression flanges are stiffened or unstiffened.
Returns
-------
fiMn: float,
design flexural strength of the section.
midC: dict,
intermediate results and parameters.
Raises
------
none
Tests
-----
Example 8.1 - I-section w/unstiffened flanges
>>> fiMn, _ = sec3_3_1_1(FY=50, Se=1.422, procedure = 'PI', comp_flange = 'UNSTIFF')
>>> round(fiMn, 2)
60.43
'''
midC={}
if comp_flange == 'UNSTIFF': # unstiffened compression flanges
fi = 0.85
elif comp_flange == 'STIFF': # stiffened or partially stiffened flanges
fi = 0.90
if procedure == 'PI': # Procedure I - based on initiation of yielding
Mn = E_3_3_1_1_e1(Se=Se, FY=FY)
elif procedure == 'PII': # Procedure II - based on strain hardening
print('Section 3.3.1.1 - Procedure II not implemented.')
raise NotImplementedError
if localDistorsion: # local distortion considerations
Mld, midC = LocalDistorsion()
midC['Mld'] = Mld
midC.update({'Mn_no': Mn, 'fi_no': fi})
fiMn = fi*Mn
return fiMn, midC
def LocalDistorsion():
'''Nominal Section Strength. Local Distortion Consideration.
Parameters
----------
none
Returns
-------
none
Raises
------
none
Tests
-----
>>>
'''
raise NotImplementedError
def sec3_3_1_2_eta(prof_type, E0, d, Iyc, L, rx, ry, c_x, sc_x, A, Lx, Kx, Ly, Ky, Lz, Kz, Cw, G0, J, j, Cb):
'''Strength for Bending Only. Design Lateral Buckling Strength.
Parameters
----------
prof_type: string,
type of member section.
Cb: float,
bending coefficient.
E0: float,
initial elastic modulus.
d: float,
overall depth of the section.
Iyc: float,
moment of inertia of the compression portion of the section about the vertical axis.
L: float,
unbraced length of the member.
rx, ry: float,
radii of gyration.
c_x: float
x coordinate of the centroid of the section.
sc_x: float
x coordinate of the shear center.
A: float,
area of the section.
Lx, Kx: float,
effective length for compression members subject to bending about x.
Ly, Ky: float,
effective length for compression members subject to bending about y.
Lz, Kz: float,
effective length for compression members subject to torsion.
Cw: float,
warping constant.
G0: float,
initial shear modulus.
J: float,
St. Venant torsion constant.
j: float
half the monosymmetry constant in compression about the -y- axis (beta22-).
Returns
-------
Mc_eta: float,
critical moment divided by eta (for iteration).
Raises
------
none
Tests
-----
Example 8.1 - I-section w/unstiffened flanges
>>> round(sec3_3_1_2_eta(prof_type='I_builtup_cee', E0=27000, d=6.0, Iyc=0.172, L=4*12, rx=2.224, ry=0.564, c_x=0, sc_x=0, A=1.083, Lx=4*12, Kx=1.0, Ly=4*12, Ky=1.0, Lz=4*12, Kz=1.0, Cw=3.00, G0=10384.61, J=0.0037, j=0.0, Cb=1.75), 2)
208.88
Example 9.1 - C-section w/lateral buckling consideration
The reference value is actually 326.54, but sigma_ey is 47.14 in the reference and 47.37 in these calculations (slight error in the reference)
>>> round(sec3_3_1_2_eta(prof_type='cee', Cb=1.685, E0=27000, d=7.0, Iyc=0.204/2, L=2.5*12, rx=2.47, ry=0.40, c_x=0.217, sc_x=-0.417, A=1.284, Lx=2.5*12, Kx=1.0, Ly=2.5*12, Ky=1.0, Lz=2.5*12, Kz=1.0, Cw=1.819, G0=10500, J=0.0078, j=0), 2)
327.35
'''
if prof_type in ['I_builtup_cee', 'I_builtup_cee_w_lps']: # I-section - CASE I applies
Mc_eta = E_3_3_1_2_e2(E0=E0, Cb=Cb, d=d, Iyc=Iyc, L=L)
elif prof_type in ['cee', 'c_w_lps']: # C-section - CASE III applies
# only bending about the axis of symmetry is implemented (still pending: how to discern between one case and the other)
# Mc_eta = Mc/eta
Mc_eta = sec3_3_1_2_3_i(Cb=Cb, rx=rx, ry=ry, c_x=c_x, sc_x=sc_x, E0=E0, A=A, Ly=Ly, Ky=Ky, Lz=Lz, Kz=Kz, Cw=Cw, G0=G0, J=J)
# Mc_eta = sec3_3_1_2_3_ii(Cb=Cb, rx=rx, ry=ry, c_x=c_x, sc_x=sc_x, E0=E0, A=A, Lx=Lx, Kx=Kx, Lz=Lz, Kz=Kz, Cw=Cw, G0=G0, J=J, j=j)
else:
print('Section type', prof_type, 'not implemented in analysis 3.3.1.2.')
raise NotImplementedError
return Mc_eta
def sec3_3_1_2_3_i(Cb, rx, ry, c_x, sc_x, E0, A, Ly, Ky, Lz, Kz, Cw, G0, J):
'''Lateral Buckling Strength. Singly symmetric sections bent about the axis of symmetry.
Parameters
----------
Cb: float,
bending coefficient.
rx, ry: float,
radii of gyration.
c_x: float
x coordinate of the centroid of the section.
sc_x: float
x coordinate of the shear center.
E0: float,
initial elastic modulus.
A: float,
area of the section.
Ly, Ky: float,
effective length for compression members subject to bending about y.
Lz, Kz: float,
effective length for compression members subject to torsion.
Cw: float,
warping constant.
G0: float,
initial shear modulus.
J: float,
St. Venant torsion constant.
Returns
-------
Mc_eta: float,
critical moment divided by eta (for iteration).
Raises
------
none
Tests
-----
Example 9.1 - C-section w/lateral buckling consideration
The reference value is actually 326.54, but sigma_ey is 47.14 in the reference and 47.37 in these calculations (slight error in the reference)
>>> round(sec3_3_1_2_3_i(Cb=1.685, rx=2.47, ry=0.40, c_x=0.217, sc_x=-0.417, E0=27000, A=1.284, Ly=2.5*12, Ky=1.0, Lz=2.5*12, Kz=1.0, Cw=1.819, G0=10500, J=0.0078), 2)
327.35
'''
# parameters for computing r0
x0 = -abs(c_x-sc_x)
r0 = E_3_3_1_2_e9(rx=rx, ry=ry, x0=x0)
sigma_ey_eta = E_3_3_1_2_e6(E0=E0, K=Ky, L=Ly, r=ry)
sigma_t_eta = E_3_3_1_2_e8(E0=E0, Kt=Kz, Lt=Lz, r0=r0, A=A, Cw=Cw, G0=G0, J=J)
Mc_eta = E_3_3_1_2_e4(Cb=Cb, r0=r0, A=A, sigma_ey_eta=sigma_ey_eta, sigma_t_eta=sigma_t_eta)
return Mc_eta
def sec3_3_1_2_3_ii(Cb, Cs, rx, ry, c_x, sc_x, E0, A, Lx, Kx, Lz, Kz, Cw, G0, J, j):
'''Lateral Buckling Strength. Singly symmetric sections bent about the axis perpendicular to the axis of symmetry.
Parameters
----------
Cb: float,
bending coefficient.
Cs: float,
+1 if there is compression on the shear-center side, otherwise -1.
rx, ry: float,
radii of gyration.
c_x: float
x coordinate of the centroid of the section.
sc_x: float
x coordinate of the shear center.
E0: float,
initial elastic modulus.
A: float,
area of the section.
Lx, Kx: float,
effective length for compression members subject to bending about x.
Lz, Kz: float,
effective length for compression members subject to torsion.
Cw: float,
warping constant.
G0: float,
initial shear modulus.
J: float,
St. Venant torsion constant.
j: float,
half the monosymmetry constant in compression about the -y- axis (beta22-).
Returns
-------
Mc_eta: float,
nominal lateral buckling strength of the section.
Raises
------
none
Tests
-----
>>>
'''
'''
# parameters for computing r0
x0 = -abs(c_x-sc_x)
r0 = E_3_3_1_2_e9(rx=rx, ry=ry, x0=x0)
sigma_ex_eta = E_3_3_1_2_e6(E0=E0, K=Kx, L=Lx, r=rx)
sigma_t_eta = E_3_3_1_2_e8(E0=E0, Kt=Kz, Lt=Lz, r0=r0, A=A, Cw=Cw, G0=G0, J=J)
Mc_eta = E_3_3_1_2_e5(Cb, Cs, r0, A, sigma_ex_eta, sigma_t_eta, j)
return Mc_eta'''
raise NotImplementedError
def E_3_3_1_1_e1(Se, FY):
'''Nominal Section Strength. Procedure I. Based on Initiation of Yielding.
Parameters
----------
Se: float,
effective elastic section modulus, computed with the extreme fiber at f=Fyc or f=Fyt, whichever yields first.
FY: float,
yield stress per Table A1 - ASCE 8.
Returns
-------
Mn: float,
nominal flexural strength.
Raises
------
none
Tests
-----
>>> round(E_3_3_1_1_e1(Se=2.239, FY=50), 2)
111.95
'''
Mn = Se*FY
return Mn
def E_3_3_1_2_e1(Sc, Mc, Sf):
'''Lateral Buckling Strength. Strength of laterally unbraced segments.
Parameters
----------
Sc: float,
effective elastic section modulus computed at a stress Mc/Sf.
Mc: float,
critical moment.
Sf: float,
elastic section modulus.
Returns
-------
fiMn: float,
design lateral buckling strength.
midC: dict of float,
LB_fi: resistance factor.
LB_Mn: nominal lateral buckling strength.
Raises
------
none
Tests
-----
>>> fiMn, _ = E_3_3_1_2_e1(Sc=2.239, Mc=42.12*2.239, Sf=2.239)
>>> round(fiMn, 2)
80.16
'''
fi = 0.85
Mn = Sc*(Mc/Sf)
fiMn = fi*Mn
midC = {'Mn_LB': Mn, 'fi_LB': fi}
return fiMn, midC
def E_3_3_1_2_e2(E0, Cb, d, Iyc, L):
'''Lateral Buckling Strength. CASE I: doubly symmetric I-sections bent about their minor axis.
Parameters
----------
E0: float,
initial elastic modulus.
Cb: float,
bending coefficient.
d: float,
depth of the section.
Iyc: float,
moment of inertia of the compression portion of the section about the vertical axis.
L: float,
laterally unbraced length of the member.
Returns
-------
Mc_eta: float,
critical moment divided by eta (for iteration).
Raises
------
none
Tests
-----
Example 8.1 - Eq 3.3.1.2-2 divided by eta
>>> round(E_3_3_1_2_e2(E0=27000, Cb=1.75, d=6, Iyc=0.172, L=4*12), 2)
208.88
'''
Mc_eta = pi**2*E0*Cb*(d*Iyc/L**2)
return Mc_eta
def E_3_3_1_2_e4(Cb, r0, A, sigma_ey_eta, sigma_t_eta):
'''Lateral Buckling Strength. CASE III: singly symmetric sections bent about their minor axis (symm axis).
Parameters
----------
Cb: float,
bending coefficient.
r0: float,
polar radius of gyration of the section about the shear center.
A: float,
full area.
sigma_ey_eta: float,
critical buckling stress about the y axis (major axis), divided by eta.
sigma_t_eta: float,
torsional buckling critical stress, divided by eta.
Returns
-------
Mc_eta: float,
critical moment divided by eta (for iteration).
Raises
------
none
Tests
-----
>>> round(E_3_3_1_2_e4(Cb=1.685, r0=2.581, A=1.284, sigma_ey_eta=47.14, sigma_t_eta=72.54), 2)
326.54
'''
Mc_eta = Cb*r0*A*(sigma_ey_eta*sigma_t_eta)**0.5
return Mc_eta
def E_3_3_1_2_e5(Cb, Cs, r0, A, sigma_ex_eta, sigma_t_eta, j):
'''Lateral Buckling Strength. CASE III: singly symmetric sections bent about their mayor axis (perp symm axis).
Parameters
----------
Cb: float,
bending coefficient.
Cs: float,
equal to +1 if compression occurs in the web, otherwise equal to -1.
r0: float,
polar radius of gyration of the section about the shear center.
A: float,
full area.
sigma_ex_eta: float,
critical buckling stress about the x axis (minor axis), divided by eta.
sigma_t_eta: float,
torsional buckling critical stress, divided by eta.
j: float,
half the monosymmetry constant in compression about the -y- axis (beta22-).
Returns
-------
Mc_eta: float,
critical moment divided by eta (for iteration).
Raises
------
none
Tests
-----
>>>
'''
Mc_eta = Cs*Cb*A*sigma_ex_eta*(j + Cs*(j**2 + r0**2*sigma_t_eta/sigma_ex_eta)**0.5)
return Mc_eta
def E_3_3_1_2_e6(E0, K, L, r):
'''Lateral Buckling Strength. Critical buckling stress.
Parameters
----------
E0: float,
initial elastic modulus.
K*L: float,
effective length.
r: float,
radius of gyration.
Returns
-------
sigma_eta: float,
critical buckling stress divided by eta (for iteration).
Raises
------
none
Tests
-----
>>> round (E_3_3_1_2_e6(E0=27000, K=1.0, L=2.5*12, r=0.40), 2)
47.37
'''
sigma_eta = pi**2*E0/(K*L/r)**2
return sigma_eta
def E_3_3_1_2_e8(E0, Kt, Lt, r0, A, Cw, G0, J):
'''Torsional buckling critical stress. s_t in Eq 3.3.1.2-6.
See the section properties at https://sectionproperties.readthedocs.io/en/latest/rst/post.html.
Parameters
----------
E0 : float,
elastic modulus.
Kt : float,
effective length factor for torsion.
Lt : float,
member length for torsion.
r0: float,
polar radius of gyration.
A : float,
area of the section.
Cw : float,
torsional warping constant of the section.
G0 : float,
initial shear modulus.
J : float,
St. Venant torsion constant.
Returns
-------
s_t_eta : float
torsional buckling critical stress divided by eta (for iteration).
Tests
-----
>>> round (E_3_3_1_2_e8(E0=27000, Kt=1.0, Lt=2.5*12, r0=2.581, A=1.284, Cw=1.819, G0=10500, J=0.0078), 2)
72.54
'''
t1 = 1/A/r0/r0
t2_den = (Kt*Lt)**2
t2 = G0*J + pi**2*E0*Cw/t2_den
s_t_eta = t1*t2
return s_t_eta
def E_3_3_1_2_e9(rx, ry, x0):
'''Lateral Buckling Strength. Polar radius of gyration of the section.
Parameters
----------
rx: float,
radius of gyration about the x axis (minor axis).
ry: float,
radius of gyration about the y axis (major axis).
x0 : float
distance between the shear center and the centroid.
Returns
-------
r0: float,
polar radius of gyration of the section about the shear center.
Raises
------
none
Tests
-----
>>> round( E_3_3_1_2_e9(rx=40.272, ry=18.2673, x0=38.69), 2)
58.76
'''
r0 = (rx**2 + ry**2 + x0**2)**0.5
return r0
## 3.3.2 Strength for Shear Only
def E_3_3_2_e1(E0, t, h, eta= 1.0):
'''Strength for Shear Only. Nominal shear strength divided by eta_shear. Equation (3.3.2-1).
Parameters
----------
E0: float,
initial elastic modulus.
t: float,
thickness of the section.
h: float,
depth of the section.
eta : float
plastic reduction factor, ratio Gs/G0.
Returns
-------
Vn: float,
nominal shear strength.
fiVn : float
design shear strength.
Raises
------
none
Tests
-----
Example 9.1 - C-profile with LB consideration
>>> Vn, fiVn = E_3_3_2_e1(E0=27e3, t=0.135, h=6.354)
>>> print(round(Vn, 2),'|', round(fiVn, 2))
50.6 | 43.01
'''
fi = 0.85
Vn = 4.84*E0*t**3/h*eta
fiVn = fi*Vn
return Vn, fiVn
## 3.3.3 Strength for Combined Bending and Shear
def E_3_3_3_e1(fiMn, fiVn, Mu, Vu):
'''Combined bending and shear interaction equation. Equation 3.3.3-1.
Parameters
----------
fiMn: float,
design flexural strength.
fiVn: float,
design shear strength.
Mu: float,
required flexural strength.
Vu: float,
required shear strength.
Returns
-------
ratio: float,
ratio of the required strengths to the corresponding design strengths.
Tests
-----
Example 9.1 - C-profile with LB consideration
>>> round(E_3_3_3_e1(fiMn=80.16, fiVn=27.88, Mu=44.16, Vu=2.21), 2)
0.31
'''
comb = (Mu/fiMn)**2 + (Vu/fiVn)**2
limit = 1.0
ratio = comb/limit
return ratio
def E_3_3_3_e2(fiMn, fiVn, Mu, Vu):
'''Combined bending and shear interaction equation. Equation 3.3.3-2.
Parameters
----------
fiMn: float,
design flexural strength.
fiVn: float,
design shear strength.
Mu: float,
required flexural strength.
Vu: float,
required shear strength.
Returns
-------
ratio: float,
ratio of the required strengths to the corresponding design strengths.
Tests
-----
none
'''
comb = (Mu/fiMn)*0.6 + (Vu/fiVn)
limit = 1.3
ratio = comb/limit
return ratio
"""
## 3.3.4 Web Crippling Strength
def sec3_3_4(member):
'''Web Crippling Strength.
Parameters
----------
Returns
-------
Raises
------
none
Tests
-----
>>>
'''
steel = member.steel
profile = member.profile
# Parameters
fi = 0.70
FY = steel.FY
h = profile.H - 2*profile.r_out
t = profile.t
ct = Ct(units='US')
k = FY/33/ct
m = E_3_3_4_e22(t= t, units='US')
# N =
R = profile.r_out - t
# theta =
# Coefficient calculations
C1 = E_3_3_4_e10(FY=FY, Ct=Ct, k=k)
C2 = E_3_3_4_e11(R=R, t=t)
C3 = E_3_3_4_e12(FY=FY, Ct=Ct, k=k)
C4 = E_3_3_4_e13(R=R, t=t)
C5 = E_3_3_4_e14(k=k)
C6 = E_3_3_4_e15(h=h, t=t)
C7 = E_3_3_4_e17(h=h, t=t, k=k)
C8 = E_3_3_4_e19(h=h, t=t, k=k)
C_theta = E_3_3_4_e20(theta=theta)
## Shapes Having Single Webs
# Stiffened or Partially Stiffened Flenges
# End Reaction
E_3_3_4_e1(t, C3, C4, Ctheta, h, N, Ct)
# Interior Reaction
E_3_3_4_e4(t, C1, C2, Ctheta, h, N, Ct)
# Unstiffened Flanges
# End Reaction
E_3_3_4_e2(t, C3, C4, Ctheta, h, N, Ct)
# Interior Reaction
E_3_3_4_e4(t, C1, C2, Ctheta, h, N, Ct)
## I-sections or Similar Sections
# Stiffened, Partially Stiffened and Unstiffened Flanges
# End Reaction
E_3_3_4_e3(N, t, FY, C6)
# Interior Reaction
E_3_3_4_e5(N, t, FY, C5, m)
def E_3_3_4_e1(t, C3, C4, Ctheta, h, N, Ct):
'''Equation 3.3.4-1.
Parameters
----------
Returns
-------
Tests
-----
none
'''
f1 = t**2*C3*C4*Ctheta
f2 = 331 - 0.61*h/t
f3 = 1 + 0.01*N/t
return f1*f2*f3*Ct
def E_3_3_4_e2(t, C3, C4, Ctheta, h, N, Ct):
'''Equation 3.3.4-2.
Parameters
----------
Returns
-------
Tests
-----
none
'''
f1 = t**2*C3*C4*Ctheta
f2 = 217 - 0.28*h/t
f3 = 1 + 0.01*N/t
return f1*f2*f3*Ct
def E_3_3_4_e3(N, t, FY, C6):
'''Equation 3.3.4-3.
Parameters
----------
Returns
-------
Tests
-----
none
'''
f1 = 0.71 + 0.015*N/t
f2 = t**2*FY*C6
f3 = 10 + 1.25*(N/t)**0.5
return f1*f2*f3
def E_3_3_4_e4(t, C1, C2, Ctheta, h, N, Ct):
'''Equation 3.3.4-4.
Parameters
----------
Returns
-------
Tests
-----
none
'''
f1 = t**2*C1*C2*Ctheta
f2 = 538 - 0.74*h/t
f3 = 1 + 0.007*N/t
return f1*f2*f3*Ct
def E_3_3_4_e5(N, t, FY, C5, m):
'''Equation 3.3.4-5.
Parameters
----------
Returns
-------
Tests
-----
none
'''
f1 = 0.75 + 0.011*N/t
f2 = t**2*FY*C5
f3 = 0.88 + 0.12*m
f4 = 15 + 3.25*(N/t)**0.5
return f1*f2*f3*f4
def E_3_3_4_e6(t, C3, C4, Ctheta, h, N, Ct):
'''Equation 3.3.4-6.
Parameters
----------
Returns
-------
Tests
-----
none
'''
f1 = t**2*C3*C4*Ctheta
f2 = 244 - 0.57*h/t
f3 = 1 + 0.01*N/t
return f1*f2*f3*Ct
def E_3_3_4_e7(t, C3, C4, Ctheta, h, N, Ct):
'''Equation 3.3.4-7.
Parameters
----------
Returns
-------
Tests
-----
none
'''
f1 = t**2*FY*C8
f2 = 0.64 + 0.31*m
f3 = 10 + 1.25*(N/t)**0.5
return f1*f2*f3
def E_3_3_4_e8(t, C1, C2, Ctheta, h, N, Ct):
'''Equation 3.3.4-8.
Parameters
----------
Returns
-------
Tests
-----
none
'''
f1 = t**2*C1*C2*Ctheta
f2 = 771 - 2.26*h/t
f3 = 1 + 0.0013*N/t
return f1*f2*f3*Ct
def E_3_3_4_e9(t, C3, C4, Ctheta, h, N, Ct):
'''Equation 3.3.4-9.
Parameters
----------
Returns
-------
Tests
-----
none
'''
f1 = t**2*FY*C7
f2 = 0.82 - 0.15*m
f3 = 15 + 3.25*(N/t)**0.5
return f1*f2*f3 """
def Ct(units = 'SI'):
'''Ct.
Parameters
----------
Returns
-------
Tests
-----
none
'''
if units == 'SI':
return 6.9
if units == 'US':
return 1.0
def E_3_3_4_e10(FY, Ct, k):
'''C1. Equation 3.3.4-10.
Parameters
----------
Returns
-------
Tests
-----
none
'''
limit = FY/91.5/Ct
if limit <= 1:
return (1.22 - 0.22*k)*k
else:
return 1.69
def E_3_3_4_e11(R, t):
'''C2. Equation 3.3.4-11.
Parameters
----------
Returns
-------
Tests
-----
none
'''
C2 = (1.06 - 0.06*R/t)
if C2 > 1: C2 = 1.0
return C2
def E_3_3_4_e12(FY, Ct, k):
'''C3. Equation 3.3.4-12.
Parameters
----------
Returns
-------
Tests
-----
none
'''
limit = FY/66.5/Ct
if limit <= 1:
return (1.33 - 0.33*k)*k
else:
return 1.34
def E_3_3_4_e13(R, t):
'''C4. Equation 3.3.4-13.
Parameters
----------
Returns
-------
Tests
-----
none
'''
C4 = (1.15 - 0.15*R/t)
if C4 < 0.5: C4 = 0.5
return C4
def E_3_3_4_e14(k):
'''C5. Equation 3.3.4-14.
Parameters
----------
Returns
-------
Tests
-----
none
'''
C5 = (1.49 - 0.53*k)
if C5 < 0.6: C5 = 0.6
return C5
def E_3_3_4_e15(h, t):
'''C6. Equation 3.3.4-15.
Parameters
----------
Returns
-------
Tests
-----
none
'''
if h/t <= 150:
C6 = 1.0 + h/t/750
else: C6 = 1.20
return C6
def E_3_3_4_e17(h, t, k):
'''C7. Equation 3.3.4-17.
Parameters
----------
Returns
-------
Tests
-----
none
'''
if h/t <= 66.5:
C7 = 1/k
else: C7 = (1.10 - h/t/660)/k
return C7
def E_3_3_4_e19(h, t, k):
'''C8. Equation 3.3.4-19.
Parameters
----------
Returns
-------
Tests
-----
none
'''
return (0.98 - h/t/665)/k
def E_3_3_4_e20(theta):
'''C_theta. Equation 3.3.4-20.
Parameters
----------
Returns
-------
Tests
-----
none
'''
return 0.7 + 0.3*(theta/90)**2
def E_3_3_4_e21(FY, Ct):
'''k. Equation 3.3.4-21.
Parameters
----------
Returns
-------
Tests
-----
none
'''
return FY/33/Ct
def E_3_3_4_e22(t, units='SI'):
'''m. Equation 3.3.4-22.
Parameters
----------
Returns
-------
Tests
-----
none
'''
if units == 'SI':
return t/1.91
if units == 'US':
return t/0.075
## 3.3.5 Strength for Combined Bending and Web Crippling
def E_3_3_5_e1(Pu, fiPn, Mu, fiMn):
'''Combined bending and web crippling interaction equation. Equation 3.3.5-1.
Parameters
----------
fiMn: float,
design flexural strength when bending alone acts.
fiPn: float,
design strength for a concentrated load or reaction in the absence of bending, per section 3.3.4.
Mu: float,
required flexural strength.
Pu: float,
required strength for a concentrated load or reaction in the absence of bending.
Returns
-------
ratio: float,
ratio of the required strengths to the corresponding design strengths.
Tests
-----
Example 9.1 - C-profile with LB consideration
>>> round(E_3_3_5_e1(Pu=4.05, fiPn=11.05, Mu=44.16, fiMn=80.16), 2)
0.66
'''
comb = Mu/fiMn + 1.07*Pu/fiPn
limit = 1.42
ratio = comb/limit
return ratio
def E_3_3_5_e2(Pu, fiPn, Mu, fiMn):
'''Combined bending and web crippling interaction equation. Equation 3.3.5-2.
Parameters
----------
fiMn: float,
design flexural strength when bending alone acts.
fiPn: float,
design strength for a concentrated load or reaction in the absence of bending, per section 3.3.4.
Mu: float,
required flexural strength.
Pu: float,
required strength for a concentrated load or reaction in the absence of bending.
Returns
-------
ratio: float,
ratio of the required strengths to the corresponding design strengths.
Tests
-----
none
'''
comb = 0.82*Pu/fiPn + Mu/fiMn
limit = 1.32
ratio = comb/limit
return ratio
## 3.4 Compression Members
def E_3_4_e1(Fn, Ae):
''' Design axial strength. Eq 3.4-1.
Parameters
----------
Fn : float,
the least of the flexural, torsional and flexural-torsional buckling stresses.
Ae : float,
effective area computed at the stress Fn.
Returns
-------
fiPn : float,
design axial strength.
Tests
-----
>>> round( E_3_4_e1(1.5, 1.5), 4)
1.9125
'''
fi_c = 0.85 # compression resistance factor
fiPn = fi_c*Fn*Ae
return fiPn
def E_3_4_2_e1(E0, Kt, Lt, rx, ry, eta, c_x, sc_x, A, Cw, G0, J):
"""Fn = s_t = TB en Eq 3.4.2-1. Tension critica de Torsional.
Parameters
----------
E0 : float,
Modulo elasticidad.
Kt : float,
Factor de longitud efectiva de pandeo a torsion.
Lt : float,
longitud de pandeo a torsion.
rx, ry : float,
radio de giro del miembro | sqrt(I/A).
eta : float,
factor de reduccion plastica | Et(s)/E0.
c_x : float,
coordenada del centroide de la seccion.
sc_x: float,
coordenada del centro de corte.
A : float,
Area de la seccion.
Cw : float,
Constante torsional de warping de la seccion.
G0 : float,
Modulo de corte inicial.
J : float,
Constante de torsion de St. Venant.
Returns
-------
Fn : float,
Tension critica de pandeo torsional con eta=1.
Tests
-----
>>> round ( E_3_4_2_e1(E0 = 180510, Kt = 0.5, Lt = 1800, rx = 40.272, ry = 18.2673, eta = 0.6225, c_x = 15.59, sc_x = -23.1, A = 319, Cw = 215e6, G0 = 69426.9, J = 239), 2)
276.66
"""
x0 = -abs(c_x-sc_x) # distance from the centroid to the shear center, negative
r0 = E_3_3_1_2_e9(rx= rx, ry=ry, x0=x0) # polar radius of gyration
s_t_eta = E_3_3_1_2_e8(E0=E0, Kt= Kt, Lt=Lt, r0=r0, A=A, Cw=Cw, G0=G0, J=J)
Fn = s_t_eta*eta
return Fn
def E_3_4_3_e1(E0, Kx, Lx, Kt, Lt, rx, ry, eta, c_x, sc_x, A, Cw, G0, J):
'''Fn for FTB in Eq 3.4.3-1. Flexural-torsional buckling critical stress.
Parameters
----------
E0 : float,
elastic modulus.
Kx : float,
effective length factor for flexural buckling about -x-.
Lx : float,
flexural buckling length about -x-.
Kt : float,
effective length factor for torsional buckling.
Lt : float,
torsional buckling length.
rx, ry : float,
radii of gyration of the member | sqrt(I/A).
eta : float,
plastic reduction factor | Et(s)/E0.
c_x : float,
coordinate of the centroid of the section.
sc_x: float,
coordinate of the shear center.
A : float,
area of the section.
Cw : float,
torsional warping constant of the section.
G0 : float,
initial shear modulus.
J : float,
St. Venant torsion constant.
Returns
-------
Fn : float,
flexural-torsional buckling critical stress (scaled by the supplied eta).
Tests
-----
>>> round ( E_3_4_3_e1(E0 = 180510, Kx = 0.5, Lx = 1800, Kt = 0.5, Lt = 1800, rx = 40.272, ry = 18.2673, eta = 0.6225, c_x = 15.59, sc_x = -23.1, A = 319, Cw = 215e6, G0 = 69426.9, J = 239), 2)
261.53
'''
x0 = -abs(c_x-sc_x) # distance from the centroid to the shear center, negative
r0 = E_3_3_1_2_e9(rx= rx, ry=ry, x0=x0) # polar radius of gyration
beta = 1 - (x0/r0)**2
t1 = eta/2/beta
s_ex = E_3_3_1_2_e6(E0=E0, K=Kx, L=Lx, r=rx)
s_t = E_3_3_1_2_e8(E0=E0, Kt=Kt, Lt=Lt, r0=r0, A=A, Cw=Cw, G0=G0, J=J)
raiz = ( (s_ex + s_t)**2 - 4*beta*s_ex*s_t )**0.5
Fn = t1*(s_ex + s_t - raiz)
return Fn
def E_3_4_3_e3(E0, K, L, r, eta):
'''Flexural buckling critical stress. s_ex in Eq 3.4.3-3.
Parameters
----------
E0 : float
elastic modulus.
K : float
effective length factor.
L : float
member length.
r : float
radius of gyration of the member | sqrt(I/A).
eta : float
plastic reduction factor | Et(s)/E0.
Returns
-------
s_ex : float
flexural buckling critical stress.
Tests
-----
>>> round (E_3_4_3_e3(E0 = 180510 ,K = 0.5,L = 1800, r = 40.272 , eta = 0.6225), 2)
2220.56
'''
s_ex_eta = E_3_3_1_2_e6(E0=E0, K=K, L=L, r=r)
s_ex = s_ex_eta*eta
return s_ex
## 3.5 Combined Axial Load and Bending
def E_3_5_e1(Pu, fiPn, Mu_x, Mu_y, fiMn_x, fiMn_y, alpha_nx, alpha_ny, Cm_x = 0.85, Cm_y = 0.85):
'''Interaction equation for combined axial load and bending. Case Pu/fiPn > 0.15.
Parameters
----------
Pu: float,
required axial compressive strength.
fiPn: float,
design compressive strength.
Mu_x, Mu_y: float,
required flexural strengths.
fiMn_x, fiMn_y: float,
design flexural strengths.
alpha_nx, alpha_ny: float,
magnification factors.
Cm_x, Cm_y: float,
coefficients depending on the restraint at the joints.
Returns
-------
ratio: float,
ratio of the required strengths to the corresponding design strengths.
Tests
-----
>>>
'''
return Pu/fiPn + Cm_x*Mu_x/(fiMn_x*alpha_nx) + Cm_y*Mu_y/(fiMn_y*alpha_ny)
def E_3_5_e2(Pu, fiPn_0, Mu_x, Mu_y, fiMn_x, fiMn_y):
'''Interaction equation for combined axial load and bending. Case Pu/fiPn > 0.15.
Parameters
----------
Pu: float,
required axial compressive strength.
fiPn_0: float,
design compressive strength per section 3.4 with Fn=FY.
Mu_x, Mu_y: float,
required flexural strengths.
fiMn_x, fiMn_y: float,
design flexural strengths.
Returns
-------
ratio: float,
ratio of the required strengths to the corresponding design strengths.
Tests
-----
>>>
'''
return Pu/fiPn_0 + Mu_x/fiMn_x + Mu_y/fiMn_y
def E_3_5_e3(Pu, fiPn, Mu_x, Mu_y, fiMn_x, fiMn_y):
'''Interaction equation for combined axial load and bending. Case Pu/fiPn <= 0.15.
Parameters
----------
Pu: float,
required axial compressive strength.
fiPn: float,
design compressive strength.
Mu_x, Mu_y: float,
required flexural strengths.
fiMn_x, fiMn_y: float,
design flexural strengths.
Returns
-------
ratio: float,
ratio of the required strengths to the corresponding design strengths.
Tests
-----
>>>
'''
return Pu/fiPn + Mu_x/fiMn_x + Mu_y/fiMn_y
def E_3_5_e4(Pu, Pe):
'''Magnification factor.
Parameters
----------
Pu: float,
required axial compressive strength.
Pe: float,
elastic buckling strength.
Returns
-------
alpha_n: float,
magnification factor.
Tests
-----
>>> round(E_3_5_e4(Pu=15, Pe=20), 1)
4.0
'''
return 1/(1 - Pu/Pe)
def E_3_5_e5(E0, Kb, Lb, Ib):
'''Elastic Buckling Strength.
Parameters
----------
E0: float,
initial elastic modulus.
Kb, Lb: float,
effective length factor and unbraced length in the plane of bending.
Ib: float,
moment of inertia about the axis of bending.
Returns
-------
Pe: float,
elastic buckling strength.
Tests
-----
>>> round(E_3_5_e5(E0=27000, Kb=1.0, Lb=4*12, Ib=5.357), 2)
619.59
'''
return pi**2*E0*Ib/(Kb*Lb)**2
#####################################################################################
#####################################################################################
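# Convenience doctest runner (our addition, not part of the original module).
# The doctests embedded in the docstrings above double as unit tests, so
# `python sec_3.py -v` will exercise all of them:
if __name__ == "__main__":
    import doctest
    doctest.testmod()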
|
{"hexsha": "d7877139bc7e32c973f7082777f8a002aa189b14", "size": 36107, "ext": "py", "lang": "Python", "max_stars_repo_path": "steeldesign/modules/sec_3.py", "max_stars_repo_name": "mainqueg/steeldesign", "max_stars_repo_head_hexsha": "e1250f93af0b156f5820e83e5925d2d991d43519", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "steeldesign/modules/sec_3.py", "max_issues_repo_name": "mainqueg/steeldesign", "max_issues_repo_head_hexsha": "e1250f93af0b156f5820e83e5925d2d991d43519", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "steeldesign/modules/sec_3.py", "max_forks_repo_name": "mainqueg/steeldesign", "max_forks_repo_head_hexsha": "e1250f93af0b156f5820e83e5925d2d991d43519", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5297575312, "max_line_length": 246, "alphanum_fraction": 0.5232226438, "include": true, "reason": "import numpy", "num_tokens": 11979}
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import random
import time
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
from paddlenlp.datasets import GlueQNLI, GlueSST2
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.data.sampler import SamplerHelper
from paddlenlp.transformers import BertForSequenceClassification, BertTokenizer
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
TASK_CLASSES = {
"qnli": (GlueQNLI, paddle.metric.Accuracy), # (dataset, metric)
"sst-2": (GlueSST2, paddle.metric.Accuracy),
}
MODEL_CLASSES = {"bert": (BertForSequenceClassification, BertTokenizer), }
def parse_args():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " +
", ".join(TASK_CLASSES.keys()), )
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " +
", ".join(MODEL_CLASSES.keys()), )
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(
sum([
list(classes[-1].pretrained_init_configuration.keys())
for classes in MODEL_CLASSES.values()
], [])), )
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.", )
parser.add_argument(
"--batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.", )
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument(
"--weight_decay",
default=0.0,
type=float,
help="Weight decay if we apply some.")
parser.add_argument(
"--adam_epsilon",
default=1e-8,
type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs",
default=3,
type=int,
help="Total number of training epochs to perform.", )
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--warmup_steps",
default=0,
type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument(
"--logging_steps",
type=int,
default=500,
help="Log every X updates steps.")
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save checkpoint every X updates steps.")
parser.add_argument(
"--seed", type=int, default=42, help="Random seed for initialization")
args = parser.parse_args()
return args
def create_data_holder():
input_ids = paddle.static.data(
name="input_ids", shape=[-1, -1], dtype="int64")
segment_ids = paddle.static.data(
name="segment_ids", shape=[-1, -1], dtype="int64")
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
return [input_ids, segment_ids, label]
def reset_program_state_dict(model, state_dict, pretrained_state_dict):
reset_state_dict = {}
scale = model.initializer_range if hasattr(model, "initializer_range")\
else model.bert.config["initializer_range"]
for n, p in state_dict.items():
if n not in pretrained_state_dict:
dtype_str = "float32"
if str(p.dtype) == "VarType.FP64":
dtype_str = "float64"
reset_state_dict[p.name] = np.random.normal(
loc=0.0, scale=scale, size=p.shape).astype(dtype_str)
else:
reset_state_dict[p.name] = pretrained_state_dict[n]
return reset_state_dict
def set_seed(args):
# Use the same data seed (for data shuffling) on all processes to guarantee
# data consistency after sharding.
random.seed(args.seed)
np.random.seed(args.seed)
# Maybe different op seeds(for dropout) for different procs is better. By:
# `paddle.seed(args.seed + paddle.distributed.get_rank())`
paddle.seed(args.seed)
def evaluate(exe, metric, loss, correct, dev_program, data_loader):
metric.reset()
for batch in data_loader:
loss_return, correct_return = exe.run(dev_program, feed=batch, \
fetch_list=[loss, correct])
metric.update(correct_return)
accuracy = metric.accumulate()
print("eval loss: %f, accuracy: %f" % (loss_return, accuracy))
def convert_example(example,
tokenizer,
label_list,
max_seq_length=512,
is_test=False):
"""convert a glue example into necessary features"""
def _truncate_seqs(seqs, max_seq_length):
if len(seqs) == 1: # single sentence
# Account for [CLS] and [SEP] with "- 2"
seqs[0] = seqs[0][0:(max_seq_length - 2)]
else: # sentence pair
# Account for [CLS], [SEP], [SEP] with "- 3"
tokens_a, tokens_b = seqs
max_seq_length -= 3
while True: # truncate with longest_first strategy
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_seq_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
return seqs
def _concat_seqs(seqs, separators, seq_mask=0, separator_mask=1):
concat = sum((seq + sep for sep, seq in zip(separators, seqs)), [])
segment_ids = sum(
([i] * (len(seq) + len(sep))
for i, (sep, seq) in enumerate(zip(separators, seqs))), [])
if isinstance(seq_mask, int):
seq_mask = [[seq_mask] * len(seq) for seq in seqs]
if isinstance(separator_mask, int):
separator_mask = [[separator_mask] * len(sep) for sep in separators]
p_mask = sum((s_mask + mask
for sep, seq, s_mask, mask in zip(
separators, seqs, seq_mask, separator_mask)), [])
return concat, segment_ids, p_mask
if not is_test:
# `label_list == None` is for regression task
label_dtype = "int64" if label_list else "float32"
# get the label
label = example[-1]
example = example[:-1]
# create the label map for classification tasks
if label_list:
label_map = {}
for (i, l) in enumerate(label_list):
label_map[l] = i
label = label_map[label]
label = [label]
#label = np.array([label], dtype=label_dtype)
# tokenize raw text
tokens_raw = [tokenizer(l) for l in example]
# truncate to the maximum sequence length
tokens_trun = _truncate_seqs(tokens_raw, max_seq_length)
# concatenate the sequences with special tokens
tokens_trun[0] = [tokenizer.cls_token] + tokens_trun[0]
tokens, segment_ids, _ = _concat_seqs(tokens_trun, [[tokenizer.sep_token]] *
len(tokens_trun))
# convert the token to ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
# input_mask = [1] * len(input_ids)
if not is_test:
return input_ids, segment_ids, label
else:
return input_ids, segment_ids
def do_train(args):
# Set up the paddle execution environment
paddle.enable_static()
place = paddle.CUDAPlace(0)
set_seed(args)
# Create the main_program for the training and dev_program for the validation
main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
dev_program = paddle.static.Program()
# Get the configuration of tokenizer and model
args.task_name = args.task_name.lower()
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
dataset_class, metric_class = TASK_CLASSES[args.task_name]
# Create the tokenizer and dataset
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
train_dataset, dev_dataset = dataset_class.get_datasets(["train", "dev"])
trans_func = partial(
convert_example,
tokenizer=tokenizer,
label_list=train_dataset.get_labels(),
max_seq_length=args.max_seq_length)
train_dataset = train_dataset.apply(trans_func, lazy=True)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_id), # segment
Stack(dtype="int64" if train_dataset.get_labels() else "float32") # label
): [data for i, data in enumerate(fn(samples))]
train_batch_sampler = paddle.io.BatchSampler(
train_dataset, batch_size=args.batch_size, shuffle=True)
dev_dataset = dev_dataset.apply(trans_func, lazy=True)
dev_batch_sampler = paddle.io.BatchSampler(
dev_dataset, batch_size=args.batch_size, shuffle=False)
feed_list_name = []
# Define the input data and create the train/dev data_loader
with paddle.static.program_guard(main_program, startup_program):
[input_ids, segment_ids, labels] = create_data_holder()
train_data_loader = DataLoader(
dataset=train_dataset,
feed_list=[input_ids, segment_ids, labels],
batch_sampler=train_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=False)
dev_data_loader = DataLoader(
dataset=dev_dataset,
feed_list=[input_ids, segment_ids, labels],
batch_sampler=dev_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=False)
# Create the training-forward program, and clone it for the validation
with paddle.static.program_guard(main_program, startup_program):
model, pretrained_state_dict = model_class.from_pretrained(
args.model_name_or_path,
num_classes=len(train_dataset.get_labels()))
loss_fct = paddle.nn.loss.CrossEntropyLoss(
) if train_dataset.get_labels() else paddle.nn.loss.MSELoss()
logits = model(input_ids, segment_ids)
loss = loss_fct(logits, labels)
dev_program = main_program.clone(for_test=True)
# Create the training-backward program, this pass will not be
# executed in the validation
with paddle.static.program_guard(main_program, startup_program):
lr_scheduler = paddle.optimizer.lr.LambdaDecay(
args.learning_rate,
lambda current_step, num_warmup_steps=args.warmup_steps,
num_training_steps=args.max_steps if args.max_steps > 0 else
(len(train_data_loader) * args.num_train_epochs): float(
current_step) / float(max(1, num_warmup_steps))
if current_step < num_warmup_steps else max(
0.0,
float(num_training_steps - current_step) / float(
max(1, num_training_steps - num_warmup_steps))))
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
epsilon=args.adam_epsilon,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
])
optimizer.minimize(loss)
# Create the metric pass for the validation
with paddle.static.program_guard(dev_program, startup_program):
metric = metric_class()
correct = metric.compute(logits, labels)
# Initialize the fine-tuning parameters: load the parameters from the
# pre-trained model, and initialize any parameter that is not in the
# pre-trained model from the normal distribution.
exe = paddle.static.Executor(place)
exe.run(startup_program)
state_dict = model.state_dict()
reset_state_dict = reset_program_state_dict(model, state_dict,
pretrained_state_dict)
paddle.static.set_program_state(main_program, reset_state_dict)
global_step = 0
tic_train = time.time()
for epoch in range(args.num_train_epochs):
for step, batch in enumerate(train_data_loader):
global_step += 1
loss_return = exe.run(main_program, feed=batch, fetch_list=[loss])
if global_step % args.logging_steps == 0:
logger.info(
"global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
% (global_step, epoch, step, loss_return[0],
args.logging_steps / (time.time() - tic_train)))
tic_train = time.time()
lr_scheduler.step()
if global_step % args.save_steps == 0:
# Validation pass, record the loss and metric
evaluate(exe, metric, loss, correct, dev_program,
dev_data_loader)
output_dir = os.path.join(args.output_dir,
"model_%d" % global_step)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
paddle.fluid.io.save_params(exe, output_dir)
tokenizer.save_pretrained(output_dir)
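# Example invocation (illustrative values only; adjust task, model and paths):
# python run_glue.py --task_name sst-2 --model_type bert \
#     --model_name_or_path bert-base-uncased --output_dir ./tmp/sst-2 \
#     --batch_size 32 --learning_rate 2e-5 --num_train_epochs 3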
if __name__ == "__main__":
args = parse_args()
do_train(args)
|
{"hexsha": "49e87f6a5e7937acebb0e28056881695182c1e3a", "size": 15100, "ext": "py", "lang": "Python", "max_stars_repo_path": "PaddleNLP/legacy/benchmark/bert/run_glue.py", "max_stars_repo_name": "weiwei1115/models", "max_stars_repo_head_hexsha": "e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PaddleNLP/legacy/benchmark/bert/run_glue.py", "max_issues_repo_name": "weiwei1115/models", "max_issues_repo_head_hexsha": "e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PaddleNLP/legacy/benchmark/bert/run_glue.py", "max_forks_repo_name": "weiwei1115/models", "max_forks_repo_head_hexsha": "e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-09T01:50:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-09T01:50:13.000Z", "avg_line_length": 37.8446115288, "max_line_length": 97, "alphanum_fraction": 0.6333112583, "include": true, "reason": "import numpy", "num_tokens": 3314}
|
import os, sys
from sys import platform as _platform
import glob
import yaml
import time
import requests
import numpy as np
import cv2
from afy.videocaptureasync import VideoCaptureAsync
from afy.arguments import opt
from afy.utils import info, Once, Tee, crop, pad_img, resize, TicToc
import afy.camera_selector as cam_selector
log = Tee('./var/log/cam_fomm.log')
if _platform == 'darwin':
if not opt.is_client:
info('\nOnly remote GPU mode is supported for Mac (use --is-client and --connect options to connect to the server)')
info('Standalone version will be available later!\n')
exit()
def is_new_frame_better(source, driving, predictor):
global avatar_kp
global display_string
if avatar_kp is None:
display_string = "No face detected in avatar."
return False
if predictor.get_start_frame() is None:
display_string = "No frame to compare to."
return True
driving_smaller = resize(driving, (128, 128))[..., :3] # note: currently unused
new_kp = predictor.get_frame_kp(driving)
if new_kp is not None:
new_norm = (np.abs(avatar_kp - new_kp) ** 2).sum()
old_norm = (np.abs(avatar_kp - predictor.get_start_frame_kp()) ** 2).sum()
out_string = "{0} : {1}".format(int(new_norm * 100), int(old_norm * 100))
display_string = out_string
log(out_string)
return new_norm < old_norm
else:
display_string = "No face found!"
return False
def load_stylegan_avatar():
url = "https://thispersondoesnotexist.com/image"
r = requests.get(url, headers={'User-Agent': "My User Agent 1.0"}).content
image = np.frombuffer(r, np.uint8)
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = resize(image, (IMG_SIZE, IMG_SIZE))
return image
def load_images(IMG_SIZE = 256):
avatars = []
filenames = []
images_list = sorted(glob.glob(f'{opt.avatars}/*'))
for i, f in enumerate(images_list):
if f.endswith('.jpg') or f.endswith('.jpeg') or f.endswith('.png'):
img = cv2.imread(f)
if img.ndim == 2:
img = np.tile(img[..., None], [1, 1, 3])
img = img[..., :3][..., ::-1]
img = resize(img, (IMG_SIZE, IMG_SIZE))
avatars.append(img)
filenames.append(f)
return avatars, filenames
def change_avatar(predictor, new_avatar):
global avatar, avatar_kp, kp_source
avatar_kp = predictor.get_frame_kp(new_avatar)
kp_source = None
avatar = new_avatar
predictor.set_source_image(avatar)
def draw_rect(img, rw=0.6, rh=0.8, color=(255, 0, 0), thickness=2):
h, w = img.shape[:2]
l = w * (1 - rw) // 2
r = w - l
u = h * (1 - rh) // 2
d = h - u
img = cv2.rectangle(img, (int(l), int(u)), (int(r), int(d)), color, thickness)
def print_help():
info('\n\n=== Control keys ===')
info('1-9: Change avatar')
for i, fname in enumerate(avatar_names):
key = i + 1
name = fname.split('/')[-1]
info(f'{key}: {name}')
info('W: Zoom camera in')
info('S: Zoom camera out')
info('A: Previous avatar in folder')
info('D: Next avatar in folder')
info('Q: Get random avatar')
info('X: Calibrate face pose')
info('I: Show FPS')
info('ESC: Quit')
info('\nFull key list: https://github.com/alievk/avatarify#controls')
info('\n\n')
def draw_fps(frame, fps, timing, x0=10, y0=20, ystep=30, fontsz=0.5, color=(255, 255, 255)):
frame = frame.copy()
cv2.putText(frame, f"FPS: {fps:.1f}", (x0, y0 + ystep * 0), 0, fontsz * IMG_SIZE / 256, color, 1)
cv2.putText(frame, f"Model time (ms): {timing['predict']:.1f}", (x0, y0 + ystep * 1), 0, fontsz * IMG_SIZE / 256, color, 1)
cv2.putText(frame, f"Preproc time (ms): {timing['preproc']:.1f}", (x0, y0 + ystep * 2), 0, fontsz * IMG_SIZE / 256, color, 1)
cv2.putText(frame, f"Postproc time (ms): {timing['postproc']:.1f}", (x0, y0 + ystep * 3), 0, fontsz * IMG_SIZE / 256, color, 1)
return frame
def draw_calib_text(frame, thk=2, fontsz=0.5, color=(0, 0, 255)):
frame = frame.copy()
cv2.putText(frame, "FIT FACE IN RECTANGLE", (40, 20), 0, fontsz * IMG_SIZE / 255, color, thk)
cv2.putText(frame, "W - ZOOM IN", (60, 40), 0, fontsz * IMG_SIZE / 255, color, thk)
cv2.putText(frame, "S - ZOOM OUT", (60, 60), 0, fontsz * IMG_SIZE / 255, color, thk)
cv2.putText(frame, "THEN PRESS X", (60, 245), 0, fontsz * IMG_SIZE / 255, color, thk)
return frame
def select_camera(config):
cam_config = config['cam_config']
cam_id = None
if os.path.isfile(cam_config):
with open(cam_config, 'r') as f:
cam_config = yaml.load(f, Loader=yaml.FullLoader)
cam_id = cam_config['cam_id']
else:
cam_frames = cam_selector.query_cameras(config['query_n_cams'])
if cam_frames:
cam_id = cam_selector.select_camera(cam_frames, window="CLICK ON YOUR CAMERA")
log(f"Selected camera {cam_id}")
with open(cam_config, 'w') as f:
yaml.dump({'cam_id': cam_id}, f)
else:
log("No cameras are available")
return cam_id
if __name__ == "__main__":
with open('config.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
global display_string
display_string = ""
IMG_SIZE = 256
log('Loading Predictor')
predictor_args = {
'config_path': opt.config,
'checkpoint_path': opt.checkpoint,
'relative': opt.relative,
'adapt_movement_scale': opt.adapt_scale,
'enc_downscale': opt.enc_downscale
}
if opt.is_worker:
from afy import predictor_worker
predictor_worker.run_worker(opt.in_port, opt.out_port)
sys.exit(0)
elif opt.is_client:
from afy import predictor_remote
try:
predictor = predictor_remote.PredictorRemote(
in_addr=opt.in_addr, out_addr=opt.out_addr,
**predictor_args
)
except ConnectionError as err:
log(err)
sys.exit(1)
predictor.start()
else:
from afy import predictor_local
predictor = predictor_local.PredictorLocal(
**predictor_args
)
cam_id = select_camera(config)
if cam_id is None:
exit(1)
cap = VideoCaptureAsync(cam_id)
cap.start()
avatars, avatar_names = load_images()
enable_vcam = not opt.no_stream
ret, frame = cap.read()
stream_img_size = frame.shape[1], frame.shape[0]
if enable_vcam:
if _platform in ['linux', 'linux2']:
try:
import pyfakewebcam
except ImportError:
log("pyfakewebcam is not installed.")
exit(1)
stream = pyfakewebcam.FakeWebcam(f'/dev/video{opt.virt_cam}', *stream_img_size)
else:
enable_vcam = False
# log("Virtual camera is supported only on Linux.")
# if not enable_vcam:
# log("Virtual camera streaming will be disabled.")
cur_ava = 0
avatar = None
change_avatar(predictor, avatars[cur_ava])
passthrough = False
cv2.namedWindow('cam', cv2.WINDOW_GUI_NORMAL)
cv2.moveWindow('cam', 500, 250)
frame_proportion = 0.9
frame_offset_x = 0
frame_offset_y = 0
overlay_alpha = 0.0
preview_flip = False
output_flip = False
find_keyframe = False
is_calibrated = False
fps_hist = []
fps = 0
show_fps = False
print_help()
try:
while True:
tt = TicToc()
timing = {
'preproc': 0,
'predict': 0,
'postproc': 0
}
green_overlay = False
tt.tic()
ret, frame = cap.read()
if not ret:
log("Can't receive frame (stream end?). Exiting ...")
break
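# OpenCV captures frames in BGR channel order; reverse the channel axis to RGB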
frame = frame[..., ::-1]
frame_orig = frame.copy()
frame, lrudwh = crop(frame, p=frame_proportion, offset_x=frame_offset_x, offset_y=frame_offset_y)
frame_lrudwh = lrudwh
frame = resize(frame, (IMG_SIZE, IMG_SIZE))[..., :3]
if find_keyframe:
if is_new_frame_better(avatar, frame, predictor):
log("Taking new frame!")
green_overlay = True
predictor.reset_frames()
timing['preproc'] = tt.toc()
if passthrough:
out = frame
elif is_calibrated:
tt.tic()
out = predictor.predict(frame)
if out is None:
log('predict returned None')
timing['predict'] = tt.toc()
else:
out = None
tt.tic()
key = cv2.waitKey(1)
if key == 27: # ESC
break
elif key == ord('d'):
cur_ava += 1
if cur_ava >= len(avatars):
cur_ava = 0
passthrough = False
change_avatar(predictor, avatars[cur_ava])
elif key == ord('a'):
cur_ava -= 1
if cur_ava < 0:
cur_ava = len(avatars) - 1
passthrough = False
change_avatar(predictor, avatars[cur_ava])
elif key == ord('w'):
frame_proportion -= 0.05
frame_proportion = max(frame_proportion, 0.1)
elif key == ord('s'):
frame_proportion += 0.05
frame_proportion = min(frame_proportion, 1.0)
elif key == ord('H'):
if frame_lrudwh[0] - 1 > 0:
frame_offset_x -= 1
elif key == ord('h'):
if frame_lrudwh[0] - 5 > 0:
frame_offset_x -= 5
elif key == ord('K'):
if frame_lrudwh[1] + 1 < frame_lrudwh[4]:
frame_offset_x += 1
elif key == ord('k'):
if frame_lrudwh[1] + 5 < frame_lrudwh[4]:
frame_offset_x += 5
elif key == ord('J'):
if frame_lrudwh[2] - 1 > 0:
frame_offset_y -= 1
elif key == ord('j'):
if frame_lrudwh[2] - 5 > 0:
frame_offset_y -= 5
elif key == ord('U'):
if frame_lrudwh[3] + 1 < frame_lrudwh[5]:
frame_offset_y += 1
elif key == ord('u'):
if frame_lrudwh[3] + 5 < frame_lrudwh[5]:
frame_offset_y += 5
elif key == ord('Z'):
frame_offset_x = 0
frame_offset_y = 0
frame_proportion = 0.9
elif key == ord('x'):
predictor.reset_frames()
if not is_calibrated:
cv2.namedWindow('avatarify', cv2.WINDOW_GUI_NORMAL)
cv2.moveWindow('avatarify', 600, 250)
is_calibrated = True
elif key == ord('z'):
overlay_alpha = max(overlay_alpha - 0.1, 0.0)
elif key == ord('c'):
overlay_alpha = min(overlay_alpha + 0.1, 1.0)
elif key == ord('r'):
preview_flip = not preview_flip
elif key == ord('t'):
output_flip = not output_flip
elif key == ord('f'):
find_keyframe = not find_keyframe
elif key == ord('q'):
try:
log('Loading StyleGAN avatar...')
avatar = load_stylegan_avatar()
passthrough = False
change_avatar(predictor, avatar)
except Exception:
log('Failed to load StyleGAN avatar')
elif key == ord('l'):
try:
log('Reloading avatars...')
avatars, avatar_names = load_images()
passthrough = False
log("Images reloaded")
except Exception:
log('Image reload failed')
elif key == ord('i'):
show_fps = not show_fps
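# keys '1'..'9' (ASCII 49-57) select an avatar below; '0' (ASCII 48) toggles passthrough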
elif 48 < key < 58:
cur_ava = min(key - 49, len(avatars) - 1)
passthrough = False
change_avatar(predictor, avatars[cur_ava])
elif key == 48:
passthrough = not passthrough
elif key != -1:
log(key)
if overlay_alpha > 0:
            preview_frame = cv2.addWeighted(avatars[cur_ava], overlay_alpha, frame, 1.0 - overlay_alpha, 0.0)
else:
preview_frame = frame.copy()
if preview_flip:
preview_frame = cv2.flip(preview_frame, 1)
if green_overlay:
green_alpha = 0.8
overlay = preview_frame.copy()
overlay[:] = (0, 255, 0)
            preview_frame = cv2.addWeighted(preview_frame, green_alpha, overlay, 1.0 - green_alpha, 0.0)
timing['postproc'] = tt.toc()
if find_keyframe:
preview_frame = cv2.putText(preview_frame, display_string, (10, 220), 0, 0.5 * IMG_SIZE / 256, (255, 255, 255), 1)
if show_fps:
preview_frame = draw_fps(preview_frame, fps, timing)
if not is_calibrated:
preview_frame = draw_calib_text(preview_frame)
if not opt.hide_rect:
draw_rect(preview_frame)
cv2.imshow('cam', preview_frame[..., ::-1])
if out is not None:
if not opt.no_pad:
out = pad_img(out, stream_img_size)
if output_flip:
out = cv2.flip(out, 1)
if enable_vcam:
out = resize(out, stream_img_size)
stream.schedule_frame(out)
cv2.imshow('avatarify', out[..., ::-1])
fps_hist.append(tt.toc(total=True))
if len(fps_hist) == 10:
fps = 10 / (sum(fps_hist) / 1000)
fps_hist = []
except KeyboardInterrupt:
log("main: user interrupt")
log("stopping camera")
cap.stop()
cv2.destroyAllWindows()
if opt.is_client:
log("stopping remote predictor")
predictor.stop()
log("main: exit")
|
{"hexsha": "6342e2d932056616ab6d0b53b4a04af903bf091c", "size": 14637, "ext": "py", "lang": "Python", "max_stars_repo_path": "cam_fomm (1).py", "max_stars_repo_name": "Tushar010119/THE-AI-FACE", "max_stars_repo_head_hexsha": "83457f8cd837b70d2e1cf67527e69f85ceb63f07", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-07T16:53:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-07T16:54:44.000Z", "max_issues_repo_path": "cam_fomm (1).py", "max_issues_repo_name": "Tushar010119/THE-AI-FACE-SWAP-DF", "max_issues_repo_head_hexsha": "83457f8cd837b70d2e1cf67527e69f85ceb63f07", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cam_fomm (1).py", "max_forks_repo_name": "Tushar010119/THE-AI-FACE-SWAP-DF", "max_forks_repo_head_hexsha": "83457f8cd837b70d2e1cf67527e69f85ceb63f07", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1692307692, "max_line_length": 131, "alphanum_fraction": 0.5296167247, "include": true, "reason": "import numpy", "num_tokens": 3691}
|
"""
Just all the imports for all other scripts and notebooks.
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from collections import OrderedDict
import pandas as pd
import pickle
import pdb
import netCDF4 as nc
import xarray as xr
import h5py
from glob import glob
import sys, os
from os import path
from configargparse import ArgParser
import fire
import logging
with open(os.path.join(os.path.dirname(__file__), 'hyai_hybi.pkl'), 'rb') as f:
hyai, hybi = pickle.load(f)
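# hyai/hybi are, by the CAM naming convention, the hybrid sigma-pressure
# vertical-coordinate coefficients at level interfaces, used to reconstruct
# interface pressures as p_i = hyai_i * P0 + hybi_i * PS; this description
# is inferred from the file name, not stated in this module.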
|
{"hexsha": "9e10924a19825d76c200da8e15b95d6c253b8a47", "size": 510, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/imports.py", "max_stars_repo_name": "Somendratripathi/CBRAIN-CAM", "max_stars_repo_head_hexsha": "3a47941a282f4f250b130f4cc485db0807570bfb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/imports.py", "max_issues_repo_name": "Somendratripathi/CBRAIN-CAM", "max_issues_repo_head_hexsha": "3a47941a282f4f250b130f4cc485db0807570bfb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/imports.py", "max_forks_repo_name": "Somendratripathi/CBRAIN-CAM", "max_forks_repo_head_hexsha": "3a47941a282f4f250b130f4cc485db0807570bfb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-24T20:06:18.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-24T20:06:18.000Z", "avg_line_length": 19.6153846154, "max_line_length": 79, "alphanum_fraction": 0.7745098039, "include": true, "reason": "import numpy", "num_tokens": 128}
|
function seq = ScanMatch_NumToDoubleStr(num, modulus)
%SCANMATCH_NUMTODOUBLESTR transform an array of numbers into a sequence of
% double characters (each number is represented by a pair of two letters)
% as a function of the modulus. The modulus is the length of the
% alphabet used for the second letter of a pair.
% e.g. seq = ScanMatch_NumToDoubleStr([1 2 27], 26) % modulus = 26 (all the alphabet is used)
% returns aAaBbA
% e.g. seq = ScanMatch_NumToDoubleStr([1 2 27], 10) % modulus = 10 (only the first ten letters are used for the second character)
% returns aAaBcG
%
% The modulus can be any number between 1 and 26.
% The input numbers have to be between 1 and 676 (which is the maximum you
% can code with two characters: 676 is zZ with a modulus of 26 )
%
% Part of the ScanMatch toolbox
% Written by Filipe Cristino
% $Version: 1.00 $ $Date: 10/09/2009
% ---- Check Modulus ----
if modulus > 26 || modulus < 1
error('ScanMatch:WrongInput', 'The modulus has to be between 1 and 26!')
end
% ---- Check input numbers ----
max_num = 26 * modulus; % Max number you can convert in function of the modulus
if sum(num <1 | num > max_num)
error('ScanMatch:WrongInput', 'The input number can not be less than 1 or more than (26 * modulus)')
end
% ---- subtract one from the array as the conversion works from zero
num = num - 1;
% ---- Start conversion ----
num_l = length(num);
ind = 1;
for i=1:num_l
seq(ind) = lower(char((fix(num(i)/modulus))+65));
seq(ind+1) = char(rem(num(i),modulus)+65);
ind = ind+2;
end
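% Inverse mapping, for reference (illustrative local function; not part of
% the original toolbox, and it assumes a well-formed sequence produced by
% ScanMatch_NumToDoubleStr with the same modulus):
function num = ScanMatch_DoubleStrToNum(seq, modulus)
first = double(upper(seq(1:2:end))) - 65;   % quotient encoded by the lower-case letter
second = double(seq(2:2:end)) - 65;         % remainder encoded by the upper-case letter
num = first .* modulus + second + 1;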
|
{"author": "Allopart", "repo": "rbpf-gmapping", "sha": "affe0adc25fa446fc7af4902d699d92864bdba1b", "save_path": "github-repos/MATLAB/Allopart-rbpf-gmapping", "path": "github-repos/MATLAB/Allopart-rbpf-gmapping/rbpf-gmapping-affe0adc25fa446fc7af4902d699d92864bdba1b/rvctools/ScanMatcher/ScanMatch_NumToDoubleStr.m"}
|
# The whole framework from GNN Explainer to get data and model
""" explainer_main.py
Main user interface for the explainer module.
"""
from graphshap_explainer import GraphSHAP
from explainer import explain
import utils.train_utils as train_utils
import utils.parser_utils as parser_utils
import utils.math_utils as math_utils
import utils.io_utils as io_utils
import utils.graph_utils as graph_utils
import utils.featgen as featgen
import models
import gengraph
import configs
from tensorboardX import SummaryWriter
import torch
import sklearn.metrics as metrics
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import argparse
import os
import pickle
import shutil
import warnings
from types import SimpleNamespace
import time
warnings.filterwarnings("ignore")
def arg_parse():
parser = argparse.ArgumentParser(description="GNN Explainer arguments.")
io_parser = parser.add_mutually_exclusive_group(required=False)
io_parser.add_argument("--dataset", dest="dataset", help="Input dataset.")
benchmark_parser = io_parser.add_argument_group()
benchmark_parser.add_argument(
"--bmname", dest="bmname", help="Name of the benchmark dataset"
)
io_parser.add_argument("--pkl", dest="pkl_fname",
help="Name of the pkl data file")
parser_utils.parse_optimizer(parser)
parser.add_argument("--clean-log", action="store_true",
help="If true, cleans the specified log directory before running.")
parser.add_argument("--logdir", dest="logdir",
help="Tensorboard log directory")
parser.add_argument("--ckptdir", dest="ckptdir",
help="Model checkpoint directory")
parser.add_argument("--cuda", dest="cuda", help="CUDA.")
parser.add_argument(
"--gpu",
dest="gpu",
action="store_const",
const=True,
default=False,
help="whether to use GPU.",
)
parser.add_argument(
"--epochs", dest="num_epochs", type=int, help="Number of epochs to train."
)
parser.add_argument(
"--hidden-dim", dest="hidden_dim", type=int, help="Hidden dimension"
)
parser.add_argument(
"--output-dim", dest="output_dim", type=int, help="Output dimension"
)
parser.add_argument(
"--num-gc-layers",
dest="num_gc_layers",
type=int,
help="Number of graph convolution layers before each pooling",
)
parser.add_argument(
"--bn",
dest="bn",
action="store_const",
const=True,
default=False,
help="Whether batch normalization is used",
)
parser.add_argument("--dropout", dest="dropout",
type=float, help="Dropout rate.")
parser.add_argument(
"--nobias",
dest="bias",
action="store_const",
const=False,
default=True,
help="Whether to add bias. Default to True.",
)
parser.add_argument(
"--no-writer",
dest="writer",
action="store_const",
const=False,
default=True,
help="Whether to add bias. Default to True.",
)
# Explainer
parser.add_argument("--mask-act", dest="mask_act",
type=str, help="sigmoid, ReLU.")
parser.add_argument(
"--mask-bias",
dest="mask_bias",
action="store_const",
const=True,
default=False,
help="Whether to add bias. Default to True.",
)
parser.add_argument(
"--explain-node", dest="explain_node", type=int, help="Node to explain."
)
parser.add_argument(
"--graph-idx", dest="graph_idx", type=int, help="Graph to explain."
)
parser.add_argument(
"--graph-mode",
dest="graph_mode",
action="store_const",
const=True,
default=False,
help="whether to run Explainer on Graph Classification task.",
)
parser.add_argument(
"--multigraph-class",
dest="multigraph_class",
type=int,
help="whether to run Explainer on multiple Graphs from the Classification task for examples in the same class.",
)
parser.add_argument(
"--multinode-class",
dest="multinode_class",
type=int,
help="whether to run Explainer on multiple nodes from the Classification task for examples in the same class.",
)
parser.add_argument(
"--align-steps",
dest="align_steps",
type=int,
help="Number of iterations to find P, the alignment matrix.",
)
parser.add_argument(
"--method", dest="method", type=str, help="Method. Possible values: base, att."
)
parser.add_argument(
"--name-suffix", dest="name_suffix", help="suffix added to the output filename"
)
parser.add_argument(
"--explainer-suffix",
dest="explainer_suffix",
help="suffix added to the explainer log",
)
parser.add_argument(
"--hops",
dest="hops",
type=int,
help="k-hop subgraph considered for GraphSHAP",
)
parser.add_argument(
"--num_samples",
dest="num_samples",
type=int,
help="number of samples used to train GraphSHAP",
)
parser.add_argument("--multiclass", type=bool,
help='False if we consider explanations for the predicted class only')
parser.add_argument("--hv", type=str,
help="way simplified input is translated to the original input space")
parser.add_argument("--feat", type=str,
help="node features considered for hv above")
parser.add_argument("--coal", type=str,
help="type of coalition sampler")
parser.add_argument("--g", type=str,
help="method used to train g on derived dataset")
parser.add_argument("--regu", type=int,
help='None if we do not apply regularisation, 1 if only feat')
parser.add_argument("--info", type=bool,
help='True if we want to see info about the explainer')
parser.add_argument("--fullempty", type=str, default=None,
help='True if want to discard full and empty coalitions')
parser.add_argument("--S", type=int, default=3,
help='Max size of coalitions sampled in priority and treated specifically')
# TODO: Check argument usage
parser.set_defaults(
logdir="log",
ckptdir="ckpt",
dataset="syn5",
#bmname='Mutagenicity',
graph_mode=False,
opt="adam",
opt_scheduler="none",
gpu=False,
cuda="0",
lr=0.1,
clip=2.0,
batch_size=20,
num_epochs=100,
hidden_dim=20,
output_dim=20,
num_gc_layers=3,
dropout=0.0,
method="base",
name_suffix="",
explainer_suffix="",
align_steps=1000,
explain_node=None,
graph_idx=-1,
mask_act="sigmoid",
multigraph_class=-1,
multinode_class=-1,
hops=2,
num_samples=50,
multiclass=False,
fullempty=None,
S=4,
hv='compute_pred',
feat='Expectation',
coal='Smarter',
g='WLR_sklearn',
regu=None,
info=True,
)
return parser.parse_args()
def preprocess_graph(G, labels, normalize_adj=False):
""" Load an existing graph to be converted for the experiments.
Args:
G: Networkx graph to be loaded.
labels: Associated node labels.
normalize_adj: Should the method return a normalized adjacency matrix.
Returns:
A dictionary containing adjacency, node features and labels
"""
# Define adj matrix
adj = np.array(nx.to_numpy_matrix(G))
if normalize_adj:
sqrt_deg = np.diag(
1.0 / np.sqrt(np.sum(adj, axis=0, dtype=float).squeeze()))
adj = np.matmul(np.matmul(sqrt_deg, adj), sqrt_deg)
edge_index = torch.tensor([[], []], dtype=torch.int64)
for i, row in enumerate(adj):
for j, entry in enumerate(row):
if entry != 0:
edge_index = torch.cat((edge_index, torch.tensor([[torch.tensor(i, dtype=torch.int64)], [
torch.tensor(j, dtype=torch.int64)]], dtype=torch.int64)), dim=1)
# Define features
existing_node = list(G.nodes)[-1]
feat_dim = G.nodes[existing_node]["feat"].shape[0]
f = torch.zeros(G.number_of_nodes(), feat_dim)
for i, u in enumerate(G.nodes()):
f[i, :] = torch.tensor(G.nodes[u]["feat"])
# Define labels
labels = torch.tensor(labels)
return f, edge_index, labels
def transform_data(adj, x, labels):
data = SimpleNamespace()
adj_transfo = torch.tensor(adj, dtype=torch.int64)[0]
data.edge_index = torch.tensor([[], []], dtype=torch.int64)
for i, row in enumerate(adj_transfo):
for j, entry in enumerate(row):
if entry != 0:
data.edge_index = torch.cat((data.edge_index, torch.tensor([[torch.tensor(i, dtype=torch.int64)], [
torch.tensor(j, dtype=torch.int64)]], dtype=torch.int64)), dim=1)
# Define features
feat_dim = x.size(2)
data.x = torch.zeros(x.size(1), feat_dim)
for i, u in enumerate(range(x.size(1))):
data.x[i, :] = torch.tensor(x[0, i, :])
# Define labels
data.y = torch.tensor(labels)
try:
data.num_classes = max(labels) + 1
except TypeError:
data.num_classes = 1
data.num_features = x.size(2)
data.num_nodes = x.size(1)
return data
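# Note: preprocess_graph() and transform_data() above grow edge_index one
# edge at a time with torch.cat inside a double loop, costing a tensor
# copy per edge. An equivalent vectorized sketch (illustrative; it yields
# the same (2, E) int64 edge_index up to edge ordering):
def dense_adj_to_edge_index(adj_2d):
    adj_2d = torch.as_tensor(adj_2d)
    # indices of all non-zero entries, transposed to the (2, E) convention
    return torch.nonzero(adj_2d, as_tuple=False).t().contiguous()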
def extract_test_nodes(data, num_samples, train_indexes):
"""
:param data: dataset
:param num_samples: number of test samples desired
:param train_indexes: indexes of training samples
:return: list of indexes representing nodes used as test samples
"""
test_indices = list(set(range(300, 700, 5)) - set(train_indexes))
node_indices = np.random.choice(test_indices, num_samples).tolist()
return node_indices
def main():
# Load a configuration
prog_args = arg_parse()
if prog_args.gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = prog_args.cuda
print("CUDA", prog_args.cuda)
else:
print("Using CPU")
# Configure the logging directory
if prog_args.writer:
path = os.path.join(
prog_args.logdir, io_utils.gen_explainer_prefix(prog_args))
if os.path.isdir(path) and prog_args.clean_log:
print('Removing existing log dir: ', path)
if not input("Are you sure you want to remove this directory? (y/n): ").lower().strip()[:1] == "y":
sys.exit(1)
shutil.rmtree(path)
writer = SummaryWriter(path)
else:
writer = None
# Load data and a model checkpoint
ckpt = io_utils.load_ckpt(prog_args)
cg_dict = ckpt["cg"] # get computation graph
input_dim = cg_dict["feat"].shape[2]
num_classes = cg_dict["pred"].shape[2]
print("Loaded model from {}".format(prog_args.ckptdir))
print("input dim: ", input_dim, "; num classes: ", num_classes)
    # Determine explainer mode (node vs. graph classification)
graph_mode = (
prog_args.graph_mode
or prog_args.multigraph_class >= 0
or prog_args.graph_idx >= 0
)
# build model
print("Method: ", prog_args.method)
if graph_mode:
# Explain Graph prediction
model = models.GcnEncoderGraph(
input_dim=input_dim,
hidden_dim=prog_args.hidden_dim,
embedding_dim=prog_args.output_dim,
label_dim=num_classes,
num_layers=prog_args.num_gc_layers,
bn=prog_args.bn,
args=prog_args,
)
else:
if prog_args.dataset == "ppi_essential":
# class weight in CE loss for handling imbalanced label classes
prog_args.loss_weight = torch.tensor(
[1.0, 5.0], dtype=torch.float).cuda()
# Explain Node prediction
model = models.GcnEncoderNode(
input_dim=input_dim,
hidden_dim=prog_args.hidden_dim,
embedding_dim=prog_args.output_dim,
label_dim=num_classes,
num_layers=prog_args.num_gc_layers,
bn=prog_args.bn,
args=prog_args,
)
if prog_args.gpu:
model = model.cuda()
# Load state_dict (obtained by model.state_dict() when saving checkpoint)
model.load_state_dict(ckpt["model_state"])
    # Convert the data as required to get the correct model output for GraphSHAP
adj = torch.tensor(cg_dict["adj"], dtype=torch.float)
x = torch.tensor(cg_dict["feat"], requires_grad=True, dtype=torch.float)
if prog_args.gpu:
y_pred, att_adj = model(x.cuda(), adj.cuda())
else:
y_pred, att_adj = model(x, adj)
# Transform their data into our format
data = transform_data(adj, x, cg_dict["label"][0].tolist())
# Generate test nodes
# Use only these specific nodes as they are the ones added manually, part of the defined shapes
# node_indices = extract_test_nodes(data, num_samples=10, cg_dict['train_idx'])
k = 4 # number of nodes for the shape introduced (house, cycle)
K = 0
if prog_args.dataset == 'syn1':
node_indices = list(range(400, 450, 5))
elif prog_args.dataset == 'syn2':
node_indices = list(range(400, 425, 5)) + list(range(1100, 1125, 5))
elif prog_args.dataset == 'syn4':
node_indices = list(range(511, 571, 6))
if prog_args.hops == 3:
k = 5
else:
K = 5
elif prog_args.dataset == 'syn5':
node_indices = list(range(511, 601, 9))
if prog_args.hops == 3:
k = 8
else:
k = 5
K = 8
# GraphSHAP explainer
graphshap = GraphSHAP(data, model, adj, writer,
prog_args.dataset, prog_args.gpu)
# Run GNN Explainer and retrieve produced explanations
gnne = explain.Explainer(
model=model,
adj=cg_dict["adj"],
feat=cg_dict["feat"],
label=cg_dict["label"],
pred=cg_dict["pred"],
train_idx=cg_dict["train_idx"],
args=prog_args,
writer=writer,
print_training=True,
graph_mode=graph_mode,
graph_idx=prog_args.graph_idx,
)
#if prog_args.explain_node is not None:
# _, gnne_edge_accuracy, gnne_auc, gnne_node_accuracy = \
# gnne.explain_nodes_gnn_stats(
# node_indices, prog_args
# )
# elif graph_mode:
# # Graph explanation
# gnne_expl = gnne.explain_graphs([1])[0]
# GraphSHAP - assess accuracy of explanations
# Loop over test nodes
accuracy = []
feat_accuracy = []
for node_idx in node_indices:
start = time.time()
graphshap_explanations = graphshap.explain([node_idx],
prog_args.hops,
prog_args.num_samples,
prog_args.info,
prog_args.multiclass,
prog_args.fullempty,
prog_args.S,
prog_args.hv,
prog_args.feat,
prog_args.coal,
prog_args.g,
prog_args.regu,
)[0]
end = time.time()
print('GS Time:', end-start)
# Predicted class
pred_val, predicted_class = y_pred[0, node_idx, :].max(dim=0)
# Keep only node explanations
# ,predicted_class]
graphshap_node_explanations = graphshap_explanations[graphshap.F:]
# Derive ground truth from graph structure
ground_truth = list(range(node_idx+1, node_idx+max(k, K)+1))
# Retrieve top k elements indices form graphshap_node_explanations
if graphshap.neighbours.shape[0] > k:
i = 0
val, indices = torch.topk(torch.tensor(
graphshap_node_explanations.T), k+1)
# could weight importance based on val
for node in graphshap.neighbours[indices]:
if node.item() in ground_truth:
i += 1
            # Simple accuracy metric
accuracy.append(i / k)
print('There are {} from targeted shape among most imp. nodes'.format(i))
# Look at importance distribution among features
# Identify most important features and check if it corresponds to truly imp ones
if prog_args.dataset == 'syn2':
# ,predicted_class]
graphshap_feat_explanations = graphshap_explanations[:graphshap.F]
print('Feature importance graphshap',
graphshap_feat_explanations.T)
if np.argsort(graphshap_feat_explanations)[-1] == 0:
feat_accuracy.append(1)
else:
feat_accuracy.append(0)
# Metric for graphshap
final_accuracy = sum(accuracy)/len(accuracy)
### GNNE
# Explain a set of nodes - accuracy on edges this time
_, gnne_edge_accuracy, gnne_auc, gnne_node_accuracy =\
gnne.explain_nodes_gnn_stats(
node_indices, prog_args
)
### GRAD benchmark
    # Metrics to assess quality of predictions
"""
_, grad_edge_accuracy, grad_auc, grad_node_accuracy =\
gnne.explain_nodes_gnn_stats(
node_indices, prog_args, model="grad")
"""
grad_edge_accuracy = 0
grad_node_accuracy = 0
### GAT
# Nothing for now - implem a GAT on the side and look at weights coef
### Results
print('Accuracy for GraphSHAP is {:.2f} vs {:.2f},{:.2f} for GNNE vs {:.2f},{:.2f} for GRAD'.format(
final_accuracy, np.mean(
gnne_edge_accuracy), np.mean(gnne_node_accuracy),
np.mean(grad_edge_accuracy), np.mean(grad_node_accuracy))
)
if prog_args.dataset == 'syn2':
print('Most important feature was found in {:.2f}% of the case'.format(
100*np.mean(feat_accuracy)))
print('GNNE_auc is:', gnne_auc)
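# The per-node score computed in the loop inside main(), factored out for
# clarity (an illustrative refactoring; main() itself does not call it):
def topk_node_accuracy(node_importance, neighbours, ground_truth, k):
    """Fraction of the ground-truth motif found among the k+1 most
    important neighbours, divided by k as in the loop above."""
    _, idx = torch.topk(torch.as_tensor(node_importance).reshape(-1), k + 1)
    hits = sum(int(n.item() in ground_truth) for n in neighbours[idx])
    return hits / k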
if __name__ == "__main__":
main()
|
{"hexsha": "671099b7b478b25e96a64aefa6dfbeb170308e78", "size": 18592, "ext": "py", "lang": "Python", "max_stars_repo_path": "initial_test.py", "max_stars_repo_name": "AlexDuvalinho/GNNE_eval", "max_stars_repo_head_hexsha": "da6fa3c8f6587276cc270dde4be265c487b1239f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "initial_test.py", "max_issues_repo_name": "AlexDuvalinho/GNNE_eval", "max_issues_repo_head_hexsha": "da6fa3c8f6587276cc270dde4be265c487b1239f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "initial_test.py", "max_forks_repo_name": "AlexDuvalinho/GNNE_eval", "max_forks_repo_head_hexsha": "da6fa3c8f6587276cc270dde4be265c487b1239f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1137614679, "max_line_length": 120, "alphanum_fraction": 0.5930507745, "include": true, "reason": "import numpy,import networkx", "num_tokens": 4205}
|
#!/usr/bin/python3
"""transliteration of Kim Asendorf's pixel sorting script"""
from copy import copy
from random import random, gauss
from PIL import Image
from numpy import int32
from argparse import ArgumentParser
# PROGRAM CONSTANTS
# rgb(103, 105, 128)
BLACK_VALUE = int32(-10000000)
# rgb(230, 230, 230)
WHITE_VALUE = int32((255 << 24) + (230 << 16) + (230 << 8) + 230)
BRIGHTNESS_VALUE = int32(30)
# PIXEL CONVERSION FUNCTIONS
def get_pixel_value(pixel):
"""rgb pixel to int32 processing representation"""
return(int32((((255 << 8) | pixel[0]) << 8 | pixel[1]) << 8 | pixel[2]))
def get_pixel_brightness(pixel):
"""rgb pixel to brightness value"""
return(max((pixel[0], pixel[1], pixel[2])) / 255 * 100)
# PIXEL FINDING FUNCTIONS
def get_next_satisfying(vector, starting_position, condition_fun):
"""find next pixel in the vector after starting position
that satisfies the condition (boolean)
return -1 if not found"""
position = starting_position
while(position < len(vector) and
not(condition_fun(vector[position]))):
position += 1
if(position == (len(vector) - 1) and
not(condition_fun(vector[position]))):
position = - 1
return(position)
# black mode
def get_next_black(vector, starting_position):
"""next black pixel"""
condition = lambda x: int32(get_pixel_value(x)) > BLACK_VALUE
return get_next_satisfying(vector, starting_position, condition)
def get_next_not_black(vector, starting_position):
"""next non black pixel"""
condition = lambda x: int32(get_pixel_value(x)) < BLACK_VALUE
return get_next_satisfying(vector, starting_position, condition)
# bright mode
def get_next_bright(vector, starting_position):
"""next bright pixel"""
condition = lambda x: int32(get_pixel_brightness(x)) < BRIGHTNESS_VALUE
return get_next_satisfying(vector, starting_position, condition)
def get_next_dark(vector, starting_position):
"""next dark pixel"""
condition = lambda x: int32(get_pixel_brightness(x)) > BRIGHTNESS_VALUE
return get_next_satisfying(vector, starting_position, condition)
# white mode
def get_next_white(vector, starting_position):
"""next white pixel"""
condition = lambda x: int32(get_pixel_value(x)) < WHITE_VALUE
return get_next_satisfying(vector, starting_position, condition)
def get_next_not_white(vector, starting_position):
"""next not white pixel"""
condition = lambda x: int32(get_pixel_value(x)) > WHITE_VALUE
return get_next_satisfying(vector, starting_position, condition)
FIND_FUNCTIONS = ((get_next_black, get_next_not_black), # black
(get_next_bright, get_next_dark), # bright
(get_next_white, get_next_not_white)) # white
# PIXEL SORTING FUNCTIONS
def sort_pixels(vector, mode=0, find=FIND_FUNCTIONS):
"""sort pixel in the given vector"""
assert(mode in (0, 1, 2)), "invalid use case"
    vector = copy(vector)
    position = 0
    while(position < len(vector)):
        position = find[mode][0](vector, position)
        if(position == -1):
            # no further band start
            break
        pos_end = find[mode][1](vector, position)
        if(pos_end == -1):
            # the band runs to the end of the vector
            pos_end = len(vector)
        vector[position:pos_end] = sorted(vector[position:pos_end],
                                          key=lambda x: get_pixel_value(x))
        position = pos_end + 1
    return(vector)
# IMAGE TRANSFORMATIONS
def to_vectors(rgb_image, row_or_col):
"""rgb image -> list of lists of RGB tuples"""
    assert(rgb_image.mode == "RGB"), "must be a RGB image"
assert(row_or_col in (0, 1)), "row = 0, col = 1"
vectors = []
x_size, y_size = rgb_image.size
if(row_or_col == 0):
for y_coord in range(0, y_size):
row = []
for x_coord in range(0, x_size):
row.append(rgb_image.getpixel((x_coord, y_coord)))
vectors.append(row)
else:
for x_coord in range(0, x_size):
col = []
for y_coord in range(0, y_size):
col.append(rgb_image.getpixel((x_coord, y_coord)))
vectors.append(col)
return(vectors)
# COMPLETE FUNCTIONS
def sort_image(image, row_or_col, mode=0, prob=1, avg_band_size=1):
"""input: (rgb image, row or column, sort mode, probability of sorting,
average band size for sorting)
output: sorted out image)"""
x_size, y_size = image.size
sigma = avg_band_size / 4
vectors = to_vectors(image, row_or_col)
new_vectors = []
position = 0
while(position < len(vectors)):
if(random() < prob):
# calculate the indices of the rows to sort
to_sort = []
coarseness = int(gauss(avg_band_size, sigma))
for index in range(position, position + coarseness):
if(index >= len(vectors)):
break
else:
to_sort.append(index)
for index in to_sort:
new_vectors.append(sort_pixels(vectors[index], mode))
position += coarseness
else:
new_vectors.append(vectors[position])
position += 1
new_image = []
if(row_or_col == 0):
for vector in new_vectors:
for (red, green, blue) in vector:
new_image.append(int(red))
new_image.append(int(green))
new_image.append(int(blue))
else:
for i in range(0, y_size):
for vector in new_vectors:
(red, green, blue) = vector[i]
new_image.append(int(red))
new_image.append(int(green))
new_image.append(int(blue))
    # Image.fromstring was removed from Pillow; frombytes takes the same arguments
    return(Image.frombytes('RGB', (x_size, y_size), bytes(new_image)))
__all__ = ["sort_image"]
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-i", dest="input_image_file", required=True, type=str, help="input")
parser.add_argument("-o", dest="output_image_file", required=True, type=str, help="output")
args = parser.parse_args()
image = Image.open(args.input_image_file)
sort_image(image, 0).save(args.output_image_file)
|
{"hexsha": "2996df27209f1d350199a6a54bcf14fae9ad1a1a", "size": 6173, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pixel_sorting.py", "max_stars_repo_name": "in3rtial/imgsrt", "max_stars_repo_head_hexsha": "2dec237b7d797d9964ed874c4e4d72f7eb23eaf0", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-11-08T09:22:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-15T03:42:24.000Z", "max_issues_repo_path": "src/pixel_sorting.py", "max_issues_repo_name": "in3rtial/imgsrt", "max_issues_repo_head_hexsha": "2dec237b7d797d9964ed874c4e4d72f7eb23eaf0", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pixel_sorting.py", "max_forks_repo_name": "in3rtial/imgsrt", "max_forks_repo_head_hexsha": "2dec237b7d797d9964ed874c4e4d72f7eb23eaf0", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6613756614, "max_line_length": 95, "alphanum_fraction": 0.6387493925, "include": true, "reason": "from numpy", "num_tokens": 1474}
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ast import literal_eval as make_tuple
def plotHashtags(filename, savePng=True):
"""Created a stacked bar plot for the given hashtag data csv
:input: filename: path to file of csv
savePng: if user wants the plot saved as a png
:type: None
:return: None
:type: None
"""
# error checking
assert isinstance(filename, basestring)
try:
df = pd.read_csv(filename)
except:
print "error reading file " + filename
return
col_number = df.shape[1]
row_number = df.shape[0]
    # print hashtag counts to console
# for i in df.ix[y]:
# print i
data = [[] for i in range(col_number)]
col_number2=str(col_number-2)
temp=[]
for row in df.iterrows():
index, data = row
temp.append(data.tolist())
xDates = df['date']
data=[]
data_numer=[]
data_hashtag=[]
# get data except dates
for i in df.loc[:,col_number2:'0']:
data.append( df[i].tolist())
# extract tuple data
for i in data:
x=[]
y=[]
for j in i:
x.append(make_tuple(j)[1])
y.append(make_tuple(j)[0])
data_numer.append(x)
data_hashtag.append(y)
df=pd.DataFrame(data_numer)
df=df.transpose()
# plot counts
ax = df.plot.bar(stacked=True,legend=False);
#label totals at top of bar
for i in ax.patches[len(ax.patches)-row_number:]:
plt.text(i.get_x(),i.get_y()+75, int(i.get_y()),fontsize=10)
plt.gcf().subplots_adjust(bottom=0.15)
plt.xticks(range(10),xDates)
plt.xticks(rotation=30)
plt.ylabel('number of hashtags')
plt.xlabel('date')
height = 2000
for i in temp:
plt.text(10,height,i)
height = height-200
# if set, save as png
if savePng:
plt.savefig(filename+'.png')
plt.show();
return
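# The CSV this function expects looks roughly as follows (illustrative;
# the hashtags and counts below are made up): one 'date' column plus one
# column per hashtag rank, each cell holding a stringified
# (hashtag, count) tuple that ast.literal_eval parses above. Note the
# plotting code assumes exactly 10 date rows (the range(10) in plt.xticks).
def write_sample_csv(path):
    rows = [{'date': '2017-04-%02d' % (d + 4),
             '1': "('#united', %d)" % (120 - 10 * d),
             '0': "('#boycott', %d)" % (95 - 5 * d)}
            for d in range(10)]
    pd.DataFrame(rows, columns=['date', '1', '0']).to_csv(path, index=False)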
plotHashtags('twitter_analyzer/United_Airlines_Popular_Hashtags_2017-04-04_to_2017-04-13.csv', True)
|
{"hexsha": "483ed84e11814ea3eaef7f2395d40ac2603bfdde", "size": 2098, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot_hashtag_bar.py", "max_stars_repo_name": "AlexHtZhang/A-billion-dollar-deal", "max_stars_repo_head_hexsha": "573ba92026f063d90a08daca73cc61c1325eb695", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plot_hashtag_bar.py", "max_issues_repo_name": "AlexHtZhang/A-billion-dollar-deal", "max_issues_repo_head_hexsha": "573ba92026f063d90a08daca73cc61c1325eb695", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plot_hashtag_bar.py", "max_forks_repo_name": "AlexHtZhang/A-billion-dollar-deal", "max_forks_repo_head_hexsha": "573ba92026f063d90a08daca73cc61c1325eb695", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6823529412, "max_line_length": 100, "alphanum_fraction": 0.5958055291, "include": true, "reason": "import numpy", "num_tokens": 549}
|
import pandas as pd
import numpy as np
from scipy import stats
import glob, os
class KalmanFilter(object):
def __init__(self, F = None, B = None, H = None, Q = None, R = None, P = None, x0 = None):
if(F is None or H is None):
raise ValueError("Set proper system dynamics.")
self.n = F.shape[1]
self.m = H.shape[1]
self.F = F
self.H = H
self.B = 0 if B is None else B
self.Q = np.eye(self.n) if Q is None else Q
self.R = np.eye(self.n) if R is None else R
self.P = np.eye(self.n) if P is None else P
self.x = np.zeros((self.n, 1)) if x0 is None else x0
def predict(self, u = 0):
self.x = np.dot(self.F, self.x) + np.dot(self.B, u)
self.P = np.dot(np.dot(self.F, self.P), self.F.T) + self.Q
return self.x
def update(self, z):
y = z - np.dot(self.H, self.x)
S = self.R + np.dot(self.H, np.dot(self.P, self.H.T))
K = np.dot(np.dot(self.P, self.H.T), np.linalg.inv(S))
self.x = self.x + np.dot(K, y)
I = np.eye(self.n)
self.P = np.dot(np.dot(I - np.dot(K, self.H), self.P),
(I - np.dot(K, self.H)).T) + np.dot(np.dot(K, self.R), K.T)
dt = 1.0/25
#Transition
F = np.array([[1, -dt], [0, 1]])
#Observation
H = np.array([1, 0]).reshape(1, 2)
Q = np.array([[0.001, 0.000], [0.000, 0.003]])
R = np.array([0.03]).reshape(1, 1)
# # Transition
# F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]])
# # Observation
# H = np.array([1, 0, 0]).reshape(1, 3)
# Q = np.array([[0.05, 0.05, 0.0], [0.05, 0.05, 0.0], [0.0, 0.0, 0.0]])
# R = np.array([0.5]).reshape(1, 1)
def compute_kalman(measurements):
kf = KalmanFilter(F = F, H = H, Q = Q, R = R)
predictions = []
for z in measurements:
predictions.append(np.dot(H, kf.predict())[0])
kf.update(z)
predictions=np.asarray(predictions).reshape(-1)
return predictions
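# Quick self-check on synthetic data (illustrative; not part of the
# pipeline): the filter should reduce the variance of a noisy constant
# signal.
def demo_kalman():
    rng = np.random.default_rng(0)
    z = 1.0 + 0.2 * rng.standard_normal(200)  # noisy measurements of a constant
    smoothed = compute_kalman(z)
    print('raw var: %.4f, smoothed var: %.4f' % (np.var(z), np.var(smoothed)))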
def read_data_sets(file_path):
column_names = ['timestamp','x-axis', 'y-axis', 'z-axis','x1-axis', 'y1-axis', 'z1-axis','x2-axis', 'y2-axis', 'z2-axis','activity']
data = pd.read_csv(file_path,header = None, names = column_names,delimiter='\t')
return data
def filter_noise(data):
    # apply the same Kalman smoother to every sensor axis column
    for col in ['x-axis', 'y-axis', 'z-axis',
                'x1-axis', 'y1-axis', 'z1-axis',
                'x2-axis', 'y2-axis', 'z2-axis']:
        data[col] = compute_kalman(data[col])
    return data
def main():
    for filename in glob.iglob('original_dataset/**', recursive=True):
        if os.path.isfile(filename):
            dataset = read_data_sets(file_path=filename)
            dataset = filter_noise(data=dataset)
            list_str_filename = filename.split('/', 1)
            out_filename = "filtered_dataset/" + list_str_filename[1]
            directory = out_filename.replace("out.txt", "")
            if not os.path.exists(directory):
                os.makedirs(directory)
            dataset.to_csv(out_filename, header=None, sep='\t',
                           encoding='utf-8', index=False, float_format='%.6f')
main()
|
{"hexsha": "ad35100e84f31c67d8d330be992fecf16d39b4e4", "size": 3468, "ext": "py", "lang": "Python", "max_stars_repo_path": "gesture_dataset/filter_noise.py", "max_stars_repo_name": "hamanhbui/hand-gesture_recognition", "max_stars_repo_head_hexsha": "4225122f85c982a1f291bcbc6a406b9eb65c7ffb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-01-08T14:53:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-09T04:02:54.000Z", "max_issues_repo_path": "gesture_dataset/filter_noise.py", "max_issues_repo_name": "hamanhbui/hand-gesture_recognition", "max_issues_repo_head_hexsha": "4225122f85c982a1f291bcbc6a406b9eb65c7ffb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gesture_dataset/filter_noise.py", "max_forks_repo_name": "hamanhbui/hand-gesture_recognition", "max_forks_repo_head_hexsha": "4225122f85c982a1f291bcbc6a406b9eb65c7ffb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2903225806, "max_line_length": 165, "alphanum_fraction": 0.5870818916, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1079}
|
*DECK POISTG
SUBROUTINE POISTG (NPEROD, N, MPEROD, M, A, B, C, IDIMY, Y,
+ IERROR, W)
C***BEGIN PROLOGUE POISTG
C***PURPOSE Solve a block tridiagonal system of linear equations
C that results from a staggered grid finite difference
C approximation to 2-D elliptic PDE's.
C***LIBRARY SLATEC (FISHPACK)
C***CATEGORY I2B4B
C***TYPE SINGLE PRECISION (POISTG-S)
C***KEYWORDS ELLIPTIC, FISHPACK, HELMHOLTZ, PDE, TRIDIAGONAL
C***AUTHOR Adams, J., (NCAR)
C Swarztrauber, P. N., (NCAR)
C Sweet, R., (NCAR)
C***DESCRIPTION
C
C Subroutine POISTG solves the linear system of equations
C
C A(I)*X(I-1,J) + B(I)*X(I,J) + C(I)*X(I+1,J)
C + X(I,J-1) - 2.*X(I,J) + X(I,J+1) = Y(I,J)
C
C for I=1,2,...,M and J=1,2,...,N.
C
C The indices I+1 and I-1 are evaluated modulo M, i.e.
C X(0,J) = X(M,J) and X(M+1,J) = X(1,J), and X(I,0) may be equal to
C X(I,1) or -X(I,1) and X(I,N+1) may be equal to X(I,N) or -X(I,N)
C depending on an input parameter.
C
C
C * * * * * * * * Parameter Description * * * * * * * * * *
C
C * * * * * * On Input * * * * * *
C
C NPEROD
C Indicates the values which X(I,0) and X(I,N+1) are assumed
C to have.
C = 1 If X(I,0) = -X(I,1) and X(I,N+1) = -X(I,N)
C = 2 If X(I,0) = -X(I,1) and X(I,N+1) = X(I,N)
C = 3 If X(I,0) = X(I,1) and X(I,N+1) = X(I,N)
C = 4 If X(I,0) = X(I,1) and X(I,N+1) = -X(I,N)
C
C N
C The number of unknowns in the J-direction. N must
C be greater than 2.
C
C MPEROD
C = 0 If A(1) and C(M) are not zero
C = 1 If A(1) = C(M) = 0
C
C M
C The number of unknowns in the I-direction. M must
C be greater than 2.
C
C A,B,C
C One-dimensional arrays of length M that specify the coefficients
C in the linear equations given above. If MPEROD = 0 the array
C elements must not depend on the index I, but must be constant.
C Specifically, the subroutine checks the following condition
C
C A(I) = C(1)
C B(I) = B(1)
C C(I) = C(1)
C
C for I = 1, 2, ..., M.
C
C IDIMY
C The row (or first) dimension of the two-dimensional array Y as
C it appears in the program calling POISTG. This parameter is
C used to specify the variable dimension of Y. IDIMY must be at
C least M.
C
C Y
C A two-dimensional array that specifies the values of the
C right side of the linear system of equations given above.
C Y must be dimensioned at least M X N.
C
C W
C A one-dimensional work array that must be provided by the user
C for work space. W may require up to 9M + 4N + M(INT(log2(N)))
C locations. The actual number of locations used is computed by
C POISTG and returned in location W(1).
C
C
C * * * * * * On Output * * * * * *
C
C Y
C Contains the solution X.
C
C IERROR
C An error flag that indicates invalid input parameters. Except
C for number zero, a solution is not attempted.
C = 0 No error
C = 1 If M .LE. 2
C = 2 If N .LE. 2
C = 3 IDIMY .LT. M
C = 4 If NPEROD .LT. 1 or NPEROD .GT. 4
C = 5 If MPEROD .LT. 0 or MPEROD .GT. 1
C = 6 If MPEROD = 0 and
C A(I) .NE. C(1) or B(I) .NE. B(1) or C(I) .NE. C(1)
C for some I = 1, 2, ..., M.
C = 7 If MPEROD .EQ. 1 .AND. (A(1).NE.0 .OR. C(M).NE.0)
C
C W
C W(1) contains the required length of W.
C
C *Long Description:
C
C * * * * * * * Program Specifications * * * * * * * * * * * *
C
C Dimension of A(M),B(M),C(M),Y(IDIMY,N),
C Arguments W(see argument list)
C
C Latest June 1, 1977
C Revision
C
C Subprograms POISTG,POSTG2,COSGEN,MERGE,TRIX,TRI3,PIMACH
C Required
C
C Special NONE
C Conditions
C
C Common NONE
C Blocks
C
C I/O NONE
C
C Precision Single
C
C Specialist Roland Sweet
C
C Language FORTRAN
C
C History Written by Roland Sweet in 1973
C Revised by Roland Sweet in 1977
C
C
C Space 3297(decimal) = 6341(octal) locations on the
C Required NCAR Control Data 7600
C
C Timing and The execution time T on the NCAR Control Data
C Accuracy 7600 for subroutine POISTG is roughly proportional
C to M*N*log2(N). Some typical values are listed
C in the table below. More comprehensive timing
C charts may be found in the reference.
C To measure the accuracy of the algorithm a
C uniform random number generator was used to create
C a solution array X for the system given in the
C 'PURPOSE ' with
C
C A(I) = C(I) = -0.5*B(I) = 1, I=1,2,...,M
C
C and, when MPEROD = 1
C
C A(1) = C(M) = 0
C B(1) = B(M) =-1.
C
C The solution X was substituted into the given sys-
C tem and, using double precision, a right side Y was
C computed. Using this array Y subroutine POISTG was
C called to produce an approximate solution Z. Then
C the relative error, defined as
C
C E = MAX(ABS(Z(I,J)-X(I,J)))/MAX(ABS(X(I,J)))
C
C where the two maxima are taken over all I=1,2,...,M
C and J=1,2,...,N, was computed. The value of E is
C given in the table below for some typical values of
C M and N.
C
C
C M (=N) MPEROD NPEROD T(MSECS) E
C ------ ------ ------ -------- ------
C
C 31 0-1 1-4 45 9.E-13
C 31 1 1 21 4.E-13
C 31 1 3 41 3.E-13
C 32 0-1 1-4 51 3.E-12
C 32 1 1 32 3.E-13
C 32 1 3 48 1.E-13
C 33 0-1 1-4 42 1.E-12
C 33 1 1 30 4.E-13
C 33 1 3 34 1.E-13
C 63 0-1 1-4 186 3.E-12
C 63 1 1 91 1.E-12
C 63 1 3 173 2.E-13
C 64 0-1 1-4 209 4.E-12
C 64 1 1 128 1.E-12
C 64 1 3 199 6.E-13
C 65 0-1 1-4 143 2.E-13
C 65 1 1 160 1.E-11
C 65 1 3 138 4.E-13
C
C Portability American National Standards Institute FORTRAN.
C The machine dependent constant PI is defined in
C function PIMACH.
C
C Required COS
C Resident
C Routines
C
C Reference Schumann, U. and R. Sweet,'A Direct Method for
C the Solution of Poisson's Equation With Neumann
C Boundary Conditions on a Staggered Grid of
C Arbitrary Size,' J. Comp. Phys. 20(1976),
C pp. 171-182.
C
C * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C
C***REFERENCES U. Schumann and R. Sweet, A direct method for the
C solution of Poisson's equation with Neumann boundary
C conditions on a staggered grid of arbitrary size,
C Journal of Computational Physics 20, (1976),
C pp. 171-182.
C***ROUTINES CALLED POSTG2
C***REVISION HISTORY (YYMMDD)
C 801001 DATE WRITTEN
C 861211 REVISION DATE from Version 3.2
C 891214 Prologue converted to Version 4.0 format. (BAB)
C 920501 Reformatted the REFERENCES section. (WRB)
C***END PROLOGUE POISTG
C
C
DIMENSION Y(IDIMY,*)
DIMENSION W(*) ,B(*) ,A(*) ,C(*)
C***FIRST EXECUTABLE STATEMENT POISTG
IERROR = 0
IF (M .LE. 2) IERROR = 1
IF (N .LE. 2) IERROR = 2
IF (IDIMY .LT. M) IERROR = 3
IF (NPEROD.LT.1 .OR. NPEROD.GT.4) IERROR = 4
IF (MPEROD.LT.0 .OR. MPEROD.GT.1) IERROR = 5
IF (MPEROD .EQ. 1) GO TO 103
DO 101 I=1,M
IF (A(I) .NE. C(1)) GO TO 102
IF (C(I) .NE. C(1)) GO TO 102
IF (B(I) .NE. B(1)) GO TO 102
101 CONTINUE
GO TO 104
102 IERROR = 6
RETURN
103 IF (A(1).NE.0. .OR. C(M).NE.0.) IERROR = 7
104 IF (IERROR .NE. 0) RETURN
IWBA = M+1
IWBB = IWBA+M
IWBC = IWBB+M
IWB2 = IWBC+M
IWB3 = IWB2+M
IWW1 = IWB3+M
IWW2 = IWW1+M
IWW3 = IWW2+M
IWD = IWW3+M
IWTCOS = IWD+M
IWP = IWTCOS+4*N
DO 106 I=1,M
K = IWBA+I-1
W(K) = -A(I)
K = IWBC+I-1
W(K) = -C(I)
K = IWBB+I-1
W(K) = 2.-B(I)
DO 105 J=1,N
Y(I,J) = -Y(I,J)
105 CONTINUE
106 CONTINUE
NP = NPEROD
MP = MPEROD+1
GO TO (110,107),MP
107 CONTINUE
GO TO (108,108,108,119),NPEROD
108 CONTINUE
CALL POSTG2 (NP,N,M,W(IWBA),W(IWBB),W(IWBC),IDIMY,Y,W,W(IWB2),
1 W(IWB3),W(IWW1),W(IWW2),W(IWW3),W(IWD),W(IWTCOS),
2 W(IWP))
IPSTOR = W(IWW1)
IREV = 2
IF (NPEROD .EQ. 4) GO TO 120
109 CONTINUE
GO TO (123,129),MP
110 CONTINUE
C
C REORDER UNKNOWNS WHEN MP =0
C
MH = (M+1)/2
MHM1 = MH-1
MODD = 1
IF (MH*2 .EQ. M) MODD = 2
DO 115 J=1,N
DO 111 I=1,MHM1
MHPI = MH+I
MHMI = MH-I
W(I) = Y(MHMI,J)-Y(MHPI,J)
W(MHPI) = Y(MHMI,J)+Y(MHPI,J)
111 CONTINUE
W(MH) = 2.*Y(MH,J)
GO TO (113,112),MODD
112 W(M) = 2.*Y(M,J)
113 CONTINUE
DO 114 I=1,M
Y(I,J) = W(I)
114 CONTINUE
115 CONTINUE
K = IWBC+MHM1-1
I = IWBA+MHM1
W(K) = 0.
W(I) = 0.
W(K+1) = 2.*W(K+1)
GO TO (116,117),MODD
116 CONTINUE
K = IWBB+MHM1-1
W(K) = W(K)-W(I-1)
W(IWBC-1) = W(IWBC-1)+W(IWBB-1)
GO TO 118
117 W(IWBB-1) = W(K+1)
118 CONTINUE
GO TO 107
119 CONTINUE
C
C REVERSE COLUMNS WHEN NPEROD = 4.
C
IREV = 1
NBY2 = N/2
NP = 2
120 DO 122 J=1,NBY2
MSKIP = N+1-J
DO 121 I=1,M
A1 = Y(I,J)
Y(I,J) = Y(I,MSKIP)
Y(I,MSKIP) = A1
121 CONTINUE
122 CONTINUE
GO TO (108,109),IREV
123 CONTINUE
DO 128 J=1,N
DO 124 I=1,MHM1
MHMI = MH-I
MHPI = MH+I
W(MHMI) = .5*(Y(MHPI,J)+Y(I,J))
W(MHPI) = .5*(Y(MHPI,J)-Y(I,J))
124 CONTINUE
W(MH) = .5*Y(MH,J)
GO TO (126,125),MODD
125 W(M) = .5*Y(M,J)
126 CONTINUE
DO 127 I=1,M
Y(I,J) = W(I)
127 CONTINUE
128 CONTINUE
129 CONTINUE
C
C RETURN STORAGE REQUIREMENTS FOR W ARRAY.
C
W(1) = IPSTOR+IWP-1
RETURN
END
|
{"hexsha": "e1deb5ecd75110455440dbd9ad4ee1fbbc0f7bb1", "size": 11417, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "slatec/src/poistg.f", "max_stars_repo_name": "andremirt/v_cond", "max_stars_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "slatec/src/poistg.f", "max_issues_repo_name": "andremirt/v_cond", "max_issues_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slatec/src/poistg.f", "max_forks_repo_name": "andremirt/v_cond", "max_forks_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1605633803, "max_line_length": 72, "alphanum_fraction": 0.4781466235, "num_tokens": 4052}
|
% CS615 Aspects of System Administration
% Author: Jan Schaumann <jschauma@netmeister.org>
% $Id: slides.tex,v 1.6 2006/03/07 13:55:55 jschauma Exp $
\documentclass[xga]{xdvislides}
\usepackage[landscape]{geometry}
\usepackage{graphics}
\usepackage{graphicx}
\usepackage{colordvi}
\begin{document}
\setfontphv
%%% Headers and footers
\lhead{\slidetitle} % default:\lhead{\slidetitle}
\chead{CS615 - Aspects of System Administration}% default:\chead{\relax}
\rhead{Slide \thepage} % default:\rhead{\sectiontitle}
\lfoot{\Gray{DNS; Backup and Disaster Recovery}}% default:\lfoot{\slideauthor}
\cfoot{\relax} % default:\cfoot{\relax}
\rfoot{\Gray{\today}}
\newcommand{\smallish}{\fontsize{15}{20}\selectfont}
\vspace*{\fill}
\begin{center}
\Hugesize
CS615 - Aspects of System Administration\\ [1em]
DNS; Backup and Disaster Recovery \\ [1em]
\hspace*{5mm}\blueline\\ [1em]
\Normalsize
Department of Computer Science\\
Stevens Institute of Technology\\
Jan Schaumann\\
\verb+jschauma@stevens-tech.edu+
\verb+http://www.cs.stevens-tech.edu/~jschauma/615A/+
\end{center}
\vspace*{\fill}
\subsection{In the beginning...}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.8]{pics/2computers.eps} \\
\end{center}
\vspace*{\fill}
\subsection{In the beginning...}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.8]{pics/2computers-nic.eps} \\
\end{center}
\vspace*{\fill}
\subsection{In the beginning...}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.8]{pics/3computers.eps} \\
\end{center}
\vspace*{\fill}
\subsection{In the beginning...}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.8]{pics/3computers-1.eps} \\
\end{center}
\vspace*{\fill}
\subsection{In the beginning...}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.8]{pics/3computers-2.eps} \\
\end{center}
\vspace*{\fill}
\subsection{In the beginning...}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.8]{pics/arpanet1.eps} \\
\end{center}
\vspace*{\fill}
\subsection{In the beginning...}
\begin{verbatim}
# Host Database
# This file should contain the addresses and aliases
# for local hosts that share this file.
#
127.0.0.1 localhost localhost.
#
# RFC 1918 specifies that these networks are "internal".
# 10.0.0.0 10.255.255.255
# 172.16.0.0 172.31.255.255
# 192.168.0.0 192.168.255.255
10.0.0.1 UCLA-TEST
10.0.0.2 SRI-SPRM
10.0.0.4 UTAH-CS
\end{verbatim}
\subsection{But then...}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.3]{pics/routed.eps} \\
\end{center}
\vspace*{\fill}
\subsection{The Domain Name System}
\vspace{.5in}
\begin{center}
\Huge
Computers like numbers. \\
\vspace{.5in}
\begin{verbatim}
10011011111101100101100110011111
\end{verbatim}
\end{center}
\Normalsize
\subsection{The Domain Name System}
\vspace{.5in}
\begin{center}
\Huge
Computers like numbers. \\
\vspace{.5in}
\begin{verbatim}
10011011 11110110 01011001 10011111
155 . 246 . 89 . 159
\end{verbatim}
\end{center}
\Normalsize
\subsection{The Domain Name System}
\vspace{.5in}
\begin{center}
\Huge
People like names. \\
\vspace{.5in}
\verb+ash.cs.stevens-tech.edu+
\end{center}
\Normalsize
\subsection{The Domain Name System}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.6]{pics/phonebook.eps}
\end{center}
\vspace*{\fill}
\subsection{The New Phonebook is here!}
\vspace*{\fill}
\begin{center}
\verb+http://is.gd/XXp2sC+ \\
\addvspace{.5in}
\verb+wget -q -O - http://is.gd/XXp2sC | grep -c "^HOST"+
\end{center}
\vspace*{\fill}
\subsection{DNS: A distributed database}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.75]{pics/distributed-database.eps}
\end{center}
\vspace*{\fill}
\subsection{The Domain Name Space}
\vspace{.5in}
\begin{center}
\Huge
The domain name space consists of a tree of {\em domain} names.
\end{center}
\Normalsize
\subsection{DNS: A hierarchical system}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.75]{pics/hierarchical-dns.eps}
\end{center}
\vspace*{\fill}
\subsection{The Domain Name Space}
\vspace{.5in}
\begin{center}
\Huge
The domain name space consists of a tree of {\em domain} names. \\
\vspace{.5in}
A subtree divides into {\em zones}.
\end{center}
\Normalsize
\subsection{The Domain Name Space}
\vspace{.5in}
\begin{center}
\Huge
The domain name space consists of a tree of {\em domain} names. \\
\vspace{.5in}
A subtree divides into {\em zones}. \\
\vspace{.5in}
Each node may contain {\em resource records}.
\end{center}
\Normalsize
\subsection{The Domain Name Space}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.74]{pics/dns-space.eps}
\end{center}
\vspace*{\fill}
\subsection{Domain Names}
\vspace{.5in}
\begin{center}
\Huge
\verb+ash.cs.stevens-tech.edu+ \\
\vspace{.5in}
Domain Names are read from right to left and components separated by a ``\verb+.+''.
\end{center}
\Normalsize
\subsection{Domain Names}
\vspace{.5in}
\begin{center}
\Huge
\verb+ash.cs.stevens-tech.edu.+ \\
\vspace{.5in}
The {\em root} is known as ``\verb+.+'', but is usually left out.
\end{center}
\Normalsize
\subsection{Domain Names}
\vspace{.5in}
\begin{center}
\Huge
\verb+ash.cs.stevens-tech.+{\bf edu}\verb+.+ \\
\vspace{.5in}
There is a small number of {\em top level domains}.
\end{center}
\Normalsize
\subsection{Domain Names}
\vspace{.5in}
\begin{center}
\Huge
\verb+ash.cs.stevens-tech.+{\bf edu}\verb+.+ \\
\vspace{.5in}
There are a number of {\em top level domains}. \\
\vspace{.5in}
\Normalsize
\begin{verbatim}
wget -O - ftp://rs.internic.net/domain/root.zone | \
grep "IN<tab>*NS<tab>" | awk '{print $1}' | sort -u | wc -l
\end{verbatim}
\vspace{.25in}
\verb+http://data.iana.org/TLD/tlds-alpha-by-domain.txt+ \\
\verb+https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains+
\end{center}
\Normalsize
\subsection{Domain Names}
\vspace{.5in}
\begin{center}
\Huge
\verb+ash.cs.+{\bf stevens-tech}\verb+.edu.+ \\
\vspace{.5in}
Each {\em domain} can be divided into any number of {\em sub domains}.
\end{center}
\Normalsize
\subsection{Domain Names}
\vspace{.5in}
\begin{center}
\Huge
\verb+ash.+{\bf cs}\verb+.stevens-tech.edu.+ \\
\vspace{.5in}
Each {\em domain} can be divided into any number of {\em sub domains}.
\end{center}
\Normalsize
\subsection{Domain Names}
\vspace{.5in}
\begin{center}
\Huge
{\bf ash}\verb+.cs.stevens-tech.edu.+ \\
\vspace{.5in}
The left-most component of a domain name may be a {\em hostname}.
\end{center}
\Normalsize
\subsection{Fully Qualified Domain Names}
\vspace{.5in}
\begin{center}
\Huge
\verb+ash.cs.stevens-tech.edu.+ \\
\vspace{.5in}
A {\em hostname} with a domain name is known as a {\em FQDN}.
\end{center}
\Normalsize
\subsection{DNS servers come in two flavors}
\vspace*{\fill}
\begin{center}
\begin{tabular}{ c c c }
\includegraphics[scale=1.5]{pics/vanilla.eps} & \hspace{.5in} & \includegraphics[scale=1.5]{pics/chocolate.eps} \\
\hspace{.3in} \Huge Authoritative & & \hspace{.3in} \Huge Recursive \\
\hspace{.3in} \Huge Nameservers & & \hspace{.3in} \Huge Nameservers \\
\end{tabular}
\end{center}
\vspace*{\fill}
\subsection{Hostname resolution}
Resolution on a recursive nameserver (aka {\em resolver}) involves a number of queries:
\vspace{.5in}
\begin{verbatim}
$ nslookup ash.cs.stevens-tech.edu
Server: 127.0.0.1
Address: 127.0.0.1#53
Non-authoritative answer:
Name: ash.cs.stevens-tech.edu
Address: 155.246.89.159
$
\end{verbatim}
\subsection{Hostname resolution}
Resolution on a {\em resolver} involves a number of queries:
\begin{verbatim}
18:39:27.186778 IP panix.netmeister.org.62105 > i.root-servers.net.domain:
11585 [1au] A? ash.cs.stevens-tech.edu. (52)
18:39:27.446190 IP i.root-servers.net.domain > panix.netmeister.org.62105:
11585- 0/8/8 (494)
18:39:27.446994 IP panix.netmeister.org.53168 > a.gtld-servers.net.domain:
46575 [1au] A? ash.cs.stevens-tech.edu. (52)
18:39:27.481565 IP a.gtld-servers.net.domain > panix.netmeister.org.53168:
46575- 0/6/3 (609)
18:39:27.481998 IP panix.netmeister.org.41071 > nrac.stevens-tech.edu.domain:
24322 [1au] A? ash.cs.stevens-tech.edu. (52)
18:39:27.486035 IP nrac.stevens-tech.edu.domain > panix.netmeister.org.41071:
24322*- 1/2/3 A[|domain]
\end{verbatim}
\Normalsize
\subsection{Hostname resolution}
Resolution on a {\em resolver} involves a number of queries:
\begin{verbatim}
$ host -t ns .
. name server I.ROOT-SERVERS.NET.
. name server D.ROOT-SERVERS.NET.
. name server C.ROOT-SERVERS.NET.
. name server M.ROOT-SERVERS.NET.
. name server F.ROOT-SERVERS.NET.
. name server A.ROOT-SERVERS.NET.
. name server E.ROOT-SERVERS.NET.
. name server L.ROOT-SERVERS.NET.
. name server H.ROOT-SERVERS.NET.
. name server J.ROOT-SERVERS.NET.
. name server B.ROOT-SERVERS.NET.
. name server G.ROOT-SERVERS.NET.
. name server K.ROOT-SERVERS.NET.
$
\end{verbatim}
\subsection{Hostname resolution}
Resolution on a {\em resolver} involves a number of queries:
\begin{verbatim}
$ dig -t ns edu.
[...]
;; ANSWER SECTION:
edu. 172800 IN NS l.edu-servers.net.
edu. 172800 IN NS f.edu-servers.net.
edu. 172800 IN NS c.edu-servers.net.
edu. 172800 IN NS g.edu-servers.net.
edu. 172800 IN NS a.edu-servers.net.
edu. 172800 IN NS d.edu-servers.net.
;; ADDITIONAL SECTION:
c.edu-servers.net. 36626 IN A 192.26.92.30
d.edu-servers.net. 13274 IN A 192.31.80.30
l.edu-servers.net. 36626 IN A 192.41.162.30
[...]
$
\end{verbatim}
\Normalsize
\subsection{Hostname resolution}
Resolution on a {\em resolver} involves a number of queries:
\begin{verbatim}
$ dig @c.edu-servers.net -t ns stevens.edu.
[...]
;; AUTHORITY SECTION:
stevens.edu. 172800 IN NS nrac.stevens-tech.edu.
stevens.edu. 172800 IN NS sitult.stevens-tech.edu.
;; ADDITIONAL SECTION:
nrac.stevens-tech.edu. 172800 IN A 155.246.1.21
sitult.stevens-tech.edu. 172800 IN A 155.246.1.20
[...]
$
\end{verbatim}
\subsection{Hostname resolution}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.9]{pics/resolution.eps}
\end{center}
\vspace*{\fill}
\subsection{Hostname resolution}
Resolution on a {\em resolver} involves a number of queries:
\begin{verbatim}
$ nslookup ash.cs.stevens-tech.edu
Server: 127.0.0.1
Address: 127.0.0.1#53
Non-authoritative answer:
Name: ash.cs.stevens-tech.edu
Address: 155.246.89.159
$
\end{verbatim}
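\subsection{Hostname resolution}
Applications normally just ask the system's resolver via the standard
library. A minimal, illustrative example in Python:
\begin{verbatim}
import socket

for family, _, _, _, sockaddr in \
        socket.getaddrinfo("ash.cs.stevens-tech.edu", None):
    print(sockaddr[0])
\end{verbatim}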
\subsection{Hostname resolution}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.4]{pics/chicken-egg.eps} \\
\vspace*{\fill}
\end{center}
\subsection{Hostname resolution}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.4]{pics/chicken-egg.eps} \\
\addvspace{.2in}
\verb+$ ftp -o - ftp.internic.net:/domain/db.cache | more+ \\
\verb+http://www.internic.net/zones/named.root+
\vspace*{\fill}
\end{center}
\subsection{Operation Global Blackout}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.8]{pics/anonymous.eps} \\
\addvspace{.2in}
\verb+http://pastebin.com/XZ3EGsbc+ \\
\addvspace{.1in}
\end{center}
\vspace*{\fill}
\subsection{DNS: A distributed system}
\vspace{.5in}
\begin{center}
\Huge
There are 13 \verb+root+ servers. \\
\end{center}
\Normalsize
\subsection{DNS: A distributed system}
\vspace{.5in}
\begin{center}
\Huge
There are 13 \verb+root+ servers. \\
\vspace{.5in}
Except... there are more.
\end{center}
\Normalsize
\subsection{DNS: A distributed system}
\vspace{.5in}
\begin{center}
\Huge
There are 13 \verb+root+ {\em authorities}. \\
\end{center}
\Normalsize
\subsection{DNS: A distributed system}
\vspace{.5in}
\begin{center}
\Huge
There are 13 \verb+root server+ {\em addresses}. \\
\end{center}
\Normalsize
\subsection{DNS: A distributed system}
\vspace{.5in}
\begin{center}
\Huge
There are hundreds of \verb+root+ servers. \\
\end{center}
\Normalsize
\subsection{DNS: A distributed system}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.5]{pics/root-servers.eps}
\end{center}
\vspace*{\fill}
\subsection{Operation Global Blackout}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.8]{pics/anonymous-tweet.eps} \\
\end{center}
\vspace*{\fill}
\subsection{DNS: A distributed database}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.75]{pics/distributed-database.eps}
\end{center}
\vspace*{\fill}
\subsection{DNS Resource Records}
\begin{itemize}
\item {\em NS} -- an authoritative name server
\item {\em CNAME} -- the canonical name for an alias
\item {\em SOA} -- marks the start of a zone of authority
\item {\em PTR} -- a domain name pointer
\item {\em HINFO} -- host information
\item {\em MX} -- mail exchange
\item {\em TXT} -- text strings
\item ...
\end{itemize}
\subsection{DNS Resource Records}
Exercise: trace DNS queries for a reverse lookup (PTR):
\\
\begin{verbatim}
$ host ash.cs.stevens-tech.edu
ash.cs.stevens-tech.edu has address 155.246.89.159
ash.cs.stevens-tech.edu mail is handled by 0 guinness.cs.stevens-tech.edu.
$ host 155.246.89.159
159.89.246.155.in-addr.arpa domain name pointer ash.cs.stevens-tech.edu.
$
\end{verbatim}
\subsection{Creative uses of DNS Resource Records}
\begin{itemize}
\item identifying sources of SPAM
\item find out if the internet is on fire: \\
\verb|dig +short txt istheinternetonfire.com|
\item find AS numbers (ASNs) for IP addresses: \\
\verb|dig +short 159.89.246.155.origin.asn.cymru.com TXT|
\item check a resolver's source port randomization (to help
mitigate DNS Cache Poisoning attacks): \\
\verb|dig +short porttest.dns-oarc.net TXT|
\item using DNS to publish SSH key fingerprints (RFC4255,
ssh\_config(5) \verb+VerifyHostKeyDNS+; for best results combine with DNSSEC): \\
\verb|dig +short ftp.netbsd.org SSHFP|
\begin{verbatim}
ssh -o "VerifyHostKeyDNS yes" ftp.netbsd.org
[...]
Matching host key fingerprint found in DNS.
Are you sure you want to continue connecting (yes/no)?
\end{verbatim}
\end{itemize}
\newpage
\vspace*{\fill}
\begin{center}
\Hugesize
Hooray! \\ [1em]
\hspace*{5mm}
\blueline\\
\hspace*{5mm}\\
5 Minute Break
\end{center}
\vspace*{\fill}
\subsection{Backups}
\begin{itemize}
\item backup vs. restore
\end{itemize}
\subsection{Backups}
\begin{itemize}
\item backup vs. restore
\item backup devices and media
\end{itemize}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=2.0]{pics/daily-tapes.eps}
\end{center}
\vspace*{\fill}
\subsection{Backups}
\begin{itemize}
\item backup vs. restore
\item backup devices and media
\end{itemize}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.8]{pics/dlt-library.eps}
\end{center}
\vspace*{\fill}
\subsection{Backups}
\begin{itemize}
\item backup vs. restore
\item backup devices and media
\end{itemize}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=1.0]{pics/libraries.eps}
\end{center}
\vspace*{\fill}
\subsection{Backups}
\begin{itemize}
\item backup vs. restore
\item backup devices and media
\item filesystem considerations
\end{itemize}
\subsection{Backups}
\begin{itemize}
\item backup vs. restore
\item backup devices and media
\item filesystem considerations
\item backup strategies
\end{itemize}
\subsection{Backups}
\begin{itemize}
\item backup vs. restore
\item backup devices and media
\item filesystem considerations
\item backup strategies
\item planning for disasters
\end{itemize}
\subsection{Backups and Restore Basics}
When do we need backups?
\subsection{Backups and Restore Basics}
When do we need backups?
\begin{itemize}
\item disaster recovery: off-site storage of sensitive data
\item long-term storage requirements
\item recover from data loss
\end{itemize}
\subsection{Backups and Restore Basics}
When do we need backups?
\begin{itemize}
\item disaster recovery: off-site storage of sensitive data
\item long-term storage requirements
\item recover from data loss due to
\end{itemize}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=1.0]{pics/headcrash-closeup.eps}
\end{center}
\vspace*{\fill}
\subsection{Backups and Restore Basics}
When do we need backups?
\begin{itemize}
\item disaster recovery: off-site storage of sensitive data
\item long-term storage requirements
\item recover from data loss due to
\end{itemize}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=1.2]{pics/dumb-user.eps}
\end{center}
\vspace*{\fill}
\subsection{Backups and Restore Basics}
When do we need backups?
\begin{itemize}
\item disaster recovery: off-site storage of sensitive data
\item long-term storage requirements
\item recover from data loss due to
\end{itemize}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.4]{pics/20th-and-C.eps}
\end{center}
\vspace*{\fill}
\subsection{Backups and Restore Basics}
When do we need backups?
\begin{itemize}
\item disaster recovery: off-site storage of sensitive data
\item long-term storage requirements
\item recover from data loss due to
\end{itemize}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.6]{pics/hacker.eps}
\end{center}
\vspace*{\fill}
\subsection{Backups and Restore Basics}
When do we need backups?
\begin{itemize}
\item disaster recovery: off-site storage of sensitive data
\item long-term storage requirements
\item recover from data loss due to
\end{itemize}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.6]{pics/bugs.eps}
\end{center}
\vspace*{\fill}
\subsection{Backups and Restore Basics}
When do we need backups?
\begin{itemize}
\item disaster recovery: off-site storage of sensitive data
\item long-term storage requirements
\item recover from data loss due to
\begin{itemize}
\item equipment failure
\item bozotic users
\item natural disaster
\item security breach
\item software bugs
\end{itemize}
\end{itemize}
\subsection{Backups and Restore Basics}
When do we need backups?
\begin{itemize}
\item disaster recovery: off-site storage of sensitive data
\item long-term storage requirements
\item recover from data loss due to
\begin{itemize}
\item equipment failure
\item bozotic users
\item natural disaster
\item security breach
\item software bugs
\end{itemize}
\end{itemize}
\addvspace{.5in}
Think of your backups as {\em insurance}: you invest and pay for it, hoping
you will never need it.
\subsection{Key Reasons for Restores}
Three key reasons for restores: {\em Accidental File Deletion}, {\em Disk
Failure} and {\em Archival}.
\\
1. Accidental File Deletion
\begin{itemize}
\item ability to restore a file within a certain time frame
\item restore time, including
\begin{itemize}
\item actual time spent restoring
\item waiting until resources permit the restore
\item staff availability
\end{itemize}
\item self-service restore
\end{itemize}
\subsection{Key Reasons for Restores}
2. Disk Failure
\begin{itemize}
\item loss of entire file system
\item leads to downtime
\item RAID may help
\item takes long time to restore
\end{itemize}
\addvspace{.5in}
3. Archival
\begin{itemize}
\item {\em full} set of level 0 backups
\item separate set from regular backups
\item usually stored off-site
\item store for long time
\end{itemize}
\subsection{Filesystem backup}
{\tt dump(8)} / {\tt restore(8)}
\begin{itemize}
\item in use since \~{}1975
\item full filesystem level backups
\item direct interaction with tape devices
\item integration with {\tt /etc/fstab}
\item efficient incremental backups
\end{itemize}
\subsection{Filesystem backup}
\begin{itemize}
\item start an Ubuntu EC2 instance (e.g. \verb+ami-6de0dd04+)
\item create a full filesystem backup using \verb+dump(8)+
\item add the 'apache2' package
\item create an incremental backup
\item delete the 'apache2' package
\item restore all files from the incremental backup using the \verb+restore(8)+ command
\item verify that the 'apache2' package is fully installed
\end{itemize}
\subsection{Filesystem backup}
\begin{verbatim}
$ ssh ec2-instance "sudo dump -u -0 -f - /" | bzip2 -c -9 >tmp/ubuntu.0.bz2
DUMP: Date of this level 0 dump: Thu Mar 26 19:25:06 2015
DUMP: Dumping /dev/xvda1 (/) to standard output
DUMP: Label: cloudimg-rootfs
DUMP: Writing 10 Kilobyte records
DUMP: mapping (Pass I) [regular files]
DUMP: mapping (Pass II) [directories]
DUMP: estimated 823759 blocks.
DUMP: Volume 1 started with block 1 at: Thu Mar 26 19:25:07 2015
DUMP: dumping (Pass III) [directories]
DUMP: dumping (Pass IV) [regular files]
DUMP: Volume 1 completed at: Thu Mar 26 19:28:21 2015
DUMP: Volume 1 820690 blocks (801.46MB)
DUMP: Volume 1 took 0:03:14
DUMP: Volume 1 transfer rate: 4230 kB/s
DUMP: 820690 blocks (801.46MB)
DUMP: finished in 194 seconds, throughput 4230 kBytes/sec
DUMP: Date of this level 0 dump: Thu Mar 26 19:25:06 2015
DUMP: Date this dump completed: Thu Mar 26 19:28:21 2015
DUMP: Average transfer rate: 4230 kB/s
DUMP: DUMP IS DONE
\end{verbatim}
\subsection{Filesystem backup}
\begin{verbatim}
$ cat /var/lib/dumpdates
/dev/xvda1 0 Thu Mar 26 19:25:06 2015 +0000
$ sudo apt-get install apache2
[...]
$ ssh ec2-instance "sudo dump -u -1 -f - /" | bzip2 -c -9 >tmp/ubuntu.1.bz2
DUMP: Date of this level 1 dump: Thu Mar 26 19:56:45 2015
DUMP: Date of last level 0 dump: Thu Mar 26 19:25:06 2015
DUMP: Dumping /dev/xvda1 (/) to standard output
DUMP: Label: cloudimg-rootfs
DUMP: Writing 10 Kilobyte records
DUMP: mapping (Pass I) [regular files]
DUMP: mapping (Pass II) [directories]
DUMP: estimated 39644 blocks.
DUMP: Volume 1 started with block 1 at: Thu Mar 26 19:56:50 2015
DUMP: dumping (Pass III) [directories]
DUMP: dumping (Pass IV) [regular files]
DUMP: Volume 1 completed at: Thu Mar 26 19:56:56 2015
DUMP: Volume 1 39820 blocks (38.89MB)
DUMP: Volume 1 took 0:00:06
DUMP: Volume 1 transfer rate: 6636 kB/s
DUMP: 39820 blocks (38.89MB)
DUMP: finished in 5 seconds, throughput 7964 kBytes/sec
DUMP: Date of this level 1 dump: Thu Mar 26 19:56:45 2015
DUMP: Date this dump completed: Thu Mar 26 19:56:56 2015
DUMP: Average transfer rate: 6636 kB/s
DUMP: DUMP IS DONE
\end{verbatim}
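Restoring from the incremental dump might then look like this (an
illustrative invocation; actual paths will vary):
\begin{verbatim}
$ bzcat tmp/ubuntu.1.bz2 | ssh ec2-instance "cd / && sudo restore -rf -"
\end{verbatim}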
\subsection{Filesystem backup}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.7]{pics/flux-capacitor.eps}
\end{center}
\vspace*{\fill}
\subsection{Filesystem backup}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=2.5]{pics/flux-capacitor2.eps}
\end{center}
\vspace*{\fill}
\subsection{Filesystem backup}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.6]{pics/Time_Machine.eps}
\end{center}
\vspace*{\fill}
\subsection{Filesystem backup}
Example: Mac OS X ``Time Machine'':
\begin{itemize}
\item automatically creates a full backup (equivalent of a ``level 0 dump'')
      to a separate device or NAS, recording (specifically) the last-modified
      date of all directories
\item every hour, creates what appears to be a full copy: {\em hardlinks}
      (hence no additional disk space consumed) for files that have not
      changed, and a new copy of files that have changed (see the rsync
      sketch below)
\item changed files are determined by inspecting the last-modified date of
      directories (cheaper than comparing all files' last-modified dates
      or data)
\item saves hourly backups for 24 hours, daily backups for
the past month, and weekly backups for everything older than a month.
\end{itemize}
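The hardlink approach can be approximated on any Unix system with
\verb+rsync+ (an illustrative sketch, not Apple's actual implementation;
the directory names are made up):
\begin{verbatim}
$ rsync -a --link-dest=/backups/2015-03-25 /home/ /backups/2015-03-26/
\end{verbatim}
Unchanged files become hardlinks into the previous snapshot; only changed
files consume new space.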
\subsection{Filesystem backup}
Example: WAFL (Write Anywhere File Layout)
\begin{itemize}
\item used by NetApp's ``Data ONTAP'' OS
\item a snapshot is a read-only copy of a file system (cheap and near
instantaneous, due to CoW)
\item uses regular snapshots (``consistency points'', every 10 seconds)
to allow for speedy recovery from crashes
\end{itemize}
\vspace*{\fill}
\begin{center}
\includegraphics[scale=0.75]{pics/waffles.eps}
\end{center}
\vspace*{\fill}
\subsection{Filesystem backup}
Example: WAFL (Write Anywhere File Layout)
\vspace*{\fill}
\begin{center}
\includegraphics[scale=1.0]{pics/wafl0.eps}
\end{center}
\vspace*{\fill}
\subsection{Filesystem backup}
Example: WAFL (Write Anywhere File Layout)
\vspace*{\fill}
\begin{center}
\includegraphics[scale=1.0]{pics/wafl1.eps}
\end{center}
\vspace*{\fill}
\subsection{Filesystem backup}
Example: WAFL (Write Anywhere File Layout)
\vspace*{\fill}
\begin{center}
\includegraphics[scale=1.0]{pics/wafl2.eps}
\end{center}
\vspace*{\fill}
\subsection{Filesystem backup}
Example: WAFL (Write Anywhere File Layout)
\vspace*{\fill}
\begin{center}
\includegraphics[scale=1.0]{pics/wafl.eps}
\end{center}
\vspace*{\fill}
\subsection{Filesystem backup}
Example: ZFS snapshots
\begin{itemize}
\item ZFS uses a copy-on-write transactional object model (new data does
not overwrite existing data, instead modifications are written to a
new location with existing data being referenced), similar to WAFL
\item a snapshot is a read-only copy of a file system (cheap and near
instantaneous, due to CoW)
\item initially consumes no additional disk space; the writable filesystem
is made available as a ``clone''
\item conceptually provides a branched view of the filesystem; normally
only the ``active'' filesystem is writable
\end{itemize}
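For example (pool and dataset names are illustrative):
\begin{verbatim}
# zfs snapshot tank/home@pre-upgrade
# zfs list -t snapshot
# zfs clone tank/home@pre-upgrade tank/home-clone
# zfs rollback tank/home@pre-upgrade
\end{verbatim}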
\subsection{ZFS Snapshots}
\smallish
\begin{verbatim}
$ pwd
/home/jschauma
$ ls -l .z*
ls: cannot access .z*: No such file or directory
$
\end{verbatim}
\Normalsize
\subsection{ZFS Snapshots}
\smallish
\begin{verbatim}
$ pwd
/home/jschauma
$ ls -l .z*
ls: cannot access .z*: No such file or directory
$ ls -lid .zfs
1 dr-xr-xr-x 3 root root 3 Jan 10 2013 .zfs
$
\end{verbatim}
\Normalsize
\subsection{ZFS Snapshots}
\smallish
\begin{verbatim}
$ pwd
/home/jschauma
$ ls -l .z*
ls: cannot access .z*: No such file or directory
$ ls -lid .zfs
1 dr-xr-xr-x 3 root root 3 Jan 10 2013 .zfs
$ ls -lai .zfs/snapshot
total 13
2 dr-xr-xr-x 4 root root 4 Feb 28 21:00 .
1 dr-xr-xr-x 3 root root 3 Jan 10 2013 ..
4 drwx--x--x 37 jschauma professor 88 Feb 24 22:32 amanda-_export_home_jschauma-0
4 drwx--x--x 37 jschauma professor 88 Feb 26 11:47 amanda-_export_home_jschauma-1
$
\end{verbatim}
\Normalsize
\subsection{ZFS Snapshots}
\smallish
\begin{verbatim}
$ pwd
/home/jschauma
$ ls -l .z*
ls: cannot access .z*: No such file or directory
$ ls -lid .zfs
1 dr-xr-xr-x 3 root root 3 Jan 10 2013 .zfs
$ ls -lai .zfs/snapshot
total 13
2 dr-xr-xr-x 4 root root 4 Feb 28 21:00 .
1 dr-xr-xr-x 3 root root 3 Jan 10 2013 ..
4 drwx--x--x 37 jschauma professor 88 Feb 24 22:32 amanda-_export_home_jschauma-0
4 drwx--x--x 37 jschauma professor 88 Feb 26 11:47 amanda-_export_home_jschauma-1
$ cd .zfs/snapshot
$ echo foo > amanda-_export_home_jschauma-0/oink
-ksh: amanda-_export_home_jschauma-0/oink: cannot create [Read-only file system]
$ ls -laid . /
2 dr-xr-xr-x 4 root root 4 Feb 28 21:00 .
2 drwxr-xr-x 26 root root 4096 Jan 27 11:44 /
\end{verbatim}
\Normalsize
\subsection{ZFS Snapshots}
\smallish
\begin{verbatim}
$ pwd
/home/jschauma/.zfs/snapshot
$ ls -lai amanda-_export_home_jschauma-0 >/tmp/a
$ ls -lai amanda-_export_home_jschauma-1 >/tmp/b
$ diff -bu /tmp/[ab]
--- /tmp/a 2014-03-01 22:55:49.000000000 -0500
+++ /tmp/b 2014-03-01 22:55:59.000000000 -0500
@@ -35,7 +35,7 @@
57723 drwx------ 3 jschauma professor 6 Dec 31 15:08 .subversion
49431 -rw------- 1 jschauma professor 6 Dec 22 12:25 .sws.pid
20 drwx------ 2 jschauma professor 3 Jan 26 10:30 .vim
-61768 -rw------- 1 jschauma professor 14538 Feb 24 22:32 .viminfo
+61775 -rw------- 1 jschauma professor 14557 Feb 26 09:23 .viminfo
173 -rw------- 1 jschauma professor 4355 Sep 17 2012 .vimrc
45744 -rw-r--r-- 1 jschauma professor 0 Jul 28 2013 .xsession-errors
21 drwxr-xr-x 3 jschauma professor 6 Apr 4 2010 CS615A
$
\end{verbatim}
\Normalsize
\subsection{HW \#4}
Data backup to the cloud \\
\verb+https://www.cs.stevens.edu/~jschauma/615/s16-hw5.html+
\\
\verb+https://www.cs.stevens.edu/~jschauma/615/ec2-backup.txt+
\subsection{Reading}
Hurricane Sandy
\begin{itemize}
\item \verb+http://is.gd/aaxzvI+
\item \verb+http://is.gd/Y75pEA+
\item \verb+http://is.gd/32Az7y+
\item \verb+http://is.gd/FhAuFZ+
\end{itemize}
\subsection{Reading}
Manual Pages:
\begin{itemize}
\item \verb+dump(8)+ and \verb+restore(8)+
\end{itemize}
Filesystem snapshots:
\begin{itemize}
\item \verb+https://en.wikipedia.org/wiki/Snapshot_(computer_storage)+
\item \verb+https://en.wikipedia.org/wiki/Time_Machine_(Apple_software)+
\item \verb+http://comet.lehman.cuny.edu/jung/cmp426697/WAFL.pdf+
\item \verb+http://www.cs.tau.ac.il/~ohadrode/slides/WAFL.pdf+
\end{itemize}
\vspace{.5in}
Book: \verb+http://www.oreilly.com/catalog/unixbr/+
\end{document}
|
{"hexsha": "62aa69578e7c962e77e60079910da3f428924f4d", "size": 28902, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "07-backup-dns/slides.tex", "max_stars_repo_name": "jschauma/cs615asa", "max_stars_repo_head_hexsha": "54f9b03689c884966b131a13c5c56bf817a2adbb", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2016-02-11T16:59:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T01:06:30.000Z", "max_issues_repo_path": "07-backup-dns/slides.tex", "max_issues_repo_name": "jschauma/cs615asa", "max_issues_repo_head_hexsha": "54f9b03689c884966b131a13c5c56bf817a2adbb", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "07-backup-dns/slides.tex", "max_forks_repo_name": "jschauma/cs615asa", "max_forks_repo_head_hexsha": "54f9b03689c884966b131a13c5c56bf817a2adbb", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2015-02-11T01:12:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-13T11:38:13.000Z", "avg_line_length": 26.6869806094, "max_line_length": 115, "alphanum_fraction": 0.710539063, "num_tokens": 9451}
|
#!/usr/bin/python3
"""
This script generates the write-heavy access pattern for ABDB vs Redis evaluation.
"""
import pandas as pd
import argparse
import numpy as np
parser = argparse.ArgumentParser(description='Write-heavy access pattern generator')
parser.add_argument('function_count', type=int, help='Number of functions')
parser.add_argument('mu', type=float, help='Expected value of the write count per sec')
parser.add_argument('sigma', type=float, help='Deviation of the write count per sec')
parser.add_argument('max_read_count', type=int,
help='The maximum possible read count of a function. The actual value is calculated according to uniform distribution between 0 and this given value.')
parser.add_argument('max_iter_to_migrate', type=int,
help='The maximum possible iteration number after a function is migrated. The actual value is calculated according to uniform distribution between 0 and this given value.')
parser.add_argument('keys_count', type=int,
help='The number of keys which are accessed by the functions')
parser.add_argument('server_count', type=int,
help='The number of the server cluster. These servers run the functions and store the data')
args = parser.parse_args()
rows = []
for i in range(args.function_count):
    func_name = 'function_{}'.format(i)
    write_count = int(np.random.normal(args.mu, args.sigma, 1)[0])
    read_count = np.random.randint(0, args.max_read_count, 1)[0]
    # Migrate the function in each (calculated value below)th iteration
    migration = np.random.randint(0, args.max_iter_to_migrate, 1)[0]
    key = 'key{}'.format(np.random.randint(0, args.keys_count, 1)[0])
    server = 'server{}'.format(np.random.randint(0, args.server_count, 1)[0])
    rows.append({'function_name': func_name, 'reading_count': read_count,
                 'writing_count': write_count, 'when_to_move': migration,
                 'key_to_access': key, 'initial_server': server})
# Build the frame once from the collected rows; DataFrame.append was
# deprecated and has since been removed from pandas.
df = pd.DataFrame(rows, columns=['function_name', 'reading_count', 'writing_count',
                                 'when_to_move', 'key_to_access', 'initial_server'])
pd.options.display.width = 0
print(df)
print("Writing dataframe to a csv file...")
df.to_csv('write_heavy_access_pattern.csv',index=False)
print("DONE")
|
{"hexsha": "161d411055c776b972f41b7f7fb80641299585c5", "size": 2327, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/journal/gen_write_heavy_access_pattern.py", "max_stars_repo_name": "hsnlab/annabellaDB", "max_stars_repo_head_hexsha": "daeaf394babd07b6d980a3eaa74be6614e7124b8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-08-25T23:37:31.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-12T02:36:22.000Z", "max_issues_repo_path": "scripts/journal/gen_write_heavy_access_pattern.py", "max_issues_repo_name": "hsnlab/annabellaDB", "max_issues_repo_head_hexsha": "daeaf394babd07b6d980a3eaa74be6614e7124b8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/journal/gen_write_heavy_access_pattern.py", "max_forks_repo_name": "hsnlab/annabellaDB", "max_forks_repo_head_hexsha": "daeaf394babd07b6d980a3eaa74be6614e7124b8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-04T03:53:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-04T03:53:41.000Z", "avg_line_length": 51.7111111111, "max_line_length": 192, "alphanum_fraction": 0.7150837989, "include": true, "reason": "import numpy", "num_tokens": 513}
|
#include <Interpreters/Access/InterpreterShowAccessQuery.h>
#include <Parsers/formatAST.h>
#include <Interpreters/Context.h>
#include <Interpreters/Access/InterpreterShowCreateAccessEntityQuery.h>
#include <Interpreters/Access/InterpreterShowGrantsQuery.h>
#include <Columns/ColumnString.h>
#include <Processors/Sources/SourceFromSingleChunk.h>
#include <DataTypes/DataTypeString.h>
#include <Access/Common/AccessFlags.h>
#include <Access/AccessControl.h>
#include <base/range.h>
#include <boost/range/algorithm/sort.hpp>
#include <boost/range/algorithm_ext/push_back.hpp>
namespace DB
{
using EntityType = IAccessEntity::Type;
BlockIO InterpreterShowAccessQuery::execute()
{
BlockIO res;
res.pipeline = executeImpl();
return res;
}
QueryPipeline InterpreterShowAccessQuery::executeImpl() const
{
/// Build a create query.
ASTs queries = getCreateAndGrantQueries();
/// Build the result column.
MutableColumnPtr column = ColumnString::create();
WriteBufferFromOwnString buf;
for (const auto & query : queries)
{
buf.restart();
formatAST(*query, buf, false, true);
column->insert(buf.str());
}
String desc = "ACCESS";
return QueryPipeline(std::make_shared<SourceFromSingleChunk>(Block{{std::move(column), std::make_shared<DataTypeString>(), desc}}));
}
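/// Collect every access entity visible via SHOW ACCESS, sorted by type and name.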
std::vector<AccessEntityPtr> InterpreterShowAccessQuery::getEntities() const
{
const auto & access_control = getContext()->getAccessControl();
getContext()->checkAccess(AccessType::SHOW_ACCESS);
std::vector<AccessEntityPtr> entities;
for (auto type : collections::range(EntityType::MAX))
{
auto ids = access_control.findAll(type);
for (const auto & id : ids)
{
if (auto entity = access_control.tryRead(id))
entities.push_back(entity);
}
}
boost::range::sort(entities, IAccessEntity::LessByTypeAndName{});
return entities;
}
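/// Build CREATE queries for all entities, followed by GRANT queries for users and roles.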
ASTs InterpreterShowAccessQuery::getCreateAndGrantQueries() const
{
auto entities = getEntities();
const auto & access_control = getContext()->getAccessControl();
ASTs create_queries, grant_queries;
for (const auto & entity : entities)
{
create_queries.push_back(InterpreterShowCreateAccessEntityQuery::getCreateQuery(*entity, access_control));
if (entity->isTypeOf(EntityType::USER) || entity->isTypeOf(EntityType::ROLE))
boost::range::push_back(grant_queries, InterpreterShowGrantsQuery::getGrantQueries(*entity, access_control));
}
ASTs result = std::move(create_queries);
boost::range::push_back(result, std::move(grant_queries));
return result;
}
}
|
{"hexsha": "a385f6c8d7a1183a4870d2b67d928f391e20cc54", "size": 2680, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Interpreters/Access/InterpreterShowAccessQuery.cpp", "max_stars_repo_name": "540522905/ClickHouse", "max_stars_repo_head_hexsha": "299445ec7da10bd2ef62d8e333a95b7ab12bf5f2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2022-03-15T16:35:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T12:39:52.000Z", "max_issues_repo_path": "src/Interpreters/Access/InterpreterShowAccessQuery.cpp", "max_issues_repo_name": "540522905/ClickHouse", "max_issues_repo_head_hexsha": "299445ec7da10bd2ef62d8e333a95b7ab12bf5f2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Interpreters/Access/InterpreterShowAccessQuery.cpp", "max_forks_repo_name": "540522905/ClickHouse", "max_forks_repo_head_hexsha": "299445ec7da10bd2ef62d8e333a95b7ab12bf5f2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-06-03T10:40:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-03T10:40:43.000Z", "avg_line_length": 29.7777777778, "max_line_length": 136, "alphanum_fraction": 0.7138059701, "num_tokens": 616}
|
%% Regression test data generator
% This program should be executed at the very beginning of refactoring a
% major revision. This is for making legacy STRAIGHT v40_007d to be
% compatible with MATLAB R2015b and Octave
% Copyright(c) 2016, Hideki Kawahara, (kawahara@sys.wakayama-u.ac.jp)
clear all
close all
original_speech_dir = '~/Music/VCTK_CORPUS/VCTK-Corpus/wav48/';
target_analysis_dir = '~/m-file/STRAIGHTV40_007e/analysisData/';
target_wave_dir = '~/m-file/STRAIGHTV40_007e/waveData/';
mkdir(target_analysis_dir);
mkdir(target_wave_dir);
dir_list = dir([original_speech_dir 'p*']);
%%
n_dirs = length(dir_list);
n_files = 0;
for ii = 1:n_dirs
tmp_files = dir([original_speech_dir dir_list(ii).name '/*.wav']);
n_files = n_files + length(tmp_files);
end;
%%
n_test = 2; % number of files tested for each speaker
l_segment = 0.1; % 100 ms segment
for ii = 1:n_dirs
rng(12345); % initialize frozen random number
seq_id = 0;
basic_stat_table = zeros(n_files, 4);
tmp_files = dir([original_speech_dir dir_list(ii).name '/*.wav']);
for jj = 1:length(tmp_files)
[x, fs] = audioread([original_speech_dir dir_list(ii).name '/' ...
tmp_files(jj).name ]);
seq_id = seq_id + 1;
l_in_sample_segment = min(length(x), round(fs * l_segment));
n_segment = floor(length(x) / l_in_sample_segment);
rms_level = zeros(n_segment, 1);
for kk = 1:n_segment
rms_level(kk) = 20 * ...
log10(std(x((kk - 1) * l_in_sample_segment + ...
(1:l_in_sample_segment))));
end;
sorted_level = sort(rms_level);
basic_stat_table(seq_id, 1) = length(x) / fs;
basic_stat_table(seq_id, 2) = max(rms_level) - min(rms_level);
basic_stat_table(seq_id, 3) = ...
sorted_level(round(length(sorted_level) * 0.85));
basic_stat_table(seq_id, 4) = max(abs(x));
end;
basic_stat_table = basic_stat_table(1:seq_id, :);
% select safe region
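    % (keep files whose duration, dynamic range and 85th-percentile level all
    % lie between the 10th and 90th percentiles, and whose peak is below 0.95)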
sorted_length = sort(basic_stat_table(:, 1));
sorted_dynamic_range = sort(basic_stat_table(:, 2));
sorted_85percent = sort(basic_stat_table(:, 3));
l_10 = sorted_length(round(seq_id * 0.1));
l_90 = sorted_length(round(seq_id * 0.9));
d_10 = sorted_dynamic_range(round(seq_id * 0.1));
d_90 = sorted_dynamic_range(round(seq_id * 0.9));
v_10 = sorted_85percent(round(seq_id * 0.1));
v_90 = sorted_85percent(round(seq_id * 0.9));
index_list = 1:seq_id;
safe_index = index_list( ...
l_10 < basic_stat_table(:, 1) & l_90 > basic_stat_table(:, 1) & ...
d_10 < basic_stat_table(:, 2) & d_90 > basic_stat_table(:, 2) & ...
v_10 < basic_stat_table(:, 3) & v_90 > basic_stat_table(:, 3) & ...
basic_stat_table(:, 4) < 0.95);
selection_index = 1:length(safe_index);
    % random permutation of ALL safe files, so the n_test sample is uniform
    [~, tmp_index] = sort(rand(length(selection_index), 1));
selection_index = selection_index(tmp_index(1:n_test));
for kk = 1:n_test
id = safe_index(selection_index(kk));
[x, fs] = audioread([original_speech_dir dir_list(ii).name '/' ...
tmp_files(id).name ]);
rng(12345); % initialize frozen random number
f0raw = MulticueF0v14(x,fs);
ap = exstraightAPind(x,fs,f0raw);
n3sgram=exstraightspec(x,f0raw,fs);
rng(12345); % initialize frozen random number
y = exstraightsynth(f0raw,n3sgram,ap,fs);
disp([num2str(kk) ': ' tmp_files(id).name ' at:' datestr(now)]);
audiowrite([target_wave_dir '/' tmp_files(id).name], ...
y / max(abs(y)) * 0.9, fs);
path_name_f0 = [target_analysis_dir '/' tmp_files(id).name(1:end-4) 'f0.bin'];
path_name_ap = [target_analysis_dir '/' tmp_files(id).name(1:end-4) 'ap.bin'];
path_name_sp = [target_analysis_dir '/' tmp_files(id).name(1:end-4) 'sp.bin'];
WriteBinaryData(path_name_f0, f0raw)
WriteBinaryData(path_name_ap, ap)
WriteBinaryData(path_name_sp, n3sgram)
end;
end;
|
{"author": "HidekiKawahara", "repo": "legacy_STRAIGHT", "sha": "964684981fe12cd232c5e882259dff126b3af0f2", "save_path": "github-repos/MATLAB/HidekiKawahara-legacy_STRAIGHT", "path": "github-repos/MATLAB/HidekiKawahara-legacy_STRAIGHT/legacy_STRAIGHT-964684981fe12cd232c5e882259dff126b3af0f2/src/regressionTestBaseGenerator.m"}
|
\documentstyle[11pt,reduce,makeidx]{article}
\title{TRIGD: Trigonometrical Functions\\ with Degree Arguments }
\date{}
\author{Alan Barnes \\
School of Engineering \& Applied Science \\
Aston University, Aston Triangle, \\
Birmingham B4 7ET \\ GREAT BRITAIN (now retired)\\
Email: Alan.Barnes45678@gmail.com}
\begin{document}
\maketitle
\index{trig functions} \index{degree arguments}
\index{Barnes, Alan}
\section{Introduction}
This module provides facilities for the numerical evaluation and algebraic simplification of expressions involving trigonometrical functions with arguments given in degrees rather than in radians. The degree-valued inverse functions are also provided.
Any user at all familiar with the normal trig functions in {\REDUCE} should have no trouble in using the facilities of this module. The names of the degree-based functions are those of the normal trig functions with the letter D appended, for example \texttt{SIND}, \texttt{COSD} and \texttt{TAND} denote the sine, cosine and tangent respectively and their corresponding inverse functions are \texttt{ASIND}, \texttt{ACOSD} and \texttt{ATAND}. The secant, cosecant and cotangent functions and their inverses are also supported and, indeed, are treated more as {\it first class} objects than their corresponding radian-based functions, which are often converted to expressions involving sine and cosine by some of the standard {\REDUCE} simplification rules.
Below I give a brief description of the facilities available together with a few examples of their use. More examples and the output that they should produce may be found in the test files \texttt{trigd-num.tst} and \texttt{trigd-simp.tst} and their corresponding log files with extension \texttt{.rlg} which may be found in the directory \texttt{packages/misc} of the {\REDUCE} distribution along with the source code of the module.
These degree-based functions are probably best regarded as functions defined for {\em real} values only, but complex arguments are supported for completeness. The numerical evaluation routines are fairly comprehensive for both real and complex arguments.
However, few simplifications occur for trigd functions with complex arguments.
The range of the principal values returned by the inverse functions is consistent with those of the corresponding radian-valued functions. More precisely, for \texttt{ASIND}, \texttt{ATAND} and \texttt{ACSCD} the (closure of the) range is [-90, 90] whilst for \texttt{ACOSD}, \texttt{ACOTD} and \texttt{ASECD} the (closure of the) range is [0, 180]. In addition the operator \texttt{ATAN2D} is the degree valued version of the two argument inverse tangent function which returns an angle in the interval (-180, 180] in the correct quadrant depending on the signs of its two arguments. For $X>0$, \texttt{ATAN2D(Y, X)} returns the same numerical value as \texttt{ATAND(Y/X)}. If $X=0$ then $\pm 90$ is returned depending on the sign of $Y$.
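For example, \texttt{ATAN2D(1, -1)} evaluates to 135 (the point $(-1, 1)$
lies in the second quadrant), while \texttt{ATAN2D(-1, 0)} evaluates
to $-90$.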
It might be thought that the facilities provided in this module could easily be implemented by defining suitable rule lists to convert between the radian and degree-based versions of the trig functions. For example:
\begin{verbatim}
1: operator sind, asind$
2: d2r_list := {sind(~x) =>
sin(x*pi/180), asind(~x) => 180*asin(x)/pi}$
3: r2d_list := {sin(~x) =>
sind(180*x/pi), asin(~x) => pi*asind(x)/180}$
4: sind(x+360) where d2r_list$
5: ws where r2d_list;
sind(x)
6: sind(360) where d2r_list;
0
\end{verbatim}
However, this approach \emph{seldom works} --- try it! The result produced by step 4 defeats the current rule\footnote{These rules may be improved in the next version of {\REDUCE}.}
used to simplify expressions of the form $\sin(x+2\pi)$ although it does manage step 6.
The rule list approach is more reliable if differentiation, integration or numerical evaluation of expressions involving \texttt{SIND} etc.\ is required. However it is not particularly convenient even if the rules and operator declarations are stored in a file so that they may be loaded at will.
This module aims to overcome these deficiencies by providing the degree-based trig functions as \emph{first class} objects of the system just like their radian-based cousins. The aim is to provide facilities for numerical evaluation, symbolic simplification and differentiation totally analogous to those for the basic trig functions and their inverses. It is hoped that the module will be of value to students and teachers at secondary school level as well as being sufficiently powerful and flexible to be of genuine utility in fields where angles measured in degrees (and arc minutes and seconds) are in common usage. For more advanced situations (involving integration, complex arguments and values etc.), users are urged to use the standard trig functions already provided by the system.
\section{Simplification}
As in other parts of {\REDUCE}, basic simplification of expressions involving the \texttt{trigd} functions takes place automatically (bracketed terms are multiplied out, like terms are gathered together, zero terms removed from sums and so on). The system {\it knows} and automatically applies the basic properties of the functions to simplify the input. For example \texttt{SIND(0)} is replaced by 0 and \texttt{SIND(-X)} by \texttt{-SIND(X)}. If the switch \texttt{ROUNDED} is \texttt{OFF} all arithmetic is exact and transcendental functions such as \texttt{SIND} are not evaluated numerically even if their arguments are purely numerical.
The built-in simplification rules are totally analogous to those of the standard trig functions namely:
\begin{itemize}
\item Replacement of a function application by its value if a simple analytical value is known. For example \texttt{cosd(60) => 1/2} and \texttt{acscd(1) => 90}. Currently the only argument values where simplification takes place correspond to angles that are integral multiples of $15^o$.
\item Use of the odd and even properties of the trig.\ functions so that for example
\texttt{sind(-x) => -sind(x)}, \texttt{cosd(-x) => cosd(x)} and \\
\texttt{acosd(-x) => 180 - acosd(x)}.
\item Argument shifts by integral multiples of $180^o$ so that any residual numerical argument lies in the range $-90^o \ldots 90^o$. \\
Thus \texttt{sind(x+540) => -sind(x)}, \texttt{cosd(x+350) => cosd(x-10)}.
\item Removal of argument shifts of $\pm 90^o$ so that for example \\
\texttt{sind(x-90) => -cosd(x)} and \texttt{cotd(x+90) => -tand(x)}.
\item Replacement of \texttt{tand(x)} by \texttt{sind(x)/cosd(x)}, \texttt{secd(x)} by \texttt{1/cosd(x)} and the like, but \emph{only when the final result is simpler than the original}.
\item Basic properties relating a function and its inverse so that for example \\
\texttt{sind(asind(x)) => x}.
\item A few basic rules for \texttt{ATAN2D} when the signs of its arguments can be determined. For example \texttt{atan2d(Y, 0)} is replaced by $\pm 90$ depending on the sign of $Y$.
\end{itemize}
Extra rules can be added by the user as and when required (for example addition formulae, double-angle rules and tangent half-angle formulae), as described in section 11 of the main {\REDUCE} manual.
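For instance, a double-angle rule for \texttt{SIND} could be declared in
the rule-list style used earlier (a sketch; this rule is not built in):
\begin{verbatim}
double_angle := {sind(2*~x) => 2*sind(x)*cosd(x)}$
sind(2*y) where double_angle;
\end{verbatim}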
Rules are provided for the symbolic differentiation of all the trig functions and their inverses. These rules are sufficient for the power series of the trig functions and their inverses to be found using either the \texttt{TPS} or \texttt{TAYLOR} packages in the standard way.
\section{Numerical Evaluation}
When the switch \texttt{ROUNDED} is \texttt{ON} and the arguments of the operators evaluate to numbers, then the floating point value of the expression is calculated to the currently specified \texttt{PRECISION} in the normal way. The {\it bigfloat} capabilities are the same as for the standard trig functions.
If these functions are supplied with complex numerical arguments, numerical evaluation will \emph{NOT} be performed when the switch \texttt{ROUNDED} is \texttt{ON}, but the switch \texttt{COMPLEX} is \texttt{OFF} --- the input expression will be returned basically unaltered. Similarly inputs such as
\texttt{ASIND(2)} or \texttt{ASECD(0.5)} are not evaluated numerically. The values of these expressions are, of course, complex.
If the switch \texttt{COMPLEX} is also \texttt{ON} , numerical evaluation is performed. For example:
\begin{verbatim}
1: load_package trigd$
2: on rounded;
3: asecd(2);
60.0
4: asecd(0.5);
asecd(0.5)
5: on complex;
*** Domain mode rounded changed to complex-rounded
6: asecd(0.5);
75.4561292902*i
\end{verbatim}
The function \texttt{ATAN2D} (like \texttt{ATAN2}) is only defined if \emph{BOTH} its arguments are real. If they are also numerical, it will be evaluated whenever \texttt{ROUNDED} is \texttt{ON}. Attempting to evaluate it with complex numerical arguments will cause either the unaltered expression to be returned or an error to be
raised when the switch \texttt{COMPLEX} is \texttt{OFF} or \texttt{ON} respectively.
\subsection{Conversion between Degrees and Radians}
There are a number of utility routines for converting an angle in radians to degrees and vice-versa. \texttt{RAD2DEG} converts the radian value to an angle in degrees expressed as a single floating point value (according to the currently specified \texttt{PRECISION}).
The value to be converted may be an integer, a rational or a floating value or indeed any expression that simplies to a rounded value. In particular numerical constants such as $\pi$ may be used in the input expression.
\texttt{RAD2DMS} converts the radian value to an angle expressed in degrees, minutes and seconds returned as a three element list. The degree and minute values are integers the latter in the range $0 \ldots 59$ inclusive and the seconds value is a floating point value in the interval [0, 60.0).
There are also operators \texttt{DEG2RAD} and \texttt{DEG2DMS} whose purpose should be obvious.
The purpose of the operators \texttt{DMS2RAD} and \texttt{DMS2DEG} should also be obvious. The degree, minute and second value to be converted is passed to the conversion function as a three element list. There is considerable flexibility allowed in the format of the list supplied as parameter -- all three values may be integers, rational numbers or rounded values or any combination of these; the minute and second values need not lie between zero and sixty. The list supplied is simplified with the appropriate {\it carries and borrows} performed (in effect at least) between the three values. For example
\begin{verbatim}
{60.5, 9.2, 11.234} => {60, 39, 23.234}
{45, 0, -1} => {44, 59, 59}
\end{verbatim}
These operators are not actually part of the \texttt{TRIGD} module but of the {\REDUCE} core system. However, they are not currently documented in the main manual. Currently they are \emph{purely numeric} operators; when \texttt{ROUNDED} is \texttt{OFF} they basically return the input expression (perhaps with their parameter simplified somewhat).
Note that the sine of an angle specified in degrees, minutes and seconds \emph{cannot} be calculated by calling \texttt{SIND} directly with a dms list (i.e. as a list of length 3). Instead one must first convert the dms values to degrees using a call to \texttt{DMS2DEG} and then call \texttt{SIND} on the result. Applied directly to a list (of any length) any \texttt{TRIGD} function will be applied to each member of the list separately just like most other {\REDUCE} operators. Here is an example illustrating these points:
\begin{verbatim}
1: load_package trigd$
2: on rounded;
3: sind dms2deg {60, 45, 30};
0.872567064923
4: sind {60,45, 30};
{0.866025403784,0.707106781187,0.5}
5: off rounded;
6: sind{60, 45, 30};
sqrt(3) sqrt(2) 1
{---------,---------,---}
2 2 2
\end{verbatim}
Of course the results will be formatted much more attractively on a terminal supporting nice graphics.
\subsection{The operators \texttt{ARGD} and \texttt{ARG}}
Although not directly related to the trig functions, the module \texttt{TRIGD} also provides an operator \texttt{ARGD}; when the switches \texttt{ROUNDED} and \texttt{COMPLEX} are both \texttt{ON}, it will return the argument in degrees of the complex number supplied as its parameter --- supplying zero as the parameter causes an error to be raised.
If only \texttt{ROUNDED} is \texttt{ON},
\texttt{ARGD} will return the argument of the real numerical value supplied as its parameter --- this will be 0 or 180 when the value is positive or negative respectively.
The operator \texttt{ARG} is similar to \texttt{ARGD}, but returns the argument expressed in radians. There is also an operator \texttt{NORM} which returns the modulus (or absolute value or norm) of a complex number. \texttt{ARG} and \texttt{NORM} are actually part of the {\REDUCE} core system, but are not currently documented in the main manual. Currently they are \emph{purely numeric} operators; when \texttt{ROUNDED} is \texttt{OFF} they basically return the input expression (perhaps with their parameter simplified).
Example
\begin{verbatim}
1: load_package trigd$
2: on rounded;
3: {argd(-5), argd(1+i)};
{180.0, argd(i + 1)}
4: on complex;
*** Domain mode rounded changed to complex-rounded
5: {argd(1+i), argd(-1-i)};
{45.0, -135.0}
6: {arg(3+4i), norm(3+4i)};
{0.927295218002, 5.0}
\end{verbatim}
\section{Bugs, Restrictions and Planned Extensions}
The behaviour of the numerical evaluation routines for inverse trig functions with complex arguments at branch points could be improved; these values are \emph{undefined} and attempting to evaluate such a function at one of its branch points \emph{ought} to raise an error, however sometimes the input expression will be returned unaltered. It is hoped to improve this behaviour in due course.
Currently there are no facilities analogous to those provided in the module \texttt{TRIGSIMP} for the standard trig.\ functions. There users have a wide range of standard simplification formulae available for use and can control which are to be used depending on the requirements of their particular application: whether to eliminate
\texttt{sin} in favour of \texttt{cos} or vice-versa or to get rid of both in favour of \texttt{tan} of half-angles; or whether to use the trigonometrical addition formulae in order to transform trig functions whose arguments are sums into a form where the arguments are single terms or whether to perform the inverse transformations. It is hoped to make the \texttt{TRIGSIMP} facilities available for use with the \texttt{TRIGD} functions in the near future.
Integration is not directly supported although the approach using rule-lists to convert the \texttt{TRIGD} functions to standard trig ones should work well. Introducing direct support for integration will not therefore be a priority.
For the standard sine function there is a rule for imaginary arguments namely: \texttt{sin(I*X) => I*sinh(X)}. The corresponding rule for the degree version is \texttt{sind(I*X) => I*sinh(X*PI/180)}. However, currently such rules are \emph{NOT} implemented by the system. They may be implemented in future, but it is not a high priority as it is felt that the radian-based trig functions are best suited for such symbolic calculations.
There are \emph{NO D} versions of the hyperbolic functions --- that would be a {\it step too far}! And should the new functions be called \texttt{sinhd} and so on? Or perhaps \texttt{sindh}\footnote{One is perhaps reminded here of the (in)famous bilingual pun: \emph{peccavi} attributed to Charles James Napier --- apparently no relation to his logarithmic namesake -- see Wikipedia for details!} etc?
\end{document}
|
{"hexsha": "967f43a538890557a8831eb2f0eb9f55c212baf0", "size": 15803, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "packages/misc/trigd/trigd.tex", "max_stars_repo_name": "arthurcnorman/general", "max_stars_repo_head_hexsha": "5e8fef0cc7999fa8ab75d8fdf79ad5488047282b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "packages/misc/trigd/trigd.tex", "max_issues_repo_name": "arthurcnorman/general", "max_issues_repo_head_hexsha": "5e8fef0cc7999fa8ab75d8fdf79ad5488047282b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "packages/misc/trigd/trigd.tex", "max_forks_repo_name": "arthurcnorman/general", "max_forks_repo_head_hexsha": "5e8fef0cc7999fa8ab75d8fdf79ad5488047282b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 99.3899371069, "max_line_length": 800, "alphanum_fraction": 0.7646016579, "num_tokens": 4093}
|
import os
import random
import math
from datetime import datetime
import pygame as pg
from wall import *
from tank import *
from bullet import *
from explosion import *
from base import *
from score import *
from search import *
from path_symbol import *
from profiler import *
from util import *
from logger import Logger
from minimax import minimax, expectimax
import numpy as np
import time
random.seed()
RESTRICTED_TILES = [(12, 5), (12, 6), (12, 7), (11, 5),
(11, 6), (11, 7), (10, 6)]
SPAWN_ENEMY_EVENT = pg.USEREVENT
ENEMY_COUNT = 1
RANDOM_ENEMY_COUNT = 0
MAX_ENEMY_COUNT = 4
GAME_STATE_ACTIVE = 0
GAME_STATE_PLAYER_WON = 1
GAME_STATE_PLAYER_LOST = 2
SEARCH_ALGORITHMS = [BFS, DFS, UCS, A_Star]
win_sound = load_sound("samples/win_effect.wav")
lose_sound = load_sound("samples/lose_effect.wav")
class Game:
def __init__(self, graphical_mode=True, player_algorithm=minimax):
pg.init()
self.graphical_mode = graphical_mode
screen = pg.Surface((480, 426))
if graphical_mode:
screen = pg.display.set_mode((480, 426), flags=pg.SCALED)
self.screen = screen
pg.display.set_caption("Battle City")
background = pg.Surface(screen.get_size())
if graphical_mode:
background = background.convert()
background.fill((128, 128, 128))
self.background = background
self.game_map = pg.Surface((416, 416))
if graphical_mode:
self.game_map = self.game_map.convert()
self.game_map.fill((0, 0, 0))
self.game_map_rect = self.game_map.get_rect()
background.blit(self.game_map, MAP_COORDINATES)
screen.blit(background, (0, 0))
self.bullet_sprites = pg.sprite.RenderPlain()
self.base_sprite = pg.sprite.RenderPlain()
self.wall_sprites = pg.sprite.RenderPlain()
self.player_tank_sprites = pg.sprite.RenderPlain()
self.enemy_tank_sprites = pg.sprite.RenderPlain()
self.explosion_sprites = pg.sprite.RenderPlain()
self.enemy_counter_sprite = pg.sprite.RenderPlain(EnemyCounter(10))
self.path_symbol_sprites = pg.sprite.RenderPlain()
self.map_coords = []
self.player_tank = PlayerTank(10, 6, self, player_algorithm)
self.player_algorithm = player_algorithm
self.enemy_count = ENEMY_COUNT + RANDOM_ENEMY_COUNT
self.enemies = []
self.game_state = GAME_STATE_ACTIVE
self.player_move = True
self.search_algorithm_profiler = Profiler(BFS)
self.logger = Logger()
self.game_start_time = datetime.now()
self.search_algorithm = self.search_algorithm_profiler.execute
self.game_over_font = pg.font.Font(
data_dir + "/fonts/ARCADECLASSIC.TTF", 36)
self.game_over_text = self.game_over_font.render(
"YOU LOST!", 1, (0, 255, 0))
self.textpos = self.game_over_text.get_rect(
center=(self.game_map.get_width() / 2, self.game_map.get_height() / 2))
self.restart_game()
def restart_game(self):
self.game_start_time = datetime.now()
self.game_state = GAME_STATE_ACTIVE
self.player_move = True
self.bullet_sprites.empty()
self.base_sprite.empty()
self.wall_sprites.empty()
self.enemy_tank_sprites.empty()
self.explosion_sprites.empty()
self.player_tank_sprites.empty()
self.wall_sprites.add(
Wall(12, 5),
Wall(12, 7),
Wall(11, 5),
Wall(11, 6),
Wall(11, 7),
)
self.base_sprite.add(Base())
self.map_coords.clear()
for i in range(13):
for j in range(13):
if (i, j) not in RESTRICTED_TILES:
self.map_coords.append((i, j))
for i in range(40):
coord = random.choice(self.map_coords)
self.map_coords.remove(coord)
self.wall_sprites.add(Wall(coord[0], coord[1]))
for i in range(ENEMY_COUNT):
self.enemies.append(AITank)
for i in range(RANDOM_ENEMY_COUNT):
self.enemies.append(RandomTank)
random.shuffle(self.enemies)
self.enemy_count = ENEMY_COUNT + RANDOM_ENEMY_COUNT
for i in range(MAX_ENEMY_COUNT):
if self.enemy_count > 0:
self.enemy_count = self.enemy_count - 1
self.spawn_enemy()
self.player_tank = PlayerTank(10, 6, self, self.player_algorithm)
self.player_tank_sprites.add(self.player_tank)
def get_wall_collision(self, rect):
for wall in self.wall_sprites:
if wall.rect.colliderect(rect):
return wall
return None
def get_enemy_tank_collision(self, rect):
res = []
for tank in self.enemy_tank_sprites:
if tank.rect.colliderect(rect):
res.append(tank)
return res
def get_player_tank_collision(self, rect):
for tank in self.player_tank_sprites:
if tank.rect.colliderect(rect):
return tank
return None
def get_base_collision(self, rect):
for base in self.base_sprite:
if base.rect.colliderect(rect):
return base
return None
def collides_with_map(self, rect):
return not self.game_map_rect.contains(rect)
def spawn_enemy(self):
rows = [n for n in range(13)]
cols = [n for n in range(13)]
random.shuffle(rows)
random.shuffle(cols)
for i in rows:
for j in cols:
if (i, j) in RESTRICTED_TILES:
continue
test_rect = pg.Rect(
j * 32 + MAP_COORDINATES[0], i * 32 + MAP_COORDINATES[1], 32, 32)
if self.get_wall_collision(test_rect) is not None or len(self.get_enemy_tank_collision(
test_rect)) > 0 or self.get_player_tank_collision(test_rect) is not None:
continue
return self.enemy_tank_sprites.add(self.enemies.pop()(i, j, self))
def get_square_matrix(self, omit=[]):
res = [[0 for x in range(13)] for y in range(13)]
for i in range(13):
for j in range(13):
test_rect = pg.Rect(
j * 32 + MAP_COORDINATES[0], i * 32 + MAP_COORDINATES[1], 16, 16)
if (i, j) in omit or (self.get_wall_collision(test_rect) is None and len(self.get_enemy_tank_collision(test_rect)) == 0):
res[i][j] = 1
return res
def get_random_tank_sprites(self):
res = []
for tank in self.enemy_tank_sprites:
if isinstance(tank, RandomTank):
res.append(tank)
return res
def generate_path_to_player(self, enemy_tank):
player = self.player_tank.get_current_tile()
enemy_tank_tile = enemy_tank.get_current_tile()
m = self.get_square_matrix([enemy_tank_tile])
res = self.search_algorithm(m, player, enemy_tank_tile)
res.reverse()
return res
def draw_paths_to_enemies(self):
self.path_symbol_sprites.empty()
paths = []
for tank in self.enemy_tank_sprites:
paths.append(self.generate_path_to_player(tank))
for path in paths:
for square in path:
self.path_symbol_sprites.add(PathSymbol(square[0], square[1]))
def switch_search_algorithm(self):
index = SEARCH_ALGORITHMS.index(
self.search_algorithm_profiler.get_func())
index = index + 1 if index < len(SEARCH_ALGORITHMS) - 1 else 0
self.search_algorithm_profiler.set_func(SEARCH_ALGORITHMS[index])
def create_explosion(self, pos):
self.explosion_sprites.add(Explosion(pos[0], pos[1]))
def print_search_algorithm(self):
font = pg.font.Font(data_dir + "/fonts/ARCADECLASSIC.TTF", 26)
font2 = pg.font.Font(data_dir + "/fonts/INVASION2000.TTF", 20)
algorithm_name = font.render(
self.search_algorithm_profiler.get_func_name(), 1, (34, 34, 34))
algorithm_duration = font2.render("{:.2f}".format(
self.search_algorithm_profiler.get_avg_duration()), 1, (34, 34, 34))
self.background.blit(
algorithm_name, (480 - algorithm_name.get_rect().width, 0))
self.background.blit(
algorithm_duration,
(480 - algorithm_duration.get_rect().width,
algorithm_name.get_height()))
def print_win(self):
self.game_over_text = self.game_over_font.render(
"YOU WON!", 1, (0, 255, 0))
self.game_map.blit(self.game_over_text, self.textpos)
def print_lose(self):
self.game_over_text = self.game_over_font.render(
"YOU LOST!", 1, (255, 0, 0))
self.game_map.blit(self.game_over_text, self.textpos)
def get_score(self):
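        # Heuristic score: +0.2 per remaining player life, -0.1 per enemy
        # still alive or waiting to be spawned.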
return self.player_tank.lives * 0.2 - \
(self.enemy_count + len(self.enemy_tank_sprites)) * 0.1
def log_game_result(self):
status = "player_won" if self.game_state == GAME_STATE_PLAYER_WON else "player_lost"
duration = (datetime.now() - self.game_start_time).total_seconds()
self.logger.log_result(status, duration,
self.get_score(), self.player_algorithm.__name__)
def check_or_update_game_state(self):
if self.game_state == GAME_STATE_ACTIVE:
if not self.player_tank.is_alive:
lose_sound.play()
self.game_state = GAME_STATE_PLAYER_LOST
self.log_game_result()
if self.enemy_count == 0 and len(self.enemy_tank_sprites) == 0:
win_sound.play()
self.game_state = GAME_STATE_PLAYER_WON
self.log_game_result()
def update_game_state(self):
self.check_or_update_game_state()
self.wall_sprites.update()
self.bullet_sprites.update()
self.base_sprite.update()
self.explosion_sprites.update()
self.enemy_counter_sprite.sprites()[0].set_count(self.player_tank.lives)
self.enemy_counter_sprite.update()
self.path_symbol_sprites.update()
def update_game_map(self):
self.game_map.fill((0, 0, 0))
self.path_symbol_sprites.draw(self.game_map)
self.wall_sprites.draw(self.game_map)
self.bullet_sprites.draw(self.game_map)
self.enemy_tank_sprites.draw(self.game_map)
self.player_tank_sprites.draw(self.game_map)
self.base_sprite.draw(self.game_map)
self.explosion_sprites.draw(self.game_map)
def toggle_player_move(self):
self.player_move = not self.player_move
def render_map(self):
arr = np.array(self.game_map.get_buffer())
arr = np.reshape(arr, (416, 416, 4))
arr = arr[:, :, 0:3]
return arr
def step(self, move):
possible_moves = [
self.player_tank.move_up,
self.player_tank.move_down,
self.player_tank.move_left,
self.player_tank.move_right,
self.player_tank.stop]
self.player_tank.shoot()
possible_moves[move]()
self.player_tank.move()
self.update_game_state()
self.enemy_tank_sprites.update()
self.update_game_state()
self.update_game_map()
return (self.get_score(), self.game_state != GAME_STATE_ACTIVE)
def main_loop(self):
if self.graphical_mode:
pg.display.flip()
clock = pg.time.Clock()
spawn_pending = False
going = True
while going:
self.draw_paths_to_enemies()
self.check_or_update_game_state()
if len(self.enemy_tank_sprites) < MAX_ENEMY_COUNT and not spawn_pending:
pg.time.set_timer(SPAWN_ENEMY_EVENT, 2000)
spawn_pending = True
for event in pg.event.get():
if event.type == pg.QUIT:
going = False
elif event.type == pg.KEYDOWN:
if event.key == pg.K_z:
self.switch_search_algorithm()
elif event.type == SPAWN_ENEMY_EVENT:
if self.enemy_count != 0:
self.enemy_count = self.enemy_count - 1
pg.time.set_timer(SPAWN_ENEMY_EVENT, 0)
self.spawn_enemy()
spawn_pending = False
if self.player_move:
self.player_tank_sprites.update()
self.update_game_state()
self.toggle_player_move()
else:
self.enemy_tank_sprites.update()
self.update_game_state()
self.toggle_player_move()
self.background.fill((128, 128, 128))
self.update_game_map()
self.enemy_counter_sprite.draw(self.background)
self.print_search_algorithm()
if self.game_state == GAME_STATE_PLAYER_WON:
self.restart_game()
self.print_win()
elif self.game_state == GAME_STATE_PLAYER_LOST:
self.restart_game()
self.print_lose()
self.background.blit(self.game_map, MAP_COORDINATES)
self.screen.blit(self.background, (0, 0))
if self.graphical_mode:
pg.display.flip()
time.sleep(1)
pg.quit()
if __name__ == "__main__":
game = Game()
game.main_loop()
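# Hypothetical headless usage sketch (not part of the original game loop):
# drive the game one move at a time via step(), where moves are
# 0=up, 1=down, 2=left, 3=right, 4=stop, e.g. when evaluating an agent
# without a display:
#   game = Game(graphical_mode=False)
#   score, done = game.step(4)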
|
{"hexsha": "aea646a21996080a3258c01bca8f2643290732fe", "size": 13633, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/game.py", "max_stars_repo_name": "vmkul/battle-city", "max_stars_repo_head_hexsha": "7d303ca2ab71bc160f3c6236e75d45bcff20aa92", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/game.py", "max_issues_repo_name": "vmkul/battle-city", "max_issues_repo_head_hexsha": "7d303ca2ab71bc160f3c6236e75d45bcff20aa92", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-09-05T17:47:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T16:59:56.000Z", "max_forks_repo_path": "src/game.py", "max_forks_repo_name": "vmkul/battle-city", "max_forks_repo_head_hexsha": "7d303ca2ab71bc160f3c6236e75d45bcff20aa92", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.00968523, "max_line_length": 137, "alphanum_fraction": 0.6050025673, "include": true, "reason": "import numpy", "num_tokens": 3021}
|
# coding:utf-8
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import numpy as np
# from sqlalchemy import create_engine
import pandas as pd
import pymysql
from collections import Counter
from shortestpass import ReadFile
from shortestpass import Station
import math
class Vertex():
def __init__(self, name):
self.name = name
self.connected_vertex = {}
def __str__(self):
return str(self.name) + str([(vertex.name,str(self.connected_vertex[vertex])) for vertex in self.connected_vertex.keys()])
def appendNeighbor(self, neighbor, weight=1):
self.connected_vertex[neighbor] = weight
def deleteNeighbor(self, neighbor):
del self.connected_vertex[neighbor]
def getNeighbors(self):
return self.connected_vertex.keys()
def getWeight(self, neighbor):
return self.connected_vertex[neighbor]
def getName(self):
return self.name
class Graph():
def __init__(self):
self.ver_list = {}
self.num_vertex = 0
def addVertex(self, name):
new_vertex = Vertex(name)
self.ver_list[name] = new_vertex
self.num_vertex += 1
return new_vertex
def getVertex(self, name):
if name in self.ver_list:
return self.ver_list[name]
else:
return None
def addEdge(self, init, target, weight=1):
if init not in self.ver_list:
self.addVertex(init)
if target not in self.ver_list:
self.addVertex(target)
self.ver_list[init].appendNeighbor(self.ver_list[target], weight)
def deleteEdge(self, vertex1, vertex2):
self.ver_list[vertex1].deleteNeighbor(self.ver_list[vertex2])
def getVertices(self):
return self.ver_list.keys()
def __contains__(self, name):
return name in self.ver_list
def __iter__(self):
return iter(self.ver_list.values())
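# A minimal usage sketch of Graph/Vertex (illustration only):
#
#   g = Graph()
#   g.addEdge('Sta1', 'Sta2', 3)               # vertices are created on demand
#   v = g.getVertex('Sta1')
#   for nb in v.getNeighbors():
#       print(nb.getName(), v.getWeight(nb))   # -> Sta2 3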
class Dfs():
def __init__(self):
self.createGraph()
        # new_dict = {}
        # 1. get the path between the endpoints of each line segment
        # for sta_list in self.sta:
        #     new_dict[(sta_list[0], sta_list[-1])] = sta_list[1:-1:1]
        #     new_dict[(sta_list[-1], sta_list[0])] = sta_list[-2:0:-1]
        # print(new_dict)
self.stationinfo_dict = {'Sta1': ['1号线', '0'],'Sta159': ['1号线', '0'],'Sta108': ['1号线', '0'],'Sta83': ['1号线', '0'],'Sta107': ['1号线', '0'],'Sta154': ['1号线', '0'],'Sta150': ['1号线', '0'],'Sta64': ['1号线', '0'],'Sta89': ['1号线', '1'],'Sta80': ['1号线', '0'],'Sta97': ['1号线', '0'],'Sta110': ['1号线', '0'],'Sta106': ['1号线', '0'],'Sta34': ['1号线', '0'],'Sta128': ['1号线', '0'],'Sta74': ['1号线', '0'],'Sta149': ['1号线', '0'],'Sta49': ['1号线', '0'],'Sta65': ['1号线', '0'],'Sta9': ['2号线', '0'],'Sta163': ['2号线', '0'],'Sta53': ['2号线', '0'],'Sta78': ['2号线', '0'],'Sta79': ['2号线', '0'],'Sta18': ['2号线', '0'],'Sta123': ['2号线', '0'],'Sta127': ['2号线', '1'],'Sta81': ['2号线', '0'],'Sta27': ['2号线', '0'],'Sta48': ['2号线', '0'],'Sta151': ['2号线', '0'],'Sta68': ['2号线', '0'],'Sta52': ['2号线', '0'],'Sta76': ['2号线', '0'],'Sta57': ['2号线', '0'],'Sta71': ['2号线', '0'],'Sta139': ['2号线', '0'],'Sta24': ['2号线', '0'],'Sta105': ['2号线', '0'],'Sta51': ['2号线', '0'],'Sta143': ['3号线', '0'],'Sta156': ['3号线', '0'],'Sta61': ['3号线', '0'],'Sta50': ['3号线', '0'],'Sta119': ['3号线', '0'],'Sta66': ['3号线', '0'],'Sta12': ['3号线', '0'],'Sta161': ['3号线', '0'],'Sta21': ['3号线', '0'],'Sta133': ['3号线', '0'],'Sta22': ['3号线', '0'],'Sta138': ['3号线', '0'],'Sta41': ['3号线', '1'],'Sta30': ['3号线', '0'],'Sta67': ['3号线', '0'],'Sta144': ['3号线', '0'],'Sta29': ['3号线', '0'],'Sta126': ['3号线', '0'],'Sta40': ['3号线', '0'],'Sta131': ['3号线', '0'],'Sta39': ['3号线', '0'],'Sta100': ['3号线', '0'],'Sta167': ['3号线', '0'],'Sta113': ['3号线', '0'],'Sta141': ['3号线', '0'],'Sta142': ['3号线', '0'],'Sta158': ['3号线', '0'],'Sta44': ['3号线', '0'],'Sta117': ['3号线', '0'],'Sta147': ['3号线', '0'],'Sta42': ['3号线', '0'],'Sta35': ['3号线', '0'],'Sta109': ['3号线', '0'],'Sta33': ['3号线', '0'],'Sta112': ['3号线', '0'],'Sta153': ['3号线', '0'],'Sta125': ['3号线', '0'],'Sta121': ['3号线', '0'],'Sta11': ['3号线', '0'],'Sta134': ['10号线', '1'],'Sta59': ['4号线', '0'],'Sta19': ['4号线', '0'],'Sta62': ['4号线', '0'],'Sta165': ['4号线', '0'],'Sta58': ['4号线', '0'],'Sta38': ['4号线', '0'],'Sta43': ['5号线', '0'],'Sta10': ['5号线', '0'],'Sta96': ['5号线', '0'],'Sta132': ['5号 线', '0'],'Sta37': ['5号线', '0'],'Sta16': ['5号线', '0'],'Sta69': ['5号线', '0'],'Sta54': ['5号线', '0'],'Sta120': ['11号线', '0'],'Sta130': ['11号线', '0'],'Sta146': ['11号线', '0'],'Sta25': ['11号线', '0'],'Sta3': ['11号线', '1'],'Sta46': ['11号线', '0'],'Sta86': ['11号线', '0'],'Sta15': ['11号线', '1'],'Sta162': ['11号线', '0'],'Sta118': ['11号线', '0'],'Sta20': ['11号线', '0'],'Sta55': ['11号线', '0'],'Sta70': ['11号线', '0'],'Sta13': ['11号线', '0'],'Sta140': ['11号线', '1'],'Sta77': ['11号线', '0'],'Sta122': ['11号线', '0'],'Sta36': ['11号线', '0'],'Sta166': ['11号线', '0'],'Sta99': ['11号线', '0'],'Sta124': ['11号线', '0'],'Sta28': ['11号线', '0'],'Sta82': ['11号线', '0'],'Sta164': ['11号线', '0'],'Sta152': ['11号线', '0'],'Sta45': ['11号线', '0'],'Sta75': ['10号线', '1'],'Sta136': ['12号线', '0'],'Sta137': ['12号线', '0'],'Sta101': ['12号线', '0'],'Sta17': ['12号线', '0'],'Sta26': ['12号线', '0'],'Sta90': ['12号线', '1'],'Sta95': ['12号线', '0'],'Sta93': ['12号线', '0'],'Sta92': ['12号线', '0'],'Sta32': ['12号线', '0'],'Sta91': ['12号线', '0'],'Sta157': ['10号线', '0'],'Sta168': ['10号线', '0'],'Sta85': ['10号线', '0'],'Sta2': ['10号线', '0'],'Sta4': ['10号线', '0'],'Sta103': ['10号线', '0'],'Sta145': ['10号线', '0'],'Sta88': ['10号线', '0'],'Sta94': ['10号线', '0'],'Sta160': ['10号线', '0'],'Sta7': ['10号线', '0'],'Sta6': ['10号线', '0'],'Sta8': ['10号线', '0'],'Sta102': ['10号线', '0'],'Sta31': ['12号线', '0'],'Sta72': ['12号线', '0'],'Sta116': ['12号线', '0'],'Sta129': ['2号线', '0'],'Sta47': ['2号线', '1'],'Sta60': ['12号线', '0'],'Sta148': ['12号线', '0'],'Sta73': ['12号线', '0'],'Sta23': ['11号线', 
'1'],'Sta56': ['11号线', '1'],'Sta115': ['11号线', '1'],'Sta63': ['11号线', '1'],'Sta114': ['10号线', '1'],'Sta135': ['10号线', '1'],'Sta87': ['10号线', '1'],'Sta84': ['4号线', '0'],'Sta111': ['11号线', '0']}
self.passes37 = {('Sta24', 'Sta127'): ['Sta51', 'Sta105', 'Sta139', 'Sta71', 'Sta57', 'Sta76', 'Sta52', 'Sta68', 'Sta151', 'Sta48', 'Sta27', 'Sta81'], ('Sta127', 'Sta24'): ['Sta81', 'Sta27', 'Sta48', 'Sta151', 'Sta68', 'Sta52', 'Sta76', 'Sta57', 'Sta71', 'Sta139', 'Sta105', 'Sta51'], ('Sta73', 'Sta127'): ['Sta148', 'Sta60'], ('Sta127', 'Sta73'): ['Sta60', 'Sta148'], ('Sta127', 'Sta47'): ['Sta123'], ('Sta47', 'Sta127'): ['Sta123'], ('Sta1', 'Sta47'): ['Sta159'], ('Sta47', 'Sta1'): ['Sta159'], ('Sta47', 'Sta89'): ['Sta108', 'Sta83', 'Sta107', 'Sta154', 'Sta150', 'Sta64'], ('Sta89', 'Sta47'): ['Sta64', 'Sta150', 'Sta154', 'Sta107', 'Sta83', 'Sta108'], ('Sta127', 'Sta41'): ['Sta91'], ('Sta41', 'Sta127'): ['Sta91'], ('Sta89', 'Sta65'): ['Sta80', 'Sta97', 'Sta110', 'Sta106', 'Sta34', 'Sta128', 'Sta74', 'Sta149', 'Sta49'], ('Sta65', 'Sta89'): ['Sta49', 'Sta149', 'Sta74', 'Sta128', 'Sta34', 'Sta106', 'Sta110', 'Sta97', 'Sta80'], ('Sta136', 'Sta89'): [], ('Sta89', 'Sta136'): [], ('Sta89', 'Sta23'): ['Sta137', 'Sta101', 'Sta31', 'Sta17'], ('Sta23', 'Sta89'): ['Sta17', 'Sta31', 'Sta101', 'Sta137'], ('Sta23', 'Sta140'): ['Sta20', 'Sta55', 'Sta70', 'Sta13'], ('Sta140', 'Sta23'): ['Sta13', 'Sta70', 'Sta55', 'Sta20'], ('Sta140', 'Sta77'): ['Sta99', 'Sta166', 'Sta124', 'Sta28', 'Sta36', 'Sta122'], ('Sta77', 'Sta140'): ['Sta122', 'Sta36', 'Sta28', 'Sta124', 'Sta166', 'Sta99'], ('Sta140', 'Sta75'): ['Sta111', 'Sta82', 'Sta164', 'Sta152'], ('Sta75', 'Sta140'): ['Sta152', 'Sta164', 'Sta82', 'Sta111'], ('Sta75', 'Sta102'): [], ('Sta102', 'Sta75'): [], ('Sta75', 'Sta45'): [], ('Sta45', 'Sta75'): [], ('Sta75', 'Sta87'): ['Sta8', 'Sta6', 'Sta7', 'Sta160', 'Sta94'], ('Sta87', 'Sta75'): ['Sta94', 'Sta160', 'Sta7', 'Sta6', 'Sta8'], ('Sta11', 'Sta87'): ['Sta121', 'Sta125', 'Sta153', 'Sta112', 'Sta33', 'Sta109'], ('Sta87', 'Sta11'): ['Sta109', 'Sta33', 'Sta112', 'Sta153', 'Sta125', 'Sta121'], ('Sta58', 'Sta90'): ['Sta38', 'Sta165', 'Sta62', 'Sta19', 'Sta59', 'Sta84'], ('Sta90', 'Sta58'): ['Sta84', 'Sta59', 'Sta19', 'Sta62', 'Sta165', 'Sta38'], ('Sta23', 'Sta90'): ['Sta26'], ('Sta90', 'Sta23'): ['Sta26'], ('Sta43', 'Sta56'): ['Sta10', 'Sta96', 'Sta132', 'Sta37', 'Sta16', 'Sta69', 'Sta54'], ('Sta56', 'Sta43'): ['Sta54', 'Sta69', 'Sta16', 'Sta37', 'Sta132', 'Sta96', 'Sta10'], ('Sta90', 'Sta134'): [], ('Sta134', 'Sta90'): [], ('Sta87', 'Sta134'): ['Sta88', 'Sta145', 'Sta103', 'Sta4', 'Sta2', 'Sta85'], ('Sta134', 'Sta87'): ['Sta85', 'Sta2', 'Sta4', 'Sta103', 'Sta145', 'Sta88'], ('Sta87', 'Sta135'): ['Sta35', 'Sta42', 'Sta147', 'Sta117', 'Sta44', 'Sta158', 'Sta142', 'Sta141', 'Sta113', 'Sta167'], ('Sta135', 'Sta87'): ['Sta167', 'Sta113', 'Sta141', 'Sta142', 'Sta158', 'Sta44', 'Sta117', 'Sta147', 'Sta42', 'Sta35'], ('Sta56', 'Sta115'): ['Sta118'], ('Sta115', 'Sta56'): ['Sta118'], ('Sta115', 'Sta41'): ['Sta126', 'Sta29', 'Sta144', 'Sta67', 'Sta30'], ('Sta41', 'Sta115'): ['Sta30', 'Sta67', 'Sta144', 'Sta29', 'Sta126'], ('Sta143', 'Sta41'): ['Sta156', 'Sta61', 'Sta50', 'Sta119', 'Sta66', 'Sta12', 'Sta161', 'Sta21', 'Sta133', 'Sta22', 'Sta138'], ('Sta41', 'Sta143'): ['Sta138', 'Sta22', 'Sta133', 'Sta21', 'Sta161', 'Sta12', 'Sta66', 'Sta119', 'Sta50', 'Sta61', 'Sta156'], ('Sta120', 'Sta63'): ['Sta130', 'Sta146', 'Sta25'], ('Sta63', 'Sta120'): ['Sta25', 'Sta146', 'Sta130'], ('Sta47', 'Sta63'): ['Sta18', 'Sta79', 'Sta53', 'Sta163', 'Sta9', 'Sta129'], ('Sta63', 'Sta47'): ['Sta129', 'Sta9', 'Sta163', 'Sta53', 'Sta79', 'Sta18'], ('Sta41', 'Sta3'): ['Sta32', 'Sta116', 'Sta92'], ('Sta3', 'Sta41'): ['Sta92', 'Sta116', 'Sta32'], ('Sta3', 
'Sta15'): ['Sta46', 'Sta86'], ('Sta15', 'Sta3'): ['Sta86', 'Sta46'], ('Sta157', 'Sta114'): [], ('Sta114', 'Sta157'): [], ('Sta115', 'Sta114'): ['Sta162'], ('Sta114', 'Sta115'): ['Sta162'], ('Sta115', 'Sta135'): ['Sta40', 'Sta131', 'Sta39', 'Sta100'], ('Sta135', 'Sta115'): ['Sta100', 'Sta39', 'Sta131', 'Sta40'], ('Sta114', 'Sta135'): ['Sta168'], ('Sta135', 'Sta114'): ['Sta168'], ('Sta135', 'Sta134'): [], ('Sta134', 'Sta135'): [], ('Sta15', 'Sta134'): ['Sta95'], ('Sta134', 'Sta15'): ['Sta95'], ('Sta56', 'Sta23'): [], ('Sta23', 'Sta56'): [], ('Sta63', 'Sta3'): [], ('Sta3', 'Sta63'): [], ('Sta15', 'Sta114'): [], ('Sta114', 'Sta15'): []}
self.between_line = {('Sta65', 'Sta49'): '1号线', ('Sta49', 'Sta65'): '1号线', ('Sta49', 'Sta149'): '1号线', ('Sta149', 'Sta49'): '1号线', ('Sta149', 'Sta74'): '1号线', ('Sta74', 'Sta149'): '1号线', ('Sta74', 'Sta128'): '1号线', ('Sta128', 'Sta74'): '1号线', ('Sta128', 'Sta34'): '1号线', ('Sta34', 'Sta128'): '1号线', ('Sta34', 'Sta106'): '1号线', ('Sta106', 'Sta34'): '1号线', ('Sta106', 'Sta110'): '1号线', ('Sta110', 'Sta106'): '1号线', ('Sta110', 'Sta97'): '1号线', ('Sta97', 'Sta110'): '1号线', ('Sta97', 'Sta80'): '1号线', ('Sta80', 'Sta97'): '1号线', ('Sta80', 'Sta89'): '1号线', ('Sta89', 'Sta80'): '1号线', ('Sta89', 'Sta137'): '12号线', ('Sta137', 'Sta89'): '12号线', ('Sta89', 'Sta64'): '1号线', ('Sta64', 'Sta89'): '1号线', ('Sta64', 'Sta150'): '1号线', ('Sta150', 'Sta64'): '1号线', ('Sta150', 'Sta154'): '1号线', ('Sta154', 'Sta150'): '1号线', ('Sta154', 'Sta107'): '1号线', ('Sta107', 'Sta154'): '1号线', ('Sta107', 'Sta83'): '1号线', ('Sta83', 'Sta107'): '1号线', ('Sta83', 'Sta108'): '1号线', ('Sta108', 'Sta83'): '1号线', ('Sta108', 'Sta47'): '1号线', ('Sta47', 'Sta108'): '1号线', ('Sta159', 'Sta1'): '1号线', ('Sta1', 'Sta159'): '1号线', ('Sta129', 'Sta9'): '2号线', ('Sta9', 'Sta129'): '2号线', ('Sta9', 'Sta163'): '2号线', ('Sta163', 'Sta9'): '2号线', ('Sta163', 'Sta53'): '2号线', ('Sta53', 'Sta163'): '2号线', ('Sta53', 'Sta79'): '2号线', ('Sta79', 'Sta53'): '2号线', ('Sta79', 'Sta18'): '2号线', ('Sta18', 'Sta79'): '2号线', ('Sta18', 'Sta47'): '2号线', ('Sta47', 'Sta18'): '2号线', ('Sta47', 'Sta159'): '1号线', ('Sta159', 'Sta47'): '1号线', ('Sta47', 'Sta123'): '2号线', ('Sta123', 'Sta47'): '2号线', ('Sta123', 'Sta127'): '2号线', ('Sta127', 'Sta123'): '2号线', ('Sta127', 'Sta60'): '12号线', ('Sta60', 'Sta127'): '12号线', ('Sta127', 'Sta81'): '2号线', ('Sta81', 'Sta127'): '2号线', ('Sta81', 'Sta27'): '2号线', ('Sta27', 'Sta81'): '2号线', ('Sta27', 'Sta48'): '2号线', ('Sta48', 'Sta27'): '2号线', ('Sta48', 'Sta151'): '2号线', ('Sta151', 'Sta48'): '2号线', ('Sta151', 'Sta68'): '2号线', ('Sta68', 'Sta151'): '2号线', ('Sta68', 'Sta52'): '2号线', ('Sta52', 'Sta68'): '2号线', ('Sta52', 'Sta76'): '2号线', ('Sta76', 'Sta52'): '2号线', ('Sta76', 'Sta57'): '2号线', ('Sta57', 'Sta76'): '2号线', ('Sta57', 'Sta71'): '2号线', ('Sta71', 'Sta57'): '2号线', ('Sta71', 'Sta139'): '2号线', ('Sta139', 'Sta71'): '2号线', ('Sta139', 'Sta105'): '2号线', ('Sta105', 'Sta139'): '2号线', ('Sta105', 'Sta51'): '2号线', ('Sta51', 'Sta105'): '2号线', ('Sta51', 'Sta24'): '2号线', ('Sta24', 'Sta51'): '2号线', ('Sta143', 'Sta156'): '3号线', ('Sta156', 'Sta143'): '3号线', ('Sta156', 'Sta61'): '3号线', ('Sta61', 'Sta156'): '3号线', ('Sta61', 'Sta50'): '3号线', ('Sta50', 'Sta61'): '3号线', ('Sta50', 'Sta119'): '3号线', ('Sta119', 'Sta50'): '3号线', ('Sta119', 'Sta66'): '3号线', ('Sta66', 'Sta119'): '3号线', ('Sta66', 'Sta12'): '3号线', ('Sta12', 'Sta66'): '3号线', ('Sta12', 'Sta161'): '3号线', ('Sta161', 'Sta12'): '3号线', ('Sta161', 'Sta21'): '3号线', ('Sta21', 'Sta161'): '3号线', ('Sta21', 'Sta133'): '3号线', ('Sta133', 'Sta21'): '3号线', ('Sta133', 'Sta22'): '3号线', ('Sta22', 'Sta133'): '3号线', ('Sta22', 'Sta138'): '3号线', ('Sta138', 'Sta22'): '3号线', ('Sta138', 'Sta41'): '3号线', ('Sta41', 'Sta138'): '3号线', ('Sta41', 'Sta91'): '12号线', ('Sta91', 'Sta41'): '12号线', ('Sta41', 'Sta30'): '3号线', ('Sta30', 'Sta41'): '3号线', ('Sta30', 'Sta67'): '3号线', ('Sta67', 'Sta30'): '3号线', ('Sta67', 'Sta144'): '3号线', ('Sta144', 'Sta67'): '3号线', ('Sta144', 'Sta29'): '3号线', ('Sta29', 'Sta144'): '3号线', ('Sta29', 'Sta126'): '3号线', ('Sta126', 'Sta29'): '3号线', ('Sta126', 'Sta115'): '3号线', ('Sta115', 'Sta126'): '3号 线', ('Sta40', 'Sta131'): '3号线', ('Sta131', 'Sta40'): '3号线', ('Sta131', 'Sta39'): '3号线', ('Sta39', 'Sta131'): '3号线', ('Sta39', 'Sta100'): '3号线', 
('Sta100', 'Sta39'): '3号线', ('Sta100', 'Sta135'): '3号线', ('Sta135', 'Sta100'): '3号线', ('Sta167', 'Sta113'): '3号线', ('Sta113', 'Sta167'): '3号线', ('Sta113', 'Sta141'): '3号线', ('Sta141', 'Sta113'): '3号线', ('Sta141', 'Sta142'): '3号线', ('Sta142', 'Sta141'): '3号线', ('Sta142', 'Sta158'): '3号线', ('Sta158', 'Sta142'): '3号线', ('Sta158', 'Sta44'): '3号线', ('Sta44', 'Sta158'): '3号线', ('Sta44', 'Sta117'): '3号线', ('Sta117', 'Sta44'): '3号线', ('Sta117', 'Sta147'): '3号线', ('Sta147', 'Sta117'): '3号线', ('Sta147', 'Sta42'): '3号线', ('Sta42', 'Sta147'): '3号线', ('Sta42', 'Sta35'): '3号线', ('Sta35', 'Sta42'): '3号线', ('Sta35', 'Sta87'): '3号线', ('Sta87', 'Sta35'): '3号线', ('Sta109', 'Sta33'): '3号线', ('Sta33', 'Sta109'): '3号线', ('Sta33', 'Sta112'): '3号线', ('Sta112', 'Sta33'): '3号线', ('Sta112', 'Sta153'): '3号线', ('Sta153', 'Sta112'): '3号线', ('Sta153', 'Sta125'): '3号线', ('Sta125', 'Sta153'): '3号线', ('Sta125', 'Sta121'): '3号线', ('Sta121', 'Sta125'): '3号线', ('Sta121', 'Sta11'): '3号线', ('Sta11', 'Sta121'): '3号线', ('Sta157', 'Sta114'): '10号线', ('Sta114', 'Sta157'): '10号线', ('Sta114', 'Sta15'): '11号线', ('Sta15', 'Sta114'): '11号线', ('Sta114', 'Sta168'): '10号线', ('Sta168', 'Sta114'): '10号线', ('Sta168', 'Sta135'): '10号线', ('Sta135', 'Sta168'): '10号线', ('Sta135', 'Sta167'): '3号线', ('Sta167', 'Sta135'): '3号线', ('Sta135', 'Sta134'): '10号线', ('Sta134', 'Sta135'): '10号线', ('Sta134', 'Sta95'): '12号线', ('Sta95', 'Sta134'): '12号线', ('Sta134', 'Sta85'): '10号线', ('Sta85', 'Sta134'): '10号线', ('Sta85', 'Sta2'): '10号线', ('Sta2', 'Sta85'): '10号线', ('Sta2', 'Sta4'): '10号线', ('Sta4', 'Sta2'): '10号线', ('Sta4', 'Sta103'): '10号线', ('Sta103', 'Sta4'): '10号线', ('Sta103', 'Sta145'): '10号线', ('Sta145', 'Sta103'): '10号线', ('Sta145', 'Sta88'): '10号线', ('Sta88', 'Sta145'): '10号线', ('Sta88', 'Sta87'): '10号线', ('Sta87', 'Sta88'): '10号线', ('Sta87', 'Sta109'): '3号线', ('Sta109', 'Sta87'): '3号线', ('Sta87', 'Sta94'): '10号线', ('Sta94', 'Sta87'): '10号线', ('Sta94', 'Sta160'): '10号线', ('Sta160', 'Sta94'): '10号线', ('Sta160', 'Sta7'): '10号线', ('Sta7', 'Sta160'): '10号线', ('Sta7', 'Sta6'): '10号线', ('Sta6', 'Sta7'): '10号线', ('Sta6', 'Sta8'): '10号线', ('Sta8', 'Sta6'): '10号线', ('Sta8', 'Sta75'): '10号线', ('Sta75', 'Sta8'): '10号线', ('Sta75', 'Sta152'): '11号线', ('Sta152', 'Sta75'): '11号线', ('Sta75', 'Sta102'): '10号线', ('Sta102', 'Sta75'): '10号线', ('Sta84', 'Sta90'): '4号线', ('Sta90', 'Sta84'): '4号线', ('Sta84', 'Sta59'): '4号线', ('Sta59', 'Sta84'): '4号线', ('Sta59', 'Sta19'): '4号线', ('Sta19', 'Sta59'): '4号线', ('Sta19', 'Sta62'): '4号线', ('Sta62', 'Sta19'): '4号线', ('Sta62', 'Sta165'): '4号线', ('Sta165', 'Sta62'): '4号线', ('Sta165', 'Sta38'): '4号线', ('Sta38', 'Sta165'): '4号线', ('Sta38', 'Sta58'): '4号线', ('Sta58', 'Sta38'): '4号线', ('Sta43', 'Sta10'): '5号线', ('Sta10', 'Sta43'): '5号线', ('Sta10', 'Sta96'): '5号线', ('Sta96', 'Sta10'): '5号线', ('Sta96', 'Sta132'): '5号线', ('Sta132', 'Sta96'): '5号线', ('Sta132', 'Sta37'): '5号线', ('Sta37', 'Sta132'): '5号线', ('Sta37', 'Sta16'): '5号线', ('Sta16', 'Sta37'): '5号线', ('Sta16', 'Sta69'): '5号线', ('Sta69', 'Sta16'): '5号线', ('Sta69', 'Sta54'): '5号线', ('Sta54', 'Sta69'): '5号线', ('Sta77', 'Sta122'): '11号线', ('Sta122', 'Sta77'): '11号线', ('Sta122', 'Sta36'): '11号线', ('Sta36', 'Sta122'): '11号线', ('Sta36', 'Sta28'): '11号线', ('Sta28', 'Sta36'): '11号线', ('Sta28', 'Sta124'): '11号线', ('Sta124', 'Sta28'): '11号线', ('Sta124', 'Sta166'): '11号线', ('Sta166', 'Sta124'): '11号线', ('Sta166', 'Sta99'): '11号线', ('Sta99', 'Sta166'): '11号线', ('Sta99', 'Sta140'): '11号线', ('Sta140', 'Sta99'): '11号线', ('Sta45', 'Sta75'): '11号线', ('Sta75', 'Sta45'): '11号线', ('Sta152', 'Sta164'): 
'11号线', ('Sta164', 'Sta152'): '11号线', ('Sta164', 'Sta82'): '11号线', ('Sta82', 'Sta164'): '11号线', ('Sta82', 'Sta111'): '11号线', ('Sta111', 'Sta82'): '11号线', ('Sta111', 'Sta140'): '11号线', ('Sta140', 'Sta111'): '11号线', ('Sta140', 'Sta13'): '11号线', ('Sta13', 'Sta140'): '11号线', ('Sta13', 'Sta70'): '11号线', ('Sta70', 'Sta13'): '11号线', ('Sta70', 'Sta55'): '11号线', ('Sta55', 'Sta70'): '11号线', ('Sta55', 'Sta20'): '11号线', ('Sta20', 'Sta55'): '11号线', ('Sta20', 'Sta23'): '11号线', ('Sta23', 'Sta20'): '11号线', ('Sta23', 'Sta26'): '12号线', ('Sta26', 'Sta23'): '12号线', ('Sta23', 'Sta56'): '11号线', ('Sta56', 'Sta23'): '11号线', ('Sta56', 'Sta54'): '5号线', ('Sta54', 'Sta56'): '5号线', ('Sta56', 'Sta118'): '11号线', ('Sta118', 'Sta56'): '11号线', ('Sta118', 'Sta115'): '11号线', ('Sta115', 'Sta118'): '11号线', ('Sta115', 'Sta40'): '3号线', ('Sta40', 'Sta115'): '3号线', ('Sta115', 'Sta162'): '11号线', ('Sta162', 'Sta115'): '11号线', ('Sta162', 'Sta114'): '11号线', ('Sta114', 'Sta162'): '11号线', ('Sta15', 'Sta72'): '12号线', ('Sta72', 'Sta15'): '12号线', ('Sta15', 'Sta86'): '11号线', ('Sta86', 'Sta15'): '11号线', ('Sta86', 'Sta46'): '11号线', ('Sta46', 'Sta86'): '11号线', ('Sta46', 'Sta3'): '11号线', ('Sta3', 'Sta46'): '11号线', ('Sta3', 'Sta92'): '12号线', ('Sta92', 'Sta3'): '12号线', ('Sta3', 'Sta63'): '11号线', ('Sta63', 'Sta3'): '11号线', ('Sta63', 'Sta129'): '2号线', ('Sta129', 'Sta63'): '2号线', ('Sta63', 'Sta25'): '11号线', ('Sta25', 'Sta63'): '11号线', ('Sta25', 'Sta146'): '11号线', ('Sta146', 'Sta25'): '11号线', ('Sta146', 'Sta130'): '11号线', ('Sta130', 'Sta146'): '11号线', ('Sta130', 'Sta120'): '11号线', ('Sta120', 'Sta130'): '11号线', ('Sta136', 'Sta137'): '12号线', ('Sta137', 'Sta136'): '12号线', ('Sta137', 'Sta101'): '12号线', ('Sta101', 'Sta137'): '12号线', ('Sta101', 'Sta31'): '12号线', ('Sta31', 'Sta101'): '12号线', ('Sta31', 'Sta17'): '12号线', ('Sta17', 'Sta31'): '12号线', ('Sta17', 'Sta23'): '12号线', ('Sta23', 'Sta17'): '12号线', ('Sta26', 'Sta90'): '12号线', ('Sta90', 'Sta26'): '12号线', ('Sta90', 'Sta134'): '12号线', ('Sta134', 'Sta90'): '12号线', ('Sta95', 'Sta15'): '12号线', ('Sta15', 'Sta95'): '12号线', ('Sta72', 'Sta93'): '12号线', ('Sta93', 'Sta72'): '12号线', ('Sta93', 'Sta3'): '12号线', ('Sta3', 'Sta93'): '12号线', ('Sta92', 'Sta116'): '12号线', ('Sta116', 'Sta92'): '12号线', ('Sta116', 'Sta32'): '12号线', ('Sta32', 'Sta116'): '12号线', ('Sta32', 'Sta41'): '12号线', ('Sta41', 'Sta32'): '12号线', ('Sta91', 'Sta127'): '12号线', ('Sta127', 'Sta91'): '12号线', ('Sta60', 'Sta148'): '12号线', ('Sta148', 'Sta60'): '12号线', ('Sta148', 'Sta73'): '12号线', ('Sta73', 'Sta148'): '12号线'}
def createGraph(self):
g = Graph()
sta = []
self.write(sta)
temp_set = set()
for part_pass in sta:
temp_set.add(part_pass[0])
temp_set.add(part_pass[-1])
for temp_sta in temp_set:
g.addVertex(temp_sta)
for part_pass in sta:
g.addEdge(part_pass[0], part_pass[-1], len(part_pass)-1)
g.addEdge(part_pass[-1], part_pass[0], len(part_pass)-1)
self.sta = sta
self.g = g
    def write(self, sta):
        # Station sequences for each line segment of the network.
        a = ["Sta24", "Sta51", "Sta105", "Sta139", "Sta71", "Sta57", "Sta76",
             "Sta52", "Sta68", "Sta151", "Sta48", "Sta27", "Sta81", "Sta127"]
        b = ["Sta73", "Sta148", "Sta60", "Sta127"]
        c = ["Sta127", "Sta123", "Sta47"]
        d = ["Sta1", "Sta159", "Sta47"]
        e = ["Sta47", "Sta108", "Sta83", "Sta107", "Sta154", "Sta150",
             "Sta64", "Sta89"]
        f = ["Sta127", "Sta91", "Sta41"]
        g = ["Sta89", "Sta80", "Sta97", "Sta110", "Sta106", "Sta34",
             "Sta128", "Sta74", "Sta149", "Sta49", "Sta65"]
        h = ["Sta136", "Sta89"]
        i = ["Sta89", "Sta137", "Sta101", "Sta31", "Sta17", "Sta23"]
        j = ["Sta23", "Sta20", "Sta55", "Sta70", "Sta13", "Sta140"]
        k = ["Sta140", "Sta99", "Sta166", "Sta124", "Sta28", "Sta36",
             "Sta122", "Sta77"]
        l = ["Sta140", "Sta111", "Sta82", "Sta164", "Sta152", "Sta75"]
        m = ["Sta75", "Sta102"]
        n = ["Sta75", "Sta45"]
        o = ["Sta75", "Sta8", "Sta6", "Sta7", "Sta160", "Sta94", "Sta87"]
        p = ["Sta11", "Sta121", "Sta125", "Sta153", "Sta112", "Sta33",
             "Sta109", "Sta87"]
        q = ["Sta58", "Sta38", "Sta165", "Sta62", "Sta19", "Sta59",
             "Sta84", "Sta90"]
        r = ["Sta23", "Sta26", "Sta90"]
        s = ["Sta43", "Sta10", "Sta96", "Sta132", "Sta37", "Sta16",
             "Sta69", "Sta54", "Sta56"]
        t = ["Sta90", "Sta134"]
        u = ["Sta87", "Sta88", "Sta145", "Sta103", "Sta4", "Sta2",
             "Sta85", "Sta134"]
        v = ["Sta87", "Sta35", "Sta42", "Sta147", "Sta117", "Sta44",
             "Sta158", "Sta142", "Sta141", "Sta113", "Sta167", "Sta135"]
        w = ["Sta56", "Sta118", "Sta115"]
        x = ["Sta115", "Sta126", "Sta29", "Sta144", "Sta67", "Sta30", "Sta41"]
        y = ["Sta143", "Sta156", "Sta61", "Sta50", "Sta119", "Sta66",
             "Sta12", "Sta161", "Sta21", "Sta133", "Sta22", "Sta138", "Sta41"]
        z = ["Sta120", "Sta130", "Sta146", "Sta25", "Sta63"]
        a1 = ["Sta47", "Sta18", "Sta79", "Sta53", "Sta163", "Sta9",
              "Sta129", "Sta63"]
        a2 = ["Sta41", "Sta32", "Sta116", "Sta92", "Sta3"]
        a3 = ["Sta3", "Sta93", "Sta72", "Sta15"]
        a4 = ["Sta3", "Sta46", "Sta86", "Sta15"]
        a5 = ["Sta157", "Sta114"]
        a6 = ["Sta115", "Sta162", "Sta114"]
        a7 = ["Sta115", "Sta40", "Sta131", "Sta39", "Sta100", "Sta135"]
        a8 = ["Sta114", "Sta168", "Sta135"]
        a9 = ["Sta135", "Sta134"]
        a10 = ["Sta15", "Sta95", "Sta134"]
        a11 = ["Sta56", "Sta23"]
        a12 = ["Sta63", "Sta3"]
        a13 = ["Sta15", "Sta114"]
        sta.extend([a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r,
                    s, t, u, v, w, x, y, z,
                    a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13])
def dfs(self, s, s2, g, sta):
stack,seen,big_list = [s,],[],[]
for i in range(len(g.getVertex(s).getNeighbors())+1):
seen.append(s)
while len(stack):
vertex = stack.pop()
big_list.append(vertex)
nodes = list(set(g.getVertex(vertex).getNeighbors()))
names = [ver.getName() for ver in nodes]
if s2 in names:
# print(big_list)
yield(big_list)
while True:
if len(big_list)>1:
pop2 = big_list.pop()
try:
if seen[-1] == seen[-2]:
pop1 = seen.pop()
if seen[-2] == seen[-1]:
break
else:
pop1 = seen.pop()
continue
pop1 = seen.pop()
except:
break
continue
flag = 0
for neighbor in names:
if neighbor not in seen:
flag += 1
stack.append(neighbor)
for i in range(flag):
seen.append(vertex)
if flag>1:
seen.append(vertex)
if flag==0:
while True:
if len(big_list)>1:
pop2 = big_list.pop()
if seen[-1] == seen[-2]:
pop1 = seen.pop()
if seen[-2] == seen[-1]:
break
else:
pop1 = seen.pop()
continue
pop1 = seen.pop()
def getPassInfo(self, s, s2):
        # initialization
stationinfo_dict = self.stationinfo_dict
passes37 = self.passes37
between_line = self.between_line
sta = self.sta
g = self.g
        if g.getVertex(s) is None:
g.addVertex(s)
for sta_list in sta:
if s in sta_list:
index_of_s = sta_list.index(s)
                    if s2 in sta_list and g.getVertex(s2) is None:
index_of_s2 = sta_list.index(s2)
if index_of_s < index_of_s2:
return [sta_list[index_of_s: index_of_s2+1: 1], index_of_s2-index_of_s, [stationinfo_dict[s][0]], 0, 0,0,1.0]
else:
return [sta_list[index_of_s: index_of_s2-1: -1], index_of_s-index_of_s2, [stationinfo_dict[s][0]], 0, 0,0,1.0]
g.addEdge(sta_list[0], s, index_of_s)
g.addEdge(s, sta_list[0], index_of_s)
g.addEdge(s, sta_list[-1], len(sta_list)-index_of_s)
g.addEdge(sta_list[-1], s, len(sta_list)-index_of_s)
g.deleteEdge(sta_list[0], sta_list[-1])
g.deleteEdge(sta_list[-1], sta_list[0])
passes37[(s, sta_list[0])] = sta_list[index_of_s-1:0:-1]
passes37[(s, sta_list[-1])] = sta_list[index_of_s+1:-1:1]
break
        if g.getVertex(s2) is None:
g.addVertex(s2)
for sta_list in sta:
if s2 in sta_list:
index_of_s2 = sta_list.index(s2)
g.addEdge(sta_list[0], s2, index_of_s2)
g.addEdge(s2, sta_list[0], index_of_s2)
g.addEdge(s2, sta_list[-1], len(sta_list)-index_of_s2)
g.addEdge(sta_list[-1], s2, len(sta_list)-index_of_s2)
g.deleteEdge(sta_list[0], sta_list[-1])
g.deleteEdge(sta_list[-1], sta_list[0])
passes37[(sta_list[0], s2)] = sta_list[1:index_of_s2:1]
passes37[(sta_list[-1], s2)] = sta_list[-2:index_of_s2:-1]
break
# if (s, s2) in passes37.keys():
# big_list = [[g.addVertex(s), g.addVertex(s2)], g.addVertex(s).getWeight(g.getVertex(s2))]
# else:
big_list = []
for item in self.dfs(s, s2, g, sta):
            # 1. number of stations (path weight)
weight = 0
for i in range(len(item)-1):
weight += g.getVertex(item[i]).getWeight(g.getVertex(item[i+1]))
weight += g.getVertex(item[-1]).getWeight(g.getVertex(s2))
            # 2. the route expressed as transfer nodes
itemm = list(item)
itemm.append(s2)
big_list.append([itemm, weight])
big_list.sort(key=self.takeWeight1)
big_list = big_list[:9]
for i in range(len(big_list)):
            # 3. full station-by-station route
new_list = []
for j in range(len(big_list[i][0])-1):
new_list.append(big_list[i][0][j])
new_list.extend(passes37[(big_list[i][0][j], big_list[i][0][j+1])])
new_list.append(s2)
# big_list[i][1] = len(new_list[i][0]) - 1
            # 4. number of transfers
oldline = between_line[(new_list[0], new_list[1])]
oldsta = new_list[0]
passed_line = [oldline]
passes_sta = []
for k in range(len(new_list)-1):
line = between_line[(new_list[k], new_list[k+1])]
sta = new_list[k]
if line != oldline:
passed_line.append(line)
passes_sta.append((oldsta, sta))
if k==len(new_list)-2:
passes_sta.append((sta, s2))
oldsta = sta
oldline = line
big_list[i][0] = new_list
# big_list[i].append(len(passed_line)-1)
big_list[i].append(passes_sta)
big_list[i].append(len(passed_line)-1)
big_list[i].append(passed_line)
big_list.sort(key=self.takeWeight3)
big_list = big_list[:3]
        # 5. compute the weight of each route
minest = 10
for line in big_list:
if minest > line[3]:
minest = line[3]
total = 0
for i in range(len(big_list)):
v = 0 + ( -0.75 * (big_list[i][1])/big_list[0][1] ) + (-1.33 * (big_list[i][3]-minest))
total += math.exp(v)
big_list[i].append(v)
big_list[i].append(math.exp(v))
        # 6. compute the weight proportions
for i in range(len(big_list)):
big_list[i].append(big_list[i][6]/total)
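        # The weighting above resembles a multinomial-logit route-choice model
        # (an interpretation, not stated in the original):
        #   v_i = -0.75 * w_i / w_0 - 1.33 * (t_i - t_min)
        #   P_i = exp(v_i) / sum_j exp(v_j)
        # where w_i is the station count of route i, w_0 that of the
        # first-listed route, t_i the transfer count of route i, and t_min
        # the minimum transfer count among the candidates.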
        # 7. total time
return big_list
def takeWeight1(self,iterable):
return iterable[1]
def takeWeight3(self,iterable):
return iterable[3]
class DB():
def __init__(self, DB):
DB_USER = 'maker0'
DB_PASS = 'Maker0000'
DB_HOST = 'rm-bp11labi01950io698o.mysql.rds.aliyuncs.com'
DB_PORT = 3306
DATABASE = DB
self.connect_info = 'mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8'.format(DB_USER, DB_PASS, DB_HOST, DB_PORT, DATABASE) #1
        # query statement: select all rows from the trips table
        # sql = """select * from trips"""
        # read_sql_query takes two arguments: the SQL statement and the DB connection
        # df = pd.read_sql_query(sql, con=self.connect_info)
        # print the query result
        print('connected successfully')
def excute(self, sql="""select * from trips"""):
df = pd.read_sql_query(sql,con=self.connect_info)
        print('query finished')
return(df)
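    # A minimal usage sketch (the database name is the one used in UseDB below):
    #
    #   conn = DB('library1')
    #   df = conn.excute("SELECT * FROM trips LIMIT 10")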
# def getPassRate(self, s1, s2):
class UseDB():
@staticmethod
def getPassRate(s1,s2):
conn = DB('library1')
        df = conn.excute(f"SELECT * FROM trips WHERE 进站名称='{s1}' and 出站名称='{s2}'")
df['time'] = (df['出站时间'] - df['进站时间']).dt.total_seconds()
print(np.array(df['time']))
np_df = np.array(df['time']).reshape(-1,1)
result_list = Sklearn().kmean(np_df, 2)
print(result_list)
result_dict = list(Counter(result_list).values())
result_rate = []
for item in result_dict:
result_rate.append(item/(result_dict[0] + result_dict[1]))
return result_rate
class Sklearn():
def __init__(self):
pass
# X = np.array([[1, 2], [1, 4], [1, 0],[10, 2], [10, 4], [10, 0]])
# kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
# print(kmeans.labels_)
# a = kmeans.predict([[0, 0], [12, 3]])
# print(kmeans.cluster_centers_)
    @staticmethod
    def kmean(x, n):
        # no 'self' here: a @staticmethod receives only the explicit
        # arguments, so Sklearn().kmean(np_df, 2) passes the data and the
        # cluster count as intended
        kmeans = KMeans(n_clusters=n, random_state=0).fit(x)
        return kmeans.labels_
def main():
# db = DB('library_flow')
# cursor = db.connect_info.cursor()
# rf_txt = ReadFile()
# rf_txt.readTxt()
# b = UseDB.getPassRate('Sta77','Sta115')
a = Dfs()
print(a.getPassInfo('Sta17','Sta72'))
# print()
if __name__ == '__main__':
main()
|
{"hexsha": "adf8307c0e877fc916ba73062bc6372d86bd6241", "size": 35741, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/subwayflow/graph.py", "max_stars_repo_name": "StevenRCE0/MassTransportUtilizer", "max_stars_repo_head_hexsha": "6b3ac1cd447d736094cf16e8a482924b16076705", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/subwayflow/graph.py", "max_issues_repo_name": "StevenRCE0/MassTransportUtilizer", "max_issues_repo_head_hexsha": "6b3ac1cd447d736094cf16e8a482924b16076705", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/subwayflow/graph.py", "max_forks_repo_name": "StevenRCE0/MassTransportUtilizer", "max_forks_repo_head_hexsha": "6b3ac1cd447d736094cf16e8a482924b16076705", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 58.0211038961, "max_line_length": 9603, "alphanum_fraction": 0.4885985283, "include": true, "reason": "import numpy", "num_tokens": 14094}
|
import streamlit as st
import json
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
def load_data():
with open('data/weekly.json','r') as file:
weekly_keywords = json.load(file)
with open('data/combined.json') as file:
combined_keyword = json.load(file)
dates = [date for date in weekly_keywords]
return combined_keyword,weekly_keywords,dates
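# Expected JSON shapes, inferred from the usage below (an assumption about
# the data files, not documented in this script):
#   data/combined.json : {"word": frequency, ...}
#   data/weekly.json   : {"YYYY-MM-DD": {"word": frequency, ...}, ...}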
def get_word_cloud(image,data,max_words,max_font_size):
if image == 'default':
wordcloud = WordCloud(width=400, height=400, repeat=True, max_words=max_words,
max_font_size= max_font_size,background_color='white',
).generate_from_frequencies(data)
else:
path = f'data/image_masks/{image}.jpg'
mask = np.array(Image.open(path))
wordcloud = WordCloud(width=400, height=400, repeat=True, max_words=max_words,
max_font_size= max_font_size,background_color='white',
mask = mask).generate_from_frequencies(data)
return wordcloud
st.title("2020 Word Clouds based on Google Keyword and Twitter Hashtag trends")
image = st.sidebar.selectbox(label='Select Image Mask',options=['default','twitter','hashtag','heart'])
combined_keyword,weekly_keywords,dates = load_data()
st.header("Entire Year")
wordcloud = get_word_cloud(image,combined_keyword,800,15)
fig1 = plt.figure()
plt.imshow(wordcloud)
plt.axis("off")
st.pyplot(fig1)
st.header("Weekly")
date = st.selectbox(label='Select Date',options=dates)
keywords = weekly_keywords[date]
wordcloud = get_word_cloud(image , keywords,200,25)
fig2 = plt.figure()
plt.imshow(wordcloud)
plt.axis("off")
st.pyplot(fig2)
|
{"hexsha": "5143b96216c626906717cac65c1f7eb9b048631f", "size": 1767, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "rahulbanerjee26/Word_Clouds", "max_stars_repo_head_hexsha": "765ccabf07aa5a6cf382a0e49c579c1ab8a7de04", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-31T17:55:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-31T17:55:25.000Z", "max_issues_repo_path": "app.py", "max_issues_repo_name": "rahulbanerjee26/Word_Clouds", "max_issues_repo_head_hexsha": "765ccabf07aa5a6cf382a0e49c579c1ab8a7de04", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "rahulbanerjee26/Word_Clouds", "max_forks_repo_head_hexsha": "765ccabf07aa5a6cf382a0e49c579c1ab8a7de04", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5957446809, "max_line_length": 104, "alphanum_fraction": 0.6898698359, "include": true, "reason": "import numpy", "num_tokens": 394}
|
import os
import pickle
import sys
import time
import math
import argparse
from typing import Any, List
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision as vsn
from skimage.io import imread
from tqdm import tqdm
from skimage.transform import resize
from models.nets import ResUNet
from utils.data_loaders import get_data_loaders, get_test_loader
from utils.data_vis import plot_from_torch
from utils.evaluations import DiceLoss, calc_metric, get_iou_vector
import re
# important: parse fold number from filename to avoid typos
def parse_fold_number(filename: str) -> int:
    match = re.match(r'.*_fold-(\d)\.pth', filename)
    if not match:
        raise ValueError(f"cannot parse fold number from '{filename}'")
    fold_num = int(match.group(1))
    print("detected fold", fold_num)
    return fold_num
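# e.g. parse_fold_number('best_resunet_tgs_slt_fold-3.pth') -> 3
# (the example filename is illustrative; any '*_fold-<digit>.pth' name matches)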
parser = argparse.ArgumentParser(description='Make Preds')
parser.add_argument('--imsize', default=128, type=int,
help='imsize to use for training')
parser.add_argument('--batch_size', default=128, type=int,
help='size of batches')
parser.add_argument('--gpu', default=0, type=int,
help='which gpu to run')
# parser.add_argument('--fold_num', type=int, required=True, #default=0,
# help='which fold to make predictions for')
parser.add_argument('--weight_file', default='resunet.pth', type=str,
help='which weight file to make predictions for')
parser.add_argument('--num_folds', default=5, type=int,
help='number of cross val folds')
#parser.add_argument('--model_name', default='resunet', type=str,
# help='name of model for saving/loading weights')
#parser.add_argument('--exp_name', default='tgs_slt', type=str,
# help='name of experiment for saving files')
parser.add_argument('--debug', action='store_true',
help='whether to display debug info')
parser.add_argument('--flip_tta', action='store_true',
help='whether to horizontal flip TTA')
#parser.add_argument('--use_mt', action='store_true',
# help='whether to use mean teacher model')
#parser.add_argument('--use_swa', action='store_true',
# help='whether to use mean teacher model')
parser.add_argument('--use_bool', action='store_true',
help='whether to use empty predictions')
parser.add_argument('--save_raw', action='store_true',
help='whether to export predicts without thresholds')
parser.add_argument('--mosaic', default=0, type=int,
help='how to use mosaic: 0-disabled, 1-channel 1, 2 - channel 2')
parser.add_argument('--score_only', action='store_true',
help='don\'t generate predictions')
def predict(net: Any, test_loader: Any, fold_num: int, to_csv: bool, threshold: float) -> Any:
net.eval()
all_predicts = []
all_masks = []
rles = []
ids = []
# no gradients during validation
with torch.no_grad():
for i, data in enumerate(tqdm(test_loader)):
test_imgs = data['img'].to(device)
test_ids = data['id']
blanks = data['blank']
# get predictions
preds, chck_preds, edges_preds = net(test_imgs)
preds = preds.sigmoid()
chck_preds = chck_preds.sigmoid() > 0.5
if args.flip_tta:
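                # horizontal-flip test-time augmentation: run the flipped
                # image through the net, flip the prediction back, and
                # average it with the prediction on the original image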
test_imgs_lr = data['img_lr'].to(device)
preds_lr, check_lr, edges_preds_lr = net(test_imgs_lr)
preds_lr_ = preds_lr.sigmoid()
check_lr = check_lr.sigmoid() > 0.5
chck_preds = (check_lr + chck_preds) / 2.
                # flip the flipped-image prediction back so it aligns with
                # the original orientation
                preds_lr = np.copy(preds_lr_.data.cpu().numpy()[:, :, :, ::-1])
# print(preds_lr.shape)
preds = (preds + torch.from_numpy(preds_lr).to(device)) / 2.
# set masks to 0 with low probability of having mask
if args.use_bool:
chck_preds = chck_preds > 0.5
preds *= chck_preds.view(chck_preds.size(0),1,1,1).expand_as(preds).float()
preds *= blanks.view(blanks.size(0),1,1,1).expand_as(preds).float().to(device)
if args.debug and i == 0:
img_grid = vsn.utils.make_grid(test_imgs, normalize=True)
msk_grid = vsn.utils.make_grid(preds)
if args.flip_tta:
img_lr_grid = vsn.utils.make_grid(test_imgs_lr, normalize=True)
vsn.utils.save_image(img_lr_grid, '../imgs/test_imgs_lr.png')
vsn.utils.save_image(img_grid, '../imgs/test_imgs.png')
vsn.utils.save_image(msk_grid, '../imgs/test_pred.png')
pred_np = preds.data.cpu().numpy()
pred_np = pred_np.reshape((-1, pred_np.shape[2], pred_np.shape[3]))
for j in range(pred_np.shape[0]):
if args.imsize == 256:
predicted_mask = resize(pred_np[j][27:229, 27:229], (101,101),
preserve_range=True)
else:
predicted_mask = pred_np[j][13:114, 13:114]
ids.append(test_ids[j])
if to_csv:
predicted_mask = np.where(predicted_mask > threshold, 1, 0)
rles.append(rle_encode(predicted_mask.astype(np.int32)))
else:
all_predicts.append(predicted_mask)
if 'msk' in data:
masks = data['msk'].cpu().numpy()
masks = masks.reshape(-1, masks.shape[2], masks.shape[3])
all_masks.append(masks[j, 13:114, 13:114])
return (ids, rles) if to_csv else (ids, np.array(all_predicts), np.array(all_masks))
def valid(net, valid_loader, fold_num, use_lovasz=False, save_imgs=False):
pred = predict(net, valid_loader, fold_num, to_csv=False, threshold=0.4)
return pred
# src: https://www.kaggle.com/aglotero/another-iou-metric
def iou_metric(y_true_in, y_pred_in, print_table=False):
labels = y_true_in
y_pred = y_pred_in
true_objects = 2
pred_objects = 2
    # if all zeros, the original code generates wrong bins [-0.5, 0, 0.5]
temp1 = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=([0,0.5,1], [0,0.5, 1]))
intersection = temp1[0]
# Compute areas (needed for finding the union between all objects)
area_true = np.histogram(labels,bins=[0,0.5,1])[0]
area_pred = np.histogram(y_pred, bins=[0,0.5,1])[0]
area_true = np.expand_dims(area_true, -1)
area_pred = np.expand_dims(area_pred, 0)
# Compute union
union = area_true + area_pred - intersection
# Exclude background from the analysis
intersection = intersection[1:,1:]
intersection[intersection == 0] = 1e-9
union = union[1:,1:]
union[union == 0] = 1e-9
# Compute the intersection over union
iou = intersection / union
# Precision helper function
def precision_at(threshold, iou):
matches = iou > threshold
true_positives = np.sum(matches, axis=1) == 1 # Correct objects
false_positives = np.sum(matches, axis=0) == 0 # Missed objects
false_negatives = np.sum(matches, axis=1) == 0 # Extra objects
tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
return tp, fp, fn
# Loop over IoU thresholds
prec = []
if print_table:
print("Thresh\tTP\tFP\tFN\tPrec.")
for t in np.arange(0.5, 1.0, 0.05):
tp, fp, fn = precision_at(t, iou)
if (tp + fp + fn) > 0:
p = tp / (tp + fp + fn)
else:
p = 0
if print_table:
print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
prec.append(p)
if print_table:
print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
return np.mean(prec)
def iou_metric_batch(y_true_in, y_pred_in, threshold):
# print("iou_metric_batch: y_true_in", y_true_in.shape, "y_pred_in", y_pred_in.shape)
batch_size = y_true_in.shape[0]
metric = []
for batch in range(batch_size):
value = iou_metric(y_true_in[batch], y_pred_in[batch] > threshold)
metric.append(value)
return np.mean(metric)
def load_masks(fold_num):
train_df = pd.read_csv("../data/train.csv", index_col="id", usecols=[0])
# depths_df = pd.read_csv("../data/depths.csv", index_col="id")
# train_df = train_df.join(depths_df)
# test_df = depths_df[~depths_df.index.isin(train_df.index)]
print("train_df", train_df.shape)
assert(train_df.shape[0] == 4000)
print("reading train masks")
masks = np.array([imread("../data/train/masks/%s.png" % idx)
for idx in train_df.index])
masks = np.expand_dims(masks, axis=-1)
print("masks range:", np.amin(masks), np.amax(masks))
masks = masks.astype(float) / np.amax(masks)
masks = np.squeeze(masks)
print("masks", masks.dtype)
with open("../data/fixed_folds.pkl", "rb") as f:
splits = pickle.load(f)
train_idx, valid_idx = splits[fold_num]
masks = masks[valid_idx]
print("masks", masks.shape)
return masks
def rle_encode(im: Any) -> str:
'''
im: numpy array, 1-mask, 0-background
Returns run length as string
'''
pixels = im.flatten(order='F')
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
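# worked example: rle_encode(np.array([[0, 1], [1, 0]])) == '2 2'
# (column-major flattening gives [0, 1, 1, 0]: one run starting at pixel 2,
# length 2)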
def write_csv(filename: str, ids: List[str], rles: List[str]):
subm = pd.DataFrame.from_dict({'id':ids, 'rle_mask':rles}, orient='index').T
#if args.use_mt:
# subm.to_csv('../subm/{}_{}_mt_fold-{}.csv'.format(args.model_name, args.exp_name, fold_num), index=False)
#elif args.use_swa:
# subm.to_csv('../subm/{}_{}_swa_fold-{}.csv'.format(args.model_name, args.exp_name, fold_num), index=False)
#else:
# subm.to_csv('../subm/{}_{}_best_fold-{}.csv'.format(args.model_name, args.exp_name, fold_num), index=False)
subm.to_csv(filename, index=False)
subm.index.names = ['id']
subm.columns = ['id', 'rle_mask']
print(subm.head())
def make_preds():
_, valid_loader = get_data_loaders(imsize=args.imsize,
batch_size=args.batch_size,
num_folds=args.num_folds,
mosaic_mode=args.mosaic,
fold=fold_num)
print("predicting on the validation dataset")
ids, preds_val, masks = valid(net, valid_loader, fold_num)
# TODO: scipy.optimize.minimize
print("searching for the best threshold")
thresholds = np.linspace(0.3, 0.7, 31)
ious = np.array([get_iou_vector(masks, preds_val, threshold) for threshold in tqdm(thresholds)])
print("iou", ious)
threshold_best_index = np.argmax(ious)
best_iou = ious[threshold_best_index]
best_threshold = thresholds[threshold_best_index]
print("validation:", best_iou, "best threshold", best_threshold)
if args.score_only:
return
# write predicts for the train set
directory, name_ext = os.path.split(MODEL_CKPT)
name, ext = os.path.splitext(name_ext)
train_predicts = os.path.join(directory, "loc%.04f_train_" % best_iou + name +
(".pkl" if args.save_raw else ".csv" ))
print('generating train predictions to %s' % train_predicts)
rles = []
if args.save_raw:
with open(train_predicts, "wb") as f:
pickle.dump(preds_val, f)
else:
for pred in preds_val:
pred = np.where(pred > best_threshold, 1, 0)
rles.append(rle_encode(pred.astype(np.int32)))
write_csv(train_predicts, ids, rles)
# write predicts for the test set
print("predicting on the test dataset")
test_predicts = os.path.join(directory, "loc%.04f_test_" % best_iou + name +
(".pkl" if args.save_raw else ".csv" ))
print('generating test predictions to %s' % test_predicts)
if args.save_raw:
ids, preds, masks = predict(net, test_loader, fold_num, False, 0)
print(len(ids), len(preds))
assert(len(ids) == len(preds))
with open(test_predicts, "wb") as f:
pickle.dump((ids, preds), f)
else:
ids, preds = predict(net, test_loader, fold_num, True, best_threshold)
print(len(ids), len(preds))
assert(len(ids) == len(preds))
write_csv(test_predicts, ids, preds)
if __name__ == '__main__':
args = parser.parse_args()
print("predicting on", args.weight_file)
fold_num = parse_fold_number(args.weight_file)
# set model filenames
#model_params = [args.model_name, args.exp_name, fold_num]
#if args.use_mt:
# MODEL_CKPT = '../model_weights/best_meanteacher_{}_{}_fold-{}.pth'.format(*model_params)
#elif args.use_swa:
# MODEL_CKPT = '../model_weights/swa_{}_{}_fold-{}.pth'.format(*model_params)
#else:
# MODEL_CKPT = '../model_weights/best_{}_{}_fold-{}.pth'.format(*model_params)
MODEL_CKPT = args.weight_file
# get the loaders
test_loader = get_test_loader(imsize=args.imsize, batch_size=args.batch_size,
mosaic_mode=args.mosaic)
net = ResUNet(use_bool=True)
if args.gpu == 99:
device = torch.device("cuda:0")
net = nn.DataParallel(net, device_ids=[0,1]).cuda()
else:
device = torch.device("cuda:{}".format(args.gpu) if torch.cuda.is_available() else "cpu")
# torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
net.to(device)
state_dict = torch.load(MODEL_CKPT, map_location=lambda storage, loc: storage.cuda(args.gpu))
net.load_state_dict(state_dict)
make_preds()
|
{"hexsha": "08fa4bf6e1150d7e7b327b0ba1a1deb9b005e482", "size": 14220, "ext": "py", "lang": "Python", "max_stars_repo_path": "code_florian/scripts/make_preds.py", "max_stars_repo_name": "artyompal/kaggle_salt", "max_stars_repo_head_hexsha": "3c323755730745ac7bbfd106f1f20919cceef0ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code_florian/scripts/make_preds.py", "max_issues_repo_name": "artyompal/kaggle_salt", "max_issues_repo_head_hexsha": "3c323755730745ac7bbfd106f1f20919cceef0ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-25T23:31:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-25T23:31:28.000Z", "max_forks_repo_path": "code_florian/scripts/make_preds.py", "max_forks_repo_name": "artyompal/kaggle_salt", "max_forks_repo_head_hexsha": "3c323755730745ac7bbfd106f1f20919cceef0ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-08T09:30:38.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-08T09:30:38.000Z", "avg_line_length": 37.92, "max_line_length": 116, "alphanum_fraction": 0.6124472574, "include": true, "reason": "import numpy", "num_tokens": 3606}
|
#!/usr/bin/env python
# coding: utf-8
# In[4]:
import pandas
from pandas import read_csv
from matplotlib import pyplot
from statsmodels.tsa.arima.model import ARIMA
from sklearn.metrics import mean_squared_error
from math import sqrt
import os
import numpy as np
def prediction_arima(tsA):
if not tsA:
print("tsA videeeeeeeeeeeeeeeeeeeeeeeeeeeeeee")
try:
sdata = open('data\\sampledata.csv')
except:
sdata = open('sampledata.csv')
#print("sdata.read().split('\n') = \n",sdata.read().split('\n'))
print("done")
tsA = sdata.read().split('\n')
tsA = list(map(int, tsA))
# split into train and test sets
size = int(len(tsA) * 0.66)
train, test = tsA[0:size], tsA[size:len(tsA)]
history = [x for x in train]
predictions = list()
LADATA = [x for x in train]
LAPREDICTION = [x for x in train]
# walk-forward validation
for t in range(len(test)):
model = ARIMA(history, order=(5, 1, 0))
model_fit = model.fit()
output = model_fit.forecast()
yhat = output[0]
predictions.append(yhat);LAPREDICTION.append(yhat)
obs = test[t];LADATA.append(test[t])
history.append(obs)
print('predicted=%f, expected=%f' % (yhat, obs))
# evaluate forecasts
    rmse = sqrt(mean_squared_error(test, predictions))
    print('Test RMSE: %.3f' % rmse)
print("predit!!!!!!!! à len = ", len(predictions))
# plot forecasts against actual outcomes
# pyplot.plot(test)
# pyplot.plot(predictions, color='red')
# pyplot.show()
    return (LAPREDICTION, rmse, LADATA)  # predictions, RMSE, and observed data
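# Walk-forward validation, as implemented above: an ARIMA(5,1,0) model is
# refit at every step, forecasts one point ahead, and the observed value is
# appended to the history before the next refit.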
# In[ ]:
if __name__ == "__main__":
# load dataset
# get current directory
path = os.getcwd()
print("Current Directory", path)
# goTo parent directory
parent = os.path.abspath(os.path.join(path, os.pardir))
print(parent)
os.chdir(parent)
sdata = open('back_codes\\samples\\sampledata.csv')
#print("sdata.read().split('\n') = \n",sdata.read().split('\n'))
print("done")
tsA = sdata.read().split('\n')
tsA = list(map(int, tsA))
print("-------------------------------")
print("database.type(): ", type(tsA)) #tsA est une liste
print("done")
p=prediction_arima(tsA)
print("-------------------------------")
print("p[0]: ",type(p[0]))
A = np.array(tsA)
print("database in array(): ", A)
print("done")
print("database in array(): ", A.shape)
print("done")
#print("database.describe(): ", tsA.describe())
print("done")
#print("arima(tsA) = ", arima(tsA))
# In[ ]:
|
{"hexsha": "50a8764c2fa2d8b63462b1d0eea2e7efebdb345f", "size": 2734, "ext": "py", "lang": "Python", "max_stars_repo_path": "back_codes/arima_function.py", "max_stars_repo_name": "Hermann-web/Demand-forecasting-with-python", "max_stars_repo_head_hexsha": "393fcb115a02073d359bd407ec7257127e877188", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "back_codes/arima_function.py", "max_issues_repo_name": "Hermann-web/Demand-forecasting-with-python", "max_issues_repo_head_hexsha": "393fcb115a02073d359bd407ec7257127e877188", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "back_codes/arima_function.py", "max_forks_repo_name": "Hermann-web/Demand-forecasting-with-python", "max_forks_repo_head_hexsha": "393fcb115a02073d359bd407ec7257127e877188", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-04T23:55:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-04T23:55:25.000Z", "avg_line_length": 26.8039215686, "max_line_length": 72, "alphanum_fraction": 0.5885149963, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 747}
|
from ..parametric import _compute_hommel_value, _true_positive_fraction
from nilearn.glm.thresholding import _compute_hommel_value as nl_compute_hommel_value
from nilearn.glm.thresholding import _true_positive_fraction as nl_true_positive_fraction
from numpy.testing import assert_allclose
from scipy.stats import norm
import numpy as np
def _test_hommel(alpha = .05):
'''
verify that results are equivalent between this and nilearn's
implementation of parametric ARI.
'''
p_vals = np.random.uniform(size = 100)
hom = _compute_hommel_value(p_vals, alpha)
z_vals = norm.isf(p_vals)
hom_nl = nl_compute_hommel_value(z_vals, alpha)
assert(hom == hom_nl)
tpf = _true_positive_fraction(p_vals, hom, alpha)
tpf_nl = nl_true_positive_fraction(z_vals, hom, alpha)
assert_allclose(tpf, tpf_nl, atol = 1e-5)
def test_hommel():
np.random.seed(0)
for i in range(100):
_test_hommel()
|
{"hexsha": "2501ed3f4ca91dbca4833ca3af2b368d1bae417c", "size": 939, "ext": "py", "lang": "Python", "max_stars_repo_path": "mne_ari/ari/tests/test_hommel.py", "max_stars_repo_name": "john-veillette/mne-ari", "max_stars_repo_head_hexsha": "ae80b38140a8ebb69c6aee34c7f7240f53d160b3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-11T15:14:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T14:52:08.000Z", "max_issues_repo_path": "mne_ari/ari/tests/test_hommel.py", "max_issues_repo_name": "john-veillette/mne-ari", "max_issues_repo_head_hexsha": "ae80b38140a8ebb69c6aee34c7f7240f53d160b3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-03-03T17:07:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T12:32:40.000Z", "max_forks_repo_path": "mne_ari/ari/tests/test_hommel.py", "max_forks_repo_name": "john-veillette/mne-ari", "max_forks_repo_head_hexsha": "ae80b38140a8ebb69c6aee34c7f7240f53d160b3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5357142857, "max_line_length": 89, "alphanum_fraction": 0.7550585729, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 252}
|
function [ r, center ] = circle_dia2imp_2d ( p1, p2 )
%*****************************************************************************80
%
%% CIRCLE_DIA2IMP_2D converts a diameter to an implicit circle in 2D.
%
% Discussion:
%
% The diameter form of a circle is:
%
% P1 and P2 are the endpoints of a diameter.
%
% The implicit form of a circle in 2D is:
%
% ( X - CENTER(1) )^2 + ( Y - CENTER(2) )^2 = R^2
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 04 December 2010
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Input, real P1(2,1), P2(2,1), two points that are the
% endpoints of a diameter of the circle.
%
% Output, real R, the radius of the circle.
%
% Output, real CENTER(2,1), the center of the circle.
%
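%  Example (illustrative):
%
%    [ r, center ] = circle_dia2imp_2d ( [-1;0], [1;0] )
%
%    returns r = 1 and center = [0;0], i.e. the unit circle.
%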
r = 0.5 * sqrt ( sum ( ( p2(1:2,1) - p1(1:2,1) ).^2 ) );
center(1:2,1) = 0.5 * ( p1(1:2,1) + p2(1:2,1) );
return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/geometry/circle_dia2imp_2d.m"}
|
# Note that this script can accept some limited command-line arguments, run
# `julia build_tarballs.jl --help` to see a usage message.
using BinaryBuilder
name = "FastJet_Julia_Wrapper"
version = v"0.7.0"
# Collection of sources required to build FastJet_Julia_Wrapper
sources = [
GitSource("https://github.com/jstrube/FastJet_Julia_Wrapper.git", "6d4181ca351c7a40348745097c87afd97bf9ce62"; unpack_target="FastJet_Julia_Wrapper"),
ArchiveSource("https://julialang-s3.julialang.org/bin/linux/x64/1.3/julia-1.3.1-linux-x86_64.tar.gz", "faa707c8343780a6fe5eaf13490355e8190acf8e2c189b9e7ecbddb0fa2643ad"; unpack_target="julia-x86_64-linux-gnu"),
ArchiveSource("https://github.com/Gnimuc/JuliaBuilder/releases/download/v1.3.0/julia-1.3.0-x86_64-apple-darwin14.tar.gz", "f2e5359f03314656c06e2a0a28a497f62e78f027dbe7f5155a5710b4914439b1"; unpack_target="julia-x86_64-apple-darwin14"),
]
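# The Julia tarballs above provide libjulia and its headers for each target;
# their unpacked location is what gets passed to CMake as Julia_PREFIX in the
# build script below (an inference from the recipe: the CxxWrap-based wrapper
# must link against a platform-matched libjulia).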
# Bash recipe for building across all platforms
script = raw"""
case "$target" in
arm-linux-gnueabihf|x86_64-linux-gnu)
Julia_PREFIX=${WORKSPACE}/srcdir/julia-$target/julia-1.3.1
;;
x86_64-apple-darwin14|x86_64-w64-mingw32)
Julia_PREFIX=${WORKSPACE}/srcdir/julia-$target/juliabin
;;
esac
cd ${WORKSPACE}/srcdir/FastJet_Julia_Wrapper/FastJet_Julia_Wrapper*
mkdir build && cd build
cmake -DJulia_PREFIX=${Julia_PREFIX} -DCMAKE_INSTALL_PREFIX=${prefix} -DCMAKE_FIND_ROOT_PATH=${prefix} -DCMAKE_TOOLCHAIN_FILE=${CMAKE_TARGET_TOOLCHAIN} -DCMAKE_BUILD_TYPE=Release ..
VERBOSE=ON cmake --build . --config Release --target install
install_license $WORKSPACE/srcdir/FastJet_Julia_Wrapper/FastJet_Julia_Wrapper/LICENSE.md
"""
# These are the platforms we will build for by default, unless further
# platforms are passed in on the command line
platforms = Platform[
Linux(:x86_64, libc=:glibc),
MacOS(:x86_64)
]
platforms = expand_cxxstring_abis(platforms)
# The products that we will ensure are always built
products = [
LibraryProduct("libfastjetwrap", :libfastjetwrap)
]
# Dependencies that must be installed before this package can be built
dependencies = [
Dependency("libcxxwrap_julia_jll"),
Dependency("FastJet_jll")
]
build_tarballs(ARGS, name, version, sources, script, platforms, products, dependencies; preferred_gcc_version=v"7")
|
{"hexsha": "1cd5de47dd63efc487180fec724542c9694dbcaf", "size": 2285, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "F/FastJet_Julia_Wrapper/build_tarballs.jl", "max_stars_repo_name": "tbeason/Yggdrasil", "max_stars_repo_head_hexsha": "57539ff4219d0defad6f64f790b96ff22066a54e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "F/FastJet_Julia_Wrapper/build_tarballs.jl", "max_issues_repo_name": "tbeason/Yggdrasil", "max_issues_repo_head_hexsha": "57539ff4219d0defad6f64f790b96ff22066a54e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "F/FastJet_Julia_Wrapper/build_tarballs.jl", "max_forks_repo_name": "tbeason/Yggdrasil", "max_forks_repo_head_hexsha": "57539ff4219d0defad6f64f790b96ff22066a54e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1132075472, "max_line_length": 239, "alphanum_fraction": 0.7789934354, "num_tokens": 699}
|
clear;
disp('Program started');
% control mode (0: impedance control
% 1: admittance control(velocity command)
% 2: admittance control(position command))
control_mode = 0;
% reference trajectory mode (0: horizontal line
% 1: circle)
trajectory_mode = 0;
% dynamic params (the same as those used in V-REP)
I1 = 1;
I2 = 1;
l1 = 0.3;
l2 = 0.3;
m1 = 0.5;
m2 = 0.5;
a1 = 0.15;
a2 = 0.15;
g = 9.81;
% impedance model params
kpx = 150;
kpy = 150;
kdx = 70;
kdy = 70;
mx = 1;
my = 1;
% for moving-average force filter
windowSize = 5;
b = (1/windowSize)*ones(1,windowSize);
a = 1;
raw_externalforces = zeros(2,windowSize);
filtered_externalforce = zeros(2,1);
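% With b = ones(1,windowSize)/windowSize and a = 1, filter(b,a,x) is a plain
% moving average: y(n) = (x(n) + x(n-1) + ... + x(n-windowSize+1))/windowSize.
% For example (values chosen for illustration), the last sample of the
% filtered window [0 0 5 5 5] is (0+0+5+5+5)/5 = 3.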
vrep=remApi('remoteApi'); % using the prototype file (remoteApiProto.m)
vrep.simxFinish(-1); % just in case, close all opened connections
id=vrep.simxStart('127.0.0.1',19997,true,true,5000,5); % connect to vrep server
if (id>-1)
disp('Connected to remote API server');
% timestep
dt = 0.01;
vrep.simxSetFloatingParameter(id,vrep.sim_floatparam_simulation_time_step,dt,vrep.simx_opmode_oneshot_wait);
    % set synchronous mode so each simxSynchronousTrigger advances one step
vrep.simxSynchronous(id,true);
% start the simulation:
vrep.simxStartSimulation(id,vrep.simx_opmode_oneshot_wait);
    % trigger one simulation step (and optionally pause briefly) to clear
    % stale data in the V-REP buffer
vrep.simxSynchronousTrigger(id);
% pause(0.3);
handles = two_link_workcell_init(vrep,id);
armJoints = handles.armJoints;
forcesensor = handles.forcesensor;
x0 = [0;0;0;0];
tau = [0;0];
% set joint control mode in vrep
if control_mode == 2
for i=1:2
vrep.simxSetObjectIntParameter(id,armJoints(i),2001,1,vrep.simx_opmode_oneshot_wait);
end
else
for i=1:2
vrep.simxSetObjectIntParameter(id,armJoints(i),2001,0,vrep.simx_opmode_oneshot_wait);
end
end
% get init joint states
for i=1:2
[res,x0(i)] = vrep.simxGetJointPosition(id,armJoints(i),vrep.simx_opmode_buffer);vrchk(vrep, res, true);
[res,x0(i+2)] = vrep.simxGetObjectFloatParameter(id,armJoints(i),2012,vrep.simx_opmode_buffer);vrchk(vrep, res, true);
[res,tau(i)] = vrep.simxGetJointForce(id,armJoints(i),vrep.simx_opmode_buffer);vrchk(vrep, res, true);
end
% get external force in force sensor frame
[res,state,fexternal,tauexternal] = vrep.simxReadForceSensor(id,forcesensor,vrep.simx_opmode_buffer);vrchk(vrep, res, true);
    % transform external force into the robot base frame (here the robot base frame coincides with the world frame in V-REP)
[res,eulerAngles]=vrep.simxGetObjectOrientation(id,forcesensor,-1,vrep.simx_opmode_buffer);vrchk(vrep, res, true);
forcesensorrotation = rotx(double(eulerAngles(1)*180/pi))*roty(double(eulerAngles(2)*180/pi))*rotz(double(eulerAngles(3)*180/pi));
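    % rotx/roty/rotz expect degrees, hence the 180/pi conversion; the product
    % maps the measured force from the sensor frame into the base/world frame.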
fexternal_inertial = forcesensorrotation*[fexternal(1);fexternal(2);fexternal(3)];
raw_externalforces(1,:) = fexternal_inertial(1)*ones(1,windowSize);
raw_externalforces(2,:) = fexternal_inertial(3)*ones(1,windowSize);
% record data
recordData.x = [];
recordData.tau = [];
recordData.fmea = [];
recordData.u = [];
recordData.t = [];
% max simulation duration
maxsimtime = 20;
% current time
current_time = 0;
% reference joint trajectory
qref = x0(1:2);
qdotref = [0;0];
qdotdotref = [0;0];
% reference ee trajectory
eeposref = Direct_Kinematics(x0(1:2),[l1;l2]);
eevref = [0;0];
eearef = [0;0];
    % admittance-control state: integrated impedance-model motion
xe_admittance = [0;0];
xedot_admittance = [0;0];
xedotdot_admittance = [0;0];
q_admittance = [0;0];
qdot_admittance = [0;0];
qdotdot_admittance = [0;0];
while vrep.simxGetConnectionId(id)~=-1
% get robot joint states
for i=1:2
[res,x0(i)] = vrep.simxGetJointPosition(id,armJoints(i),vrep.simx_opmode_buffer);vrchk(vrep, res, true);
[res,x0(i+2)] = vrep.simxGetObjectFloatParameter(id,armJoints(i),2012,vrep.simx_opmode_buffer);vrchk(vrep, res, true);
[res,tau(i)] = vrep.simxGetJointForce(id,armJoints(i),vrep.simx_opmode_buffer);vrchk(vrep, res, true);
end
% get external force
[res,state,fexternal,tauexternal] = vrep.simxReadForceSensor(id,forcesensor,vrep.simx_opmode_buffer);vrchk(vrep, res, true);
        % transform external force from force sensor frame to robot base frame
[res,eulerAngles]=vrep.simxGetObjectOrientation(id,forcesensor,-1,vrep.simx_opmode_buffer);vrchk(vrep, res, true);
forcesensorrotation = rotx(double(eulerAngles(1)*180/pi))*roty(double(eulerAngles(2)*180/pi))*rotz(double(eulerAngles(3)*180/pi));
fexternal_inertial = forcesensorrotation*[fexternal(1);fexternal(2);fexternal(3)];
% shift raw external forces array
for i=1:windowSize-1
raw_externalforces(:,i) = raw_externalforces(:,i+1);
end
raw_externalforces(1,windowSize) = fexternal_inertial(1);
raw_externalforces(2,windowSize) = fexternal_inertial(3);
% force filtering
filtered_externalforces_x = filter(b,a,raw_externalforces(1,:));
filtered_externalforces_y = filter(b,a,raw_externalforces(2,:));
filtered_externalforce = [filtered_externalforces_x(windowSize);filtered_externalforces_y(windowSize)];
        % the joint force reported by V-REP is the force experienced by the
        % joint, so flip the sign to get the joint drive torque
tau = -tau;
if (current_time > maxsimtime)
break;
end
% get reference trajectory point
[eeposref,eevref,eearef] = ReferenceTrajectory(current_time,trajectory_mode);
%% impedance control
if control_mode == 0
            J = Geometric_Jacobian(x0(1:2),[l1;l2]);
dJ_dt = Geometric_Jacobian_Derivative(x0,[l1;l2]);
qdotdotref = J\(eearef - dJ_dt*x0(3:4));
Mass = Mass_Matrix(x0(1:2),[I1 I2 l1 l2 m1 m2 a1 a2 g]');
u = Impedance_Controller(x0,[l1;l2],[kpx;kpy;kdx;kdy],eeposref,eevref)...
+ Mass*qdotdotref...
+ Coriolis_Centrifugal_Torque(x0,[l1;l2;m1;m2;a1;a2;g])...
+ Gravity_Torque(x0(1:2),[l1;l2;m1;m2;a1;a2;g]);
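            % Computed-torque impedance control: the Impedance_Controller term
            % (built from the task-space stiffness/damping gains) plus
            % model-based feedforward M(q)*qdotdotref, Coriolis/centrifugal,
            % and gravity compensation.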
            % clamp joint torques to the +/- 25 actuator bound
            u = max(min(u, 25), -25);
            % send torque command to vrep: the joint motor drives toward the
            % target velocity, so set a huge target velocity in the torque's
            % direction and use the joint force setting as the magnitude
for i=1:2
if u(i) < 0
res = vrep.simxSetJointTargetVelocity(id,armJoints(i),-99999,vrep.simx_opmode_oneshot);vrchk(vrep, res, true);
res = vrep.simxSetJointForce(id,armJoints(i),-u(i),vrep.simx_opmode_oneshot);vrchk(vrep, res, true);
else
res = vrep.simxSetJointTargetVelocity(id,armJoints(i),99999,vrep.simx_opmode_oneshot);vrchk(vrep, res, true);
res = vrep.simxSetJointForce(id,armJoints(i),u(i),vrep.simx_opmode_oneshot);vrchk(vrep, res, true);
end
end
%% admittance control
elseif control_mode == 1
% --- velocity command --- (qdot_d + qdot_admittance)
            % compute the joint acceleration that realizes the impedance model
qdotdot_admittance = Admittance_Controller(x0,filtered_externalforce,[I1 I2 l1 l2 m1 m2 a1 a2 g]',[mx;my;kpx;kpy;kdx;kdy],eeposref,eevref);
            J = Geometric_Jacobian(x0(1:2),[l1;l2]);
dJ_dt = Geometric_Jacobian_Derivative(x0,[l1;l2]);
qdotdot_tracking = J\(eearef - dJ_dt*x0(3:4));
qdot_admittance = qdot_admittance + (qdotdot_admittance)*dt;
qdotref = J\eevref + qdot_admittance;
u = qdotref;
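            % Velocity-command admittance: the impedance-model acceleration is
            % integrated into the velocity offset qdot_admittance, which is
            % added to the feedforward task velocity mapped through the
            % inverse Jacobian (J\eevref).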
% send velocity command to vrep
for i=1:2
vrep.simxSetJointTargetVelocity(id,armJoints(i),qdotref(i),vrep.simx_opmode_oneshot);vrchk(vrep, res, true);
end
elseif control_mode == 2
% --- position command --- qd + q_admittance
            % compute the joint acceleration that realizes the impedance model
            qdotdot_admittance = Admittance_Controller(x0,filtered_externalforce,[I1 I2 l1 l2 m1 m2 a1 a2 g]',[mx;my;kpx;kpy;kdx;kdy],eeposref-eevref*dt,eevref-eearef*dt); % references shifted back one step (dt)
            J = Geometric_Jacobian(x0(1:2),[l1;l2]);
dJ_dt = Geometric_Jacobian_Derivative(x0,[l1;l2]);
qdotdot_tracking = J\(eearef - dJ_dt*x0(3:4));
q_admittance = q_admittance + qdot_admittance*dt + 1/2*qdotdot_admittance*dt^2;
qdot_admittance = qdot_admittance + qdotdot_admittance*dt;
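            % Double-integrate the impedance-model acceleration: a second-order
            % Taylor step for the position offset and forward Euler for the
            % velocity offset.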
qref = Inverse_Kinematics(x0(1:2),eeposref,[l1 l2]) + q_admittance;
u = qref;
% send position command to vrep
for i=1:2
vrep.simxSetJointTargetPosition(id,armJoints(i),u(i),vrep.simx_opmode_oneshot);vrchk(vrep, res, true);
end
end
%% record data
recordData.x = [recordData.x,x0];
recordData.tau = [recordData.tau,tau];
recordData.fmea = [recordData.fmea,raw_externalforces(:,end)];
recordData.u = [recordData.u,u];
recordData.t = [recordData.t,current_time];
% update current_time
current_time = current_time + dt;
vrep.simxSynchronousTrigger(id);
end
% display result
titles = {'q1','q2','dq1','dq2'};
figure('name','state');
for i=1:4
subplot(2,2,i);
plot(recordData.t,recordData.x(i,:));
title(titles{i});
xlabel('t/s');
end
titles = {'u1','u2'};
figure('name','input');
for i=1:2
subplot(1,2,i);
plot(recordData.t,recordData.u(i,:));
title(titles{i});
xlabel('t/s');
end
figure('name','external force');
subplot(1,2,1);
plot(recordData.t,recordData.fmea(1,:));
title('fextx');
xlabel('t/s');
hold off
subplot(1,2,2);
plot(recordData.t,recordData.fmea(2,:));
title('fexty');
xlabel('t/s');
hold off
else
    disp('Failed connecting to remote API server');
end
vrep.simxStopSimulation(id,vrep.simx_opmode_oneshot_wait);
vrep.simxFinish(id);
vrep.delete();
|
{"author": "xuhuairuogu", "repo": "V-REP-Simulation-Projects", "sha": "841b944af4ea3a8fb250578d36434515f577f411", "save_path": "github-repos/MATLAB/xuhuairuogu-V-REP-Simulation-Projects", "path": "github-repos/MATLAB/xuhuairuogu-V-REP-Simulation-Projects/V-REP-Simulation-Projects-841b944af4ea3a8fb250578d36434515f577f411/two_link_manipulator_impedance_admittance_control/Simu_vrep.m"}
|
module Day3
function read_input(filepath)
    # Parse the file into an n_rows x n_cols matrix of Ints (one line per row, one bit per column)
lines = readlines(filepath)
n_rows = length(lines)
n_cols = length(lines[1])
A = zeros(Int64, (n_rows, n_cols))
for row = 1:n_rows
for col = 1:n_cols
A[row,col] = parse(Int64, lines[row][col])
end
end
return A
end
function calculate_gamma_and_epsilon_rates(diagnostics::Matrix{Int64})
column_sums = sum(diagnostics, dims=1)
n_rows = size(diagnostics)[1]
gamma_rate_binary = string("0b", join(Int(i >= n_rows/2) for i in column_sums))
epsilon_rate_binary = string("0b", join(Int(i < n_rows/2) for i in column_sums))
gamma_rate_decimal = parse(Int, gamma_rate_binary)
epsilon_rate_decimal = parse(Int, epsilon_rate_binary)
return (gamma_rate_decimal, epsilon_rate_decimal)
end
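# For example (sums chosen for illustration): column sums [7 3] over 10 rows
# give gamma bits "10" (7 >= 5, 3 < 5) and epsilon bits "01", i.e.
# gamma = 2 and epsilon = 1.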
function calculate_oxygen_and_co2_ratings(diagnostics::Matrix{Int64})
# To calculate the oxygen rating:
# Start with the 1st column, determining the most common number in the column (tie goes to 1)
# Discard all rows whose bit in this column are not that number.
# Do we have only 1 row left? If not, move to the next column and repeat until we have only 1 row remaining.
    # If all columns are exhausted before a single row remains, cycle back to the first column.
n_cols = size(diagnostics)[2]
# Calculate oxygen rating
oxygen_rating_binary = begin
candidates = diagnostics
n_iter = 0
while size(candidates)[1] > 1
current_column = (n_iter % n_cols) + 1 # so that we can cycle through the columns indefinitely 1,2,...,n_cols,1,2,...
n_rows = size(candidates)[1] # current number of rows remaining
n_ones_in_column = sum(candidates[:, current_column])
most_common_value_in_column = (n_ones_in_column / n_rows) >= 0.5 ? 1 : 0
            # Keep only the rows whose bit in the current column equals the most common value
candidates = candidates[candidates[:, current_column] .== most_common_value_in_column, :]
n_iter += 1
end
join(candidates[1,:]) # collapse the lone remaining row into a string
end
# Calculate co2 rating
co2_rating_binary = begin
candidates = diagnostics
n_iter = 0
while size(candidates)[1] > 1
current_column = (n_iter % n_cols) + 1 # so that we can cycle through the columns indefinitely 1,2,...,n_cols,1,2,...
n_rows = size(candidates)[1] # current number of rows remaining
n_ones_in_column = sum(candidates[:, current_column])
least_common_value_in_column = (n_ones_in_column / n_rows) < 0.5 ? 1 : 0
            # Keep only the rows whose bit in the current column equals the least common value
candidates = candidates[candidates[:, current_column] .== least_common_value_in_column, :]
n_iter += 1
end
join(candidates[1,:]) # collapse the lone remaining row into a string
end
oxygen_rating_decimal = parse(Int, string("0b", oxygen_rating_binary))
co2_rating_decimal = parse(Int, string("0b", co2_rating_binary))
return (oxygen_rating_decimal, co2_rating_decimal)
end
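# On Advent of Code 2021 Day 3's published sample these filters isolate
# 0b10111 = 23 (oxygen) and 0b01010 = 10 (CO2): each loop keeps only rows
# whose current bit matches the column's most/least common value until a
# single row remains.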
function part1_answer()
diagnostics = read_input(joinpath(@__DIR__, "input.txt"))
(γ, ϵ) = calculate_gamma_and_epsilon_rates(diagnostics)
return γ * ϵ
end
function part2_answer()
diagnostics = read_input(joinpath(@__DIR__, "input.txt"))
(o2, co2) = calculate_oxygen_and_co2_ratings(diagnostics)
return o2 * co2
end
end
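# Illustrative usage (a minimal sketch; assumes `input.txt` sits beside this file):
#   include("day03.jl")
#   Day3.part1_answer()   # gamma * epsilon
#   Day3.part2_answer()   # oxygen * CO2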
|
{"hexsha": "1b8651b6da7469abe0d333ce156e391c32ae0068", "size": 3636, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/day03/day03.jl", "max_stars_repo_name": "PriceHardman/AdventOfCode2021", "max_stars_repo_head_hexsha": "5a02982448a736c8089940ae0c97d894850f1938", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/day03/day03.jl", "max_issues_repo_name": "PriceHardman/AdventOfCode2021", "max_issues_repo_head_hexsha": "5a02982448a736c8089940ae0c97d894850f1938", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/day03/day03.jl", "max_forks_repo_name": "PriceHardman/AdventOfCode2021", "max_forks_repo_head_hexsha": "5a02982448a736c8089940ae0c97d894850f1938", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6808510638, "max_line_length": 129, "alphanum_fraction": 0.6680418042, "num_tokens": 908}
|